Preface
This article mainly looks at Hibernate's hbm2ddl (schema generation) support.
SchemaManagementTool
hibernate-core-5.0.12.Final-sources.jar!/org/hibernate/tool/schema/spi/SchemaManagementTool.java
public interface SchemaManagementTool extends Service {
    public SchemaCreator getSchemaCreator(Map options);
    public SchemaDropper getSchemaDropper(Map options);
    public SchemaMigrator getSchemaMigrator(Map options);
    public SchemaValidator getSchemaValidator(Map options);
}
This tool defines four capabilities: create, drop, migrate and validate, which back the familiar hibernate.hbm2ddl.auto settings (create, create-drop, update, validate).
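Since SchemaManagementTool extends Service, the tool can be looked up from a StandardServiceRegistry. Below is a minimal sketch; the H2 settings and the empty options map are placeholder assumptions for illustration only.

import java.util.Collections;

import org.hibernate.boot.registry.StandardServiceRegistry;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.tool.schema.spi.SchemaCreator;
import org.hibernate.tool.schema.spi.SchemaManagementTool;

public class SchemaToolLookup {
    public static void main(String[] args) {
        // the tool is registered as a standard Hibernate service
        StandardServiceRegistry registry = new StandardServiceRegistryBuilder()
                .applySetting( "hibernate.dialect", "org.hibernate.dialect.H2Dialect" )
                .applySetting( "hibernate.connection.url", "jdbc:h2:mem:demo" )
                .build();
        try {
            SchemaManagementTool tool = registry.getService( SchemaManagementTool.class );
            // each getter hands back the strategy for one of the four capabilities
            SchemaCreator creator = tool.getSchemaCreator( Collections.emptyMap() );
            // creator.doCreation(...) would then be driven by a Metadata instance
            // built against this same registry
        }
        finally {
            StandardServiceRegistryBuilder.destroy( registry );
        }
    }
}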
SchemaCreatorImpl
hibernate-core-5.0.12.Final-sources.jar!/org/hibernate/tool/schema/internal/SchemaCreatorImpl.java
public class SchemaCreatorImpl implements SchemaCreator {
    @Override
    public void doCreation(Metadata metadata, boolean createNamespaces, List<Target> targets) throws SchemaManagementException {
        doCreation( metadata, createNamespaces, targets.toArray( new Target[ targets.size() ] ) );
    }
    //......
}
The main logic lives in doCreation, which creates the schema objects in the following order:
- create catalogs/schemas
- create before-table auxiliary objects
- create sequences
- create tables
- create indexes
- create unique constraints
- create foreign keys
- create after-table auxiliary objects
Each of these steps in turn delegates mainly to the various exporters provided by the Dialect, as sketched below.
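A simplified sketch of that flow (not the actual SchemaCreatorImpl code): it walks the boot-model namespaces and asks the dialect's exporters for the CREATE statements. The Database/Namespace accessors used here are assumptions based on the 5.x boot model.

import java.util.ArrayList;
import java.util.List;

import org.hibernate.boot.Metadata;
import org.hibernate.boot.model.relational.Namespace;
import org.hibernate.boot.model.relational.Sequence;
import org.hibernate.dialect.Dialect;
import org.hibernate.mapping.Table;

public class CreationOrderSketch {

    // collect CREATE statements roughly in the order doCreation emits them:
    // sequences first, then tables (indexes, unique keys and foreign keys
    // would follow per table)
    public static List<String> collectCreateSql(Metadata metadata, Dialect dialect) {
        List<String> sql = new ArrayList<String>();
        for ( Namespace namespace : metadata.getDatabase().getNamespaces() ) {
            for ( Sequence sequence : namespace.getSequences() ) {
                add( sql, dialect.getSequenceExporter().getSqlCreateStrings( sequence, metadata ) );
            }
            for ( Table table : namespace.getTables() ) {
                add( sql, dialect.getTableExporter().getSqlCreateStrings( table, metadata ) );
            }
        }
        return sql;
    }

    private static void add(List<String> target, String[] statements) {
        for ( String statement : statements ) {
            target.add( statement );
        }
    }
}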
Dialect
hibernate-core-5.0.12.Final-sources.jar!/org/hibernate/dialect/Dialect.java
public abstract class Dialect implements ConversionContext {
    //......
    // DDL support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    private StandardTableExporter tableExporter = new StandardTableExporter( this );
    private StandardSequenceExporter sequenceExporter = new StandardSequenceExporter( this );
    private StandardIndexExporter indexExporter = new StandardIndexExporter( this );
    private StandardForeignKeyExporter foreignKeyExporter = new StandardForeignKeyExporter( this );
    private StandardUniqueKeyExporter uniqueKeyExporter = new StandardUniqueKeyExporter( this );
    private StandardAuxiliaryDatabaseObjectExporter auxiliaryObjectExporter = new StandardAuxiliaryDatabaseObjectExporter( this );
    //......
}
Here the Dialect defines the exporters for table, sequence, index, foreign key, unique key and auxiliary database object.
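These private fields are exposed through accessors such as getTableExporter(), which is also the natural hook for customizing the generated DDL. A hypothetical custom dialect that swaps in its own table exporter (the class name and the H2 base class are assumptions for this sketch):

import org.hibernate.dialect.H2Dialect;
import org.hibernate.mapping.Table;
import org.hibernate.tool.schema.internal.StandardTableExporter;
import org.hibernate.tool.schema.spi.Exporter;

public class CustomTableDdlDialect extends H2Dialect {

    // a subclass of StandardTableExporter (or any Exporter<Table>) could be
    // plugged in here to tweak the generated CREATE TABLE statements
    private final Exporter<Table> customTableExporter = new StandardTableExporter( this );

    @Override
    public Exporter<Table> getTableExporter() {
        return customTableExporter;
    }
}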
StandardTableExporter
hibernate-core-5.0.12.Final-sources.jar!/org/hibernate/tool/schema/internal/StandardTableExporter.java
public class StandardTableExporter implements Exporter<Table> {
    //.....
    @Override
    public String[] getSqlCreateStrings(Table table, Metadata metadata) {
        final QualifiedName tableName = new QualifiedNameParser.NameParts(
                Identifier.toIdentifier( table.getCatalog(), table.isCatalogQuoted() ),
                Identifier.toIdentifier( table.getSchema(), table.isSchemaQuoted() ),
                table.getNameIdentifier()
        );

        final JdbcEnvironment jdbcEnvironment = metadata.getDatabase().getJdbcEnvironment();
        StringBuilder buf =
                new StringBuilder( tableCreateString( table.hasPrimaryKey() ) )
                        .append( ' ' )
                        .append(
                                jdbcEnvironment.getQualifiedObjectNameFormatter().format(
                                        tableName,
                                        jdbcEnvironment.getDialect()
                                )
                        )
                        .append( " (" );

        boolean isPrimaryKeyIdentity = table.hasPrimaryKey()
                && table.getIdentifierValue() != null
                && table.getIdentifierValue().isIdentityColumn( metadata.getIdentifierGeneratorFactory(), dialect );
        // this is the much better form moving forward as we move to metamodel
        //boolean isPrimaryKeyIdentity = hasPrimaryKey
        //        && table.getPrimaryKey().getColumnSpan() == 1
        //        && table.getPrimaryKey().getColumn( 0 ).isIdentity();

        // Try to find out the name of the primary key in case the dialect needs it to create an identity
        String pkColName = null;
        if ( table.hasPrimaryKey() ) {
            Column pkColumn = (Column) table.getPrimaryKey().getColumns().iterator().next();
            pkColName = pkColumn.getQuotedName( dialect );
        }

        final Iterator columnItr = table.getColumnIterator();
        boolean isFirst = true;
        while ( columnItr.hasNext() ) {
            final Column col = (Column) columnItr.next();
            if ( isFirst ) {
                isFirst = false;
            }
            else {
                buf.append( ", " );
            }

            String colName = col.getQuotedName( dialect );
            buf.append( colName ).append( ' ' );

            if ( isPrimaryKeyIdentity && colName.equals( pkColName ) ) {
                // to support dialects that have their own identity data type
                if ( dialect.getIdentityColumnSupport().hasDataTypeInIdentityColumn() ) {
                    buf.append( col.getSqlType( dialect, metadata ) );
                }
                buf.append( ' ' )
                        .append( dialect.getIdentityColumnSupport().getIdentityColumnString( col.getSqlTypeCode( metadata ) ) );
            }
            else {
                buf.append( col.getSqlType( dialect, metadata ) );

                String defaultValue = col.getDefaultValue();
                if ( defaultValue != null ) {
                    buf.append( " default " ).append( defaultValue );
                }

                if ( col.isNullable() ) {
                    buf.append( dialect.getNullColumnString() );
                }
                else {
                    buf.append( " not null" );
                }
            }

            if ( col.isUnique() ) {
                String keyName = Constraint.generateName( "UK_", table, col );
                UniqueKey uk = table.getOrCreateUniqueKey( keyName );
                uk.addColumn( col );
                buf.append(
                        dialect.getUniqueDelegate()
                                .getColumnDefinitionUniquenessFragment( col )
                );
            }

            if ( col.getCheckConstraint() != null && dialect.supportsColumnCheck() ) {
                buf.append( " check (" )
                        .append( col.getCheckConstraint() )
                        .append( ")" );
            }

            String columnComment = col.getComment();
            if ( columnComment != null ) {
                buf.append( dialect.getColumnComment( columnComment ) );
            }
        }

        if ( table.hasPrimaryKey() ) {
            buf.append( ", " )
                    .append( table.getPrimaryKey().sqlConstraintString( dialect ) );
        }

        buf.append( dialect.getUniqueDelegate().getTableCreationUniqueConstraintsFragment( table ) );

        applyTableCheck( table, buf );

        buf.append( ')' );

        if ( table.getComment() != null ) {
            buf.append( dialect.getTableComment( table.getComment() ) );
        }

        applyTableTypeString( buf );

        List<String> sqlStrings = new ArrayList<String>();
        sqlStrings.add( buf.toString() );

        applyComments( table, tableName, sqlStrings );

        applyInitCommands( table, sqlStrings );

        return sqlStrings.toArray( new String[ sqlStrings.size() ] );
    }
}
The SQL generation above still delegates to the Dialect that was passed in for the pieces that differ between databases.
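For example, the identity-column and null-column fragments consulted above differ per dialect. A small sketch comparing two dialects; this assumes a Hibernate version in which Dialect exposes getIdentityColumnSupport(), as the snippet above does.

import java.sql.Types;

import org.hibernate.dialect.Dialect;
import org.hibernate.dialect.H2Dialect;
import org.hibernate.dialect.MySQL5Dialect;

public class DialectFragmentDemo {
    public static void main(String[] args) {
        for ( Dialect dialect : new Dialect[] { new H2Dialect(), new MySQL5Dialect() } ) {
            System.out.println( dialect.getClass().getSimpleName() );
            // fragment appended for an identity primary key column
            System.out.println( "  identity: " + dialect.getIdentityColumnSupport().getIdentityColumnString( Types.INTEGER ) );
            // fragment appended after a nullable column's type
            System.out.println( "  nullable: '" + dialect.getNullColumnString() + "'" );
        }
    }
}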
Summary
To implement DDL generation, you need, on one hand, different dialects to hide the differences between databases, and on the other hand, the creation of the related objects such as indexes, sequences, primary keys and foreign keys; what remains is the mapping of column types.