ddl

package
v0.4.12 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 29, 2024 License: MIT Imports: 43 Imported by: 11

Documentation

Index

Constants

View Source
const (
	// Constraint and index types.
	PRIMARY_KEY = "PRIMARY KEY"
	FOREIGN_KEY = "FOREIGN KEY"
	UNIQUE      = "UNIQUE"
	CHECK       = "CHECK"
	EXCLUDE     = "EXCLUDE"
	INDEX       = "INDEX"

	// Identity column definitions (Postgres and SQL Server).
	DEFAULT_IDENTITY = "GENERATED BY DEFAULT AS IDENTITY"
	ALWAYS_IDENTITY  = "GENERATED ALWAYS AS IDENTITY"
	IDENTITY         = "IDENTITY"

	// Foreign key ON UPDATE and ON DELETE rules.
	RESTRICT    = "RESTRICT"
	CASCADE     = "CASCADE"
	NO_ACTION   = "NO ACTION"
	SET_NULL    = "SET NULL"
	SET_DEFAULT = "SET DEFAULT"
)

The various keyword constants used by ddl.

View Source
const (
	// The database dialects supported by ddl.
	DialectSQLite    = "sqlite"
	DialectPostgres  = "postgres"
	DialectMySQL     = "mysql"
	DialectSQLServer = "sqlserver"
	DialectOracle    = "oracle"
)

The dialects supported by ddl.

Variables

View Source
// Analyzer validates ddl structs. It can be plugged into a custom linter
// built on golang.org/x/tools/go/analysis.
var Analyzer = &analysis.Analyzer{
	Name:     "ddl",
	Doc:      "validates ddl structs",
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run: func(pass *analysis.Pass) (any, error) {
		// Visit every type declaration in the package, letting the struct
		// parser accumulate diagnostics as it goes.
		inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
		nodeFilter := []ast.Node{(*ast.TypeSpec)(nil)}
		p := NewStructParser(pass.Fset)
		inspect.Preorder(nodeFilter, p.VisitStruct)
		// WriteCatalog is invoked for its side effects on the parser's
		// diagnostics; the catalog itself and the returned error are
		// discarded. NOTE(review): confirm ignoring this error is intended.
		var catalog Catalog
		_ = p.WriteCatalog(&catalog)
		positions, msgs := p.Diagnostics()
		if len(msgs) == 0 {
			return nil, nil
		}
		for i, msg := range msgs {
			// positions may be shorter than msgs; missing entries fall
			// back to the zero token.Pos.
			var pos token.Pos
			if i < len(positions) {
				pos = positions[i]
			}
			pass.Reportf(pos, msg)
		}
		return nil, nil
	},
}

Analyzer is an &analysis.Analyzer which can be used in a custom linter.

Functions

func EscapeQuote added in v0.4.7

func EscapeQuote(str string, quote byte) string

TODO: replace all invocations with strings.ReplaceAll. EscapeQuote will escape the relevant quote in a string by doubling up on it (as per SQL rules).

func GenerateName added in v0.4.8

func GenerateName(nameType string, tableName string, columnNames []string) string

GenerateName generates the appropriate constraint/index name for a given table and columns. The nameType should be one of "PRIMARY KEY", "FOREIGN KEY", "UNIQUE" or "INDEX".

func NormalizeDSN added in v0.3.1

func NormalizeDSN(dsn string) (dialect, driverName, normalizedDSN string)

NormalizeDSN normalizes an input DSN (Data Source Name), using a heuristic to detect the dialect of the DSN as well as providing an appropriate driverName to be used with sql.Open().

func QuoteIdentifier added in v0.4.7

func QuoteIdentifier(dialect string, identifier string) string

QuoteIdentifier quotes an identifier if necessary using dialect-specific quoting rules.

func Register

func Register(driver Driver)

Register registers a driver for a particular dialect. It is safe to call Register for a dialect multiple times, the last one wins.

func Sprint added in v0.4.7

func Sprint(dialect string, v any) (string, error)

Sprint is the equivalent of Sprintf but for converting a single value into its SQL representation.

func Sprintf added in v0.4.7

func Sprintf(dialect string, query string, args []any) (string, error)

Sprintf will interpolate SQL args into a query string containing prepared statement parameters. It returns an error if an argument cannot be properly represented in SQL. This function may be vulnerable to SQL injection and should be used for logging purposes only.

Types

type AutomigrateCmd

type AutomigrateCmd struct {
	// (Required) DB is the database to apply migrations to.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// DestCatalog is the destination catalog that you want to migrate to.
	DestCatalog *Catalog

	// DirFS is where the Filenames will be sourced from.
	DirFS fs.FS

	// Filenames specifies the list of files (loaded from the DirFS field) used
	// to build the DestCatalog. It will be ignored if the DestCatalog is
	// already non-nil.
	Filenames []string

	// Stdout specifies the command's standard output. If nil, the command
	// writes to os.Stdout.
	Stdout io.Writer

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string

	// DropObjects controls whether statements like DROP TABLE, DROP COLUMN
	// will be generated.
	DropObjects bool

	// AcceptWarnings will accept warnings when generating migrations.
	AcceptWarnings bool

	// If DryRun is true, the SQL queries will be written to Stdout instead of
	// being run against the database.
	DryRun bool

	// LockTimeout specifies how long to wait to acquire a lock on a table
	// before bailing out. If empty, 1*time.Second is used.
	LockTimeout time.Duration

	// MaxAttempts is the maximum number of retries on lock timeout.
	MaxAttempts int

	// MaxDelay is the maximum delay between retries.
	MaxDelay time.Duration

	// BaseDelay is the base delay between retries.
	BaseDelay time.Duration

	// Ctx is the command's context.
	Ctx context.Context
	// contains filtered or unexported fields
}

AutomigrateCmd implements the `sqddl automigrate` subcommand.

func AutomigrateCommand

func AutomigrateCommand(args ...string) (*AutomigrateCmd, error)

AutomigrateCommand creates a new AutomigrateCmd with the given arguments. E.g.

sqddl automigrate -db <DATABASE_URL> -dest <DEST_SCHEMA> [FLAGS]

AutomigrateCommand("-db", "postgres://user:pass@localhost:5432/sakila", "-dest", "tables/tables.go")

func (*AutomigrateCmd) Run

func (cmd *AutomigrateCmd) Run() error

Run runs the AutomigrateCmd.

type BatchInsert

type BatchInsert struct {
	// Batchsize is the batch size used in INSERT statements e.g.
	// INSERT INTO tbl VALUES (1)           /* batchsize=1 */
	// INSERT INTO tbl VALUES (1), (2), (3) /* batchsize=3 */
	// If Batchsize <= 0, a default batch size of 500 will be used.
	Batchsize int

	// Dialect is the database dialect. Possible values: "sqlite", "postgres",
	// "mysql", "sqlserver".
	Dialect string

	// TableSchema is the name of the schema that the table belongs to.
	TableSchema string

	// TableName is the name of the table.
	TableName string

	// Columns is the list of columns participating in the INSERT.
	Columns []string

	// KeyColumns is the list of columns in the table that uniquely identify a
	// row e.g. PRIMARY KEY or UNIQUE columns. If provided, the batch insert
	// will become an upsert instead (e.g. ON CONFLICT DO UPDATE).
	KeyColumns []string

	// IdentityColumns is the list of columns in the table that are identity
	// columns e.g. GENERATED BY DEFAULT AS IDENTITY (Postgres) or IDENTITY
	// (SQL Server). Postgres or SQL Server only. If provided:
	//
	// (Postgres) The identity sequence for each identity column will be
	// updated accordingly at the end of the batch insert e.g. SELECT
	// setval(pg_get_serial_sequence('table', 'id'), max(id)) FROM table;
	//
	// (SQL Server) SET IDENTITY_INSERT will be enabled for the table at the
	// start of batch insert and disabled at the end. It is vital to pass in a
	// stateful database connection (an *sql.Conn or *sql.Tx) because we need
	// to SET IDENTITY_INSERT ON and SET IDENTITY_INSERT OFF on the same
	// connection.
	IdentityColumns []string
}

BatchInsert is used to insert data into a table in batches.

func (*BatchInsert) ExecContext

func (bi *BatchInsert) ExecContext(ctx context.Context, db DB, next func([]any) error) (rowsAffected int64, err error)

ExecContext will execute the batch insert with the given database connection and the `next` iterator function.

The `next` iterator function must populate the []any slice with each successive row's values when called, and must return io.EOF once there are no more results. The length of the []any slice is guaranteed to match the length of the Columns field of the BatchInsert.

Here is a basic example:

bi := &BatchInsert{
    Dialect:         "postgres",
    TableName:       "actors",
    Columns:         []string{"actor_id", "first_name", "last_name"},
    KeyColumns:      []string{"actor_id"},
    IdentityColumns: []string{"actor_id"},
}
actors := []Actor{
    {ActorID: 1, FirstName: "PENELOPE", LastName: "GUINESS"},
    {ActorID: 2, FirstName: "NICK", LastName: "WAHLBERG"},
    {ActorID: 3, FirstName: "ED", LastName: "CHASE"},
}
i := 0
_, err := bi.ExecContext(ctx, db, func(row []any) error {
    if i >= len(actors) {
        return io.EOF
    }
    actor := actors[i]
    row[0] = actor.ActorID
    row[1] = actor.FirstName
    row[2] = actor.LastName
    i++
    return nil
})

type Catalog

type Catalog struct {
	// Dialect is the dialect of the database. Possible values: "sqlite",
	// "postgres", "mysql", "sqlserver".
	Dialect string `json:",omitempty"`

	// VersionNums holds the database's version numbers.
	//
	// Example: Postgres 14.2 would be represented as []int{14, 2}.
	VersionNums VersionNums `json:",omitempty"`

	// CatalogName is the database name.
	CatalogName string `json:",omitempty"`

	// CurrentSchema is the current schema of the database. For Postgres it
	// is usually "public", for MySQL this is the database name, for SQL Server
	// it is usually "dbo". It is always empty for SQLite.
	CurrentSchema string `json:",omitempty"`

	// DefaultCollation is the default collation of the database.
	DefaultCollation string `json:",omitempty"`

	// If DefaultCollationValid is false, the database's default collation is
	// unknown.
	DefaultCollationValid bool `json:",omitempty"`

	// Extensions lists the extensions in the database. Postgres only.
	Extensions []string `json:",omitempty"`

	// If ExtensionsValid is false, the database's extensions are unknown.
	ExtensionsValid bool `json:",omitempty"`

	// Schemas is the list of schemas within the database.
	Schemas []Schema `json:",omitempty"`
}

Catalog represents a database catalog i.e. a database instance.

func (*Catalog) WriteCatalog

func (src *Catalog) WriteCatalog(dest *Catalog) error

WriteCatalog populates the dest Catalog from the src Catalog, doing a deep copy in the process (nothing is shared between the src and dest Catalogs).

type CatalogCache

// CatalogCache maintains an internal lookup cache over a Catalog's nested
// objects (tables, columns, constraints, etc).
type CatalogCache struct {
	// contains filtered or unexported fields
}

CatalogCache is used for querying and modifying a Catalog's nested objects without the need to loop over all the tables, columns, constraints etc just to find what you need. It does so by maintaining an internal cache of where everything is kept. All changes to a Catalog should be made through the CatalogCache in order to keep the internal cache up-to-date.

func NewCatalogCache

func NewCatalogCache(catalog *Catalog) *CatalogCache

NewCatalogCache creates a new CatalogCache from a given Catalog.

func (*CatalogCache) AddOrUpdateColumn

func (c *CatalogCache) AddOrUpdateColumn(table *Table, column Column)

AddOrUpdateColumn adds the given Column to the Table, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateConstraint

func (c *CatalogCache) AddOrUpdateConstraint(table *Table, constraint Constraint)

AddOrUpdateConstraint adds the given Constraint to the Table, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateDomain

func (c *CatalogCache) AddOrUpdateDomain(schema *Schema, domain Domain)

AddOrUpdateDomain adds the given Domain to the Schema, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateEnum

func (c *CatalogCache) AddOrUpdateEnum(schema *Schema, enum Enum)

AddOrUpdateEnum adds the given Enum to the Schema, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateIndex

func (c *CatalogCache) AddOrUpdateIndex(table *Table, index Index)

AddOrUpdateIndex adds the given Index to the Table, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateRoutine

func (c *CatalogCache) AddOrUpdateRoutine(schema *Schema, routine Routine)

AddOrUpdateRoutine adds the given Routine to the Schema, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateSchema

func (c *CatalogCache) AddOrUpdateSchema(catalog *Catalog, schema Schema)

AddOrUpdateSchema adds the given Schema to the Catalog, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateTable

func (c *CatalogCache) AddOrUpdateTable(schema *Schema, table Table)

AddOrUpdateTable adds the given Table to the Schema, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateTrigger

func (c *CatalogCache) AddOrUpdateTrigger(table *Table, trigger Trigger)

AddOrUpdateTrigger adds the given Trigger to the Table, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateView

func (c *CatalogCache) AddOrUpdateView(schema *Schema, view View)

AddOrUpdateView adds the given View to the Schema, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateViewIndex

func (c *CatalogCache) AddOrUpdateViewIndex(view *View, index Index)

AddOrUpdateViewIndex adds the given Index to the View, or updates it if it already exists.

func (*CatalogCache) AddOrUpdateViewTrigger

func (c *CatalogCache) AddOrUpdateViewTrigger(view *View, trigger Trigger)

AddOrUpdateViewTrigger adds the given Trigger to the View, or updates it if it already exists.

func (*CatalogCache) GetColumn

func (c *CatalogCache) GetColumn(table *Table, columnName string) *Column

GetColumn gets a Column with the given columnName from the Table, or returns nil if it doesn't exist. If a nil table is passed in, GetColumn returns nil.

The returning Column pointer is valid as long as no new Column is added to the Table; if a new Column is added, the pointer may now be pointing at a stale Column. Call GetColumn again in order to get the new pointer.

func (*CatalogCache) GetConstraint

func (c *CatalogCache) GetConstraint(table *Table, constraintName string) *Constraint

GetConstraint gets a Constraint with the given constraintName from the Table, or returns nil if it doesn't exist. If a nil table is passed in, GetConstraint returns nil.

The returning Constraint pointer is valid as long as no new Constraint is added to the Table; if a new Constraint is added, the pointer may now be pointing at a stale Constraint. Call GetConstraint again in order to get the new pointer.

func (*CatalogCache) GetDomain

func (c *CatalogCache) GetDomain(schema *Schema, domainName string) *Domain

GetDomain gets a Domain with the given domainName from the Schema, or returns nil if it doesn't exist. If a nil schema is passed in, GetDomain returns nil.

The returning Domain pointer is valid as long as no new Domain is added to the Schema; if a new Domain is added, the pointer may now be pointing at a stale Domain. Call GetDomain again in order to get the new pointer.

func (*CatalogCache) GetEnum

func (c *CatalogCache) GetEnum(schema *Schema, enumName string) *Enum

GetEnum gets an Enum with the given enumName from the Schema, or returns nil if it doesn't exist. If a nil schema is passed in, GetEnum returns nil.

The returning Enum pointer is valid as long as no new Enum is added to the Schema; if a new Enum is added, the pointer may now be pointing at a stale Enum. Call GetEnum again in order to get the new pointer.

func (*CatalogCache) GetForeignKeys

func (c *CatalogCache) GetForeignKeys(table *Table) []*Constraint

GetForeignKeys gets the foreign keys of the table.

func (*CatalogCache) GetIndex

func (c *CatalogCache) GetIndex(table *Table, indexName string) *Index

GetIndex gets an Index with the given indexName from the Table, or returns nil if it doesn't exist. If a nil table is passed in, GetIndex returns nil.

The returning Index pointer is valid as long as no new Index is added to the Table; if a new Index is added, the pointer may now be pointing at a stale Index. Call GetIndex again in order to get the new pointer.

func (*CatalogCache) GetOrCreateColumn

func (c *CatalogCache) GetOrCreateColumn(table *Table, columnName, columnType string) *Column

GetOrCreateColumn gets a Column with the given columnName from the Table, or creates it if it doesn't exist.

The returning Column pointer is valid as long as no new Column is added to the Table; if a new Column is added, the pointer may now be pointing at a stale Column. Call GetColumn again in order to get the new pointer.

func (*CatalogCache) GetOrCreateConstraint

func (c *CatalogCache) GetOrCreateConstraint(table *Table, constraintName, constraintType string, columnNames []string) *Constraint

GetOrCreateConstraint gets a Constraint with the given constraintName from the Table, or creates it if it doesn't exist.

The returning Constraint pointer is valid as long as no new Constraint is added to the Table; if a new Constraint is added, the pointer may now be pointing at a stale Constraint. Call GetConstraint again in order to get the new pointer.

func (*CatalogCache) GetOrCreateDomain

func (c *CatalogCache) GetOrCreateDomain(schema *Schema, domainName string) *Domain

GetOrCreateDomain gets a Domain with the given domainName from the Schema, or creates it if it doesn't exist.

The returning Domain pointer is valid as long as no new Domain is added to the Schema; if a new Domain is added, the pointer may now be pointing at a stale Domain. Call GetDomain again in order to get the new pointer.

func (*CatalogCache) GetOrCreateEnum

func (c *CatalogCache) GetOrCreateEnum(schema *Schema, enumName string) *Enum

GetOrCreateEnum gets an Enum with the given enumName from the Schema, or creates it if it doesn't exist.

The returning Enum pointer is valid as long as no new Enum is added to the Schema; if a new Enum is added, the pointer may now be pointing at a stale Enum. Call GetEnum again in order to get the new pointer.

func (*CatalogCache) GetOrCreateIndex

func (c *CatalogCache) GetOrCreateIndex(table *Table, indexName string, columnNames []string) *Index

GetOrCreateIndex gets an Index with the given indexName from the Table, or creates it if it doesn't exist.

The returning Index pointer is valid as long as no new Index is added to the Table; if a new Index is added, the pointer may now be pointing at a stale Index. Call GetIndex again in order to get the new pointer.

func (*CatalogCache) GetOrCreateRoutine

func (c *CatalogCache) GetOrCreateRoutine(schema *Schema, routineName, identityArguments string) *Routine

GetOrCreateRoutine gets a Routine with the given routineName and identityArguments from the Schema, or creates it if it doesn't exist. The identityArguments string only applies to Postgres because functions may be overloaded and differ only by their identity arguments. For other database dialects, the identityArguments should be an empty string.

The returning Routine pointer is valid as long as no new Routine is added to the Schema; if a new Routine is added, the pointer may now be pointing at a stale Routine. Call GetRoutine again in order to get the new pointer.

func (*CatalogCache) GetOrCreateSchema

func (c *CatalogCache) GetOrCreateSchema(catalog *Catalog, schemaName string) *Schema

GetOrCreateSchema gets a Schema with the given schemaName from the Catalog, or creates it if it doesn't exist.

The returning Schema pointer is valid as long as no new Schema is added to the Catalog; if a new Schema is added, the pointer may now be pointing at a stale Schema. Call GetSchema again in order to get the new pointer.

func (*CatalogCache) GetOrCreateTable

func (c *CatalogCache) GetOrCreateTable(schema *Schema, tableName string) *Table

GetOrCreateTable gets a Table with the given tableName from the Schema, or creates it if it doesn't exist.

The returning Table pointer is valid as long as no new Table is added to the Schema; if a new Table is added, the pointer may now be pointing at a stale Table. Call GetTable again in order to get the new pointer.

func (*CatalogCache) GetOrCreateTrigger

func (c *CatalogCache) GetOrCreateTrigger(table *Table, triggerName string) *Trigger

GetOrCreateTrigger gets a Trigger with the given triggerName from the Table, or creates it if it doesn't exist.

The returning Trigger pointer is valid as long as no new Trigger is added to the Table; if a new Trigger is added, the pointer may now be pointing at a stale Trigger. Call GetTrigger again in order to get the new pointer.

func (*CatalogCache) GetOrCreateView

func (c *CatalogCache) GetOrCreateView(schema *Schema, viewName string) *View

GetOrCreateView gets a View with the given viewName from the Schema, or creates it if it doesn't exist.

The returning View pointer is valid as long as no new View is added to the Schema; if a new View is added, the pointer may now be pointing at a stale View. Call GetView again in order to get the new pointer.

func (*CatalogCache) GetOrCreateViewIndex

func (c *CatalogCache) GetOrCreateViewIndex(view *View, indexName string, columnNames []string) *Index

GetOrCreateViewIndex gets an Index with the given indexName from the View, or creates it if it doesn't exist.

The returning Index pointer is valid as long as no new Index is added to the View; if a new Index is added, the pointer may now be pointing at a stale Index. Call GetViewIndex again in order to get the new pointer.

func (*CatalogCache) GetOrCreateViewTrigger

func (c *CatalogCache) GetOrCreateViewTrigger(view *View, triggerName string) *Trigger

GetOrCreateViewTrigger gets a Trigger with the given triggerName from the View, or creates it if it doesn't exist.

The returning Trigger pointer is valid as long as no new Trigger is added to the View; if a new Trigger is added, the pointer may now be pointing at a stale Trigger. Call GetViewTrigger again in order to get the new pointer.

func (*CatalogCache) GetPrimaryKey

func (c *CatalogCache) GetPrimaryKey(table *Table) *Constraint

GetPrimaryKey gets the primary key of the Table, or returns nil if it doesn't exist.

func (*CatalogCache) GetRoutine

func (c *CatalogCache) GetRoutine(schema *Schema, routineName, identityArguments string) *Routine

GetRoutine gets a Routine with the given routineName (and identityArguments) from the Schema, or returns nil if it doesn't exist. If a nil schema is passed in, GetRoutine returns nil. The identityArguments string only applies to Postgres because functions may be overloaded and differ only by their identity arguments. For other database dialects, the identityArguments should be an empty string.

The returning Routine pointer is valid as long as no new Routine is added to the Schema; if a new Routine is added, the pointer may now be pointing at a stale Routine. Call GetRoutine again in order to get the new pointer.

func (*CatalogCache) GetSchema

func (c *CatalogCache) GetSchema(catalog *Catalog, schemaName string) *Schema

GetSchema gets a Schema with the given schemaName from the Catalog, or returns nil if it doesn't exist. If a nil catalog is passed in, GetSchema returns nil.

The returning Schema pointer is valid as long as no new Schema is added to the Catalog; if a new Schema is added, the pointer may now be pointing at a stale Schema. Call GetSchema again in order to get the new pointer.

func (*CatalogCache) GetTable

func (c *CatalogCache) GetTable(schema *Schema, tableName string) *Table

GetTable gets a Table with the given tableName from the Schema, or returns nil if it doesn't exist. If a nil schema is passed in, GetTable returns nil.

The returning Table pointer is valid as long as no new Table is added to the Schema; if a new Table is added, the pointer may now be pointing at a stale Table. Call GetTable again in order to get the new pointer.

func (*CatalogCache) GetTrigger

func (c *CatalogCache) GetTrigger(table *Table, triggerName string) *Trigger

GetTrigger gets a Trigger with the given triggerName from the Table, or returns nil if it doesn't exist. If a nil table is passed in, GetTrigger returns nil.

The returning Trigger pointer is valid as long as no new Trigger is added to the Table; if a new Trigger is added, the pointer may now be pointing at a stale Trigger. Call GetTrigger again in order to get the new pointer.

func (*CatalogCache) GetView

func (c *CatalogCache) GetView(schema *Schema, viewName string) *View

GetView gets a View with the given viewName from the Schema, or returns nil if it doesn't exist. If a nil schema is passed in, GetView returns nil.

The returning View pointer is valid as long as no new View is added to the Schema; if a new View is added, the pointer may now be pointing at a stale View. Call GetView again in order to get the new pointer.

func (*CatalogCache) GetViewIndex

func (c *CatalogCache) GetViewIndex(view *View, indexName string) *Index

GetViewIndex gets an Index with the given indexName from the View, or returns nil if it doesn't exist.

The returning Index pointer is valid as long as no new Index is added to the View; if a new Index is added, the pointer may now be pointing at a stale Index. Call GetViewIndex again in order to get the new pointer.

func (*CatalogCache) GetViewTrigger

func (c *CatalogCache) GetViewTrigger(view *View, triggerName string) *Trigger

GetViewTrigger gets a Trigger with the given triggerName from the View, or returns nil if it doesn't exist.

The returning Trigger pointer is valid as long as no new Trigger is added to the View; if a new Trigger is added, the pointer may now be pointing at a stale Trigger. Call GetViewTrigger again in order to get the new pointer.

type Column

type Column struct {
	// TableSchema is the name of the schema that the table and column belong to.
	TableSchema string `json:",omitempty"`

	// TableName is the name of the table that the column belongs to.
	TableName string `json:",omitempty"`

	// ColumnName is the name of the column.
	ColumnName string `json:",omitempty"`

	// ColumnType is the type of the column.
	ColumnType string `json:",omitempty"`

	// CharacterLength stores the character length of the column (as a string)
	// if applicable.
	CharacterLength string `json:",omitempty"`

	// NumericPrecision stores the numeric precision of the column (as a
	// string) if applicable.
	NumericPrecision string `json:",omitempty"`

	// NumericScale stores the numeric scale of the column (as a string) if
	// applicable.
	NumericScale string `json:",omitempty"`

	// DomainName stores the name of the domain if the column is a domain type.
	// In which case the ColumnType of the column is the underlying type of the
	// domain. Postgres only.
	DomainName string `json:",omitempty"`

	// IsEnum indicates if the column is an enum type. If true, the ColumnType
	// of the column is the name of the enum. Postgres only.
	IsEnum bool `json:",omitempty"`

	// IsNotNull indicates if the column is NOT NULL.
	IsNotNull bool `json:",omitempty"`

	// IsPrimaryKey indicates if the column is the primary key. It is true only
	// if the column is the only column participating in the primary key
	// constraint.
	IsPrimaryKey bool `json:",omitempty"`

	// IsUnique indicates if the column is unique. It is true only if the
	// column is the only column participating in the unique constraint.
	IsUnique bool `json:",omitempty"`

	// IsAutoincrement indicates if the column is AUTO_INCREMENT (MySQL) or
	// AUTOINCREMENT (SQLite).
	IsAutoincrement bool `json:",omitempty"`

	// ReferencesSchema stores the name of the referenced schema if the column
	// is a foreign key. It is filled in only if the column is the only column
	// participating in the foreign key constraint.
	ReferencesSchema string `json:",omitempty"`

	// ReferencesTable stores the name of the referenced table if the column is
	// a foreign key. It is filled in only if the column is the only column
	// participating in the foreign key constraint.
	ReferencesTable string `json:",omitempty"`

	// ReferencesColumn stores the name of the referenced column if the column
	// is a foreign key. It is filled in only if the column is the only column
	// participating in the foreign key constraint.
	ReferencesColumn string `json:",omitempty"`

	// UpdateRule stores the ON UPDATE rule of the column's foreign key (if
	// applicable). Possible values: "RESTRICT", "CASCADE", "NO ACTION", "SET
	// NULL", "SET DEFAULT".
	UpdateRule string `json:",omitempty"`

	// DeleteRule stores the ON DELETE rule of the column's foreign key (if
	// applicable). Possible values: "RESTRICT", "CASCADE", "NO ACTION", "SET
	// NULL", "SET DEFAULT".
	DeleteRule string `json:",omitempty"`

	// IsDeferrable indicates if the column's foreign key is deferrable (if
	// applicable). Postgres only.
	IsDeferrable bool `json:",omitempty"`

	// IsInitiallyDeferred indicates if the column's foreign key is initially
	// deferred (if applicable). Postgres only.
	IsInitiallyDeferred bool `json:",omitempty"`

	// ColumnIdentity stores the identity definition of the column. Possible
	// values: "GENERATED BY DEFAULT AS IDENTITY" (Postgres), "GENERATED ALWAYS
	// AS IDENTITY" (Postgres), "IDENTITY" (SQLServer).
	ColumnIdentity string `json:",omitempty"`

	// ColumnDefault stores the default value of the column as it is literally
	// represented in SQL. So if the default value is a string, the value
	// should be surrounded by 'single quotes'. If the default value is a
	// function call, the default value should be surrounded by brackets e.g.
	// (uuid()).
	ColumnDefault string `json:",omitempty"`

	// OnUpdateCurrentTimestamp indicates if the column is updated with the
	// CURRENT_TIMESTAMP whenever the row is updated. MySQL only.
	OnUpdateCurrentTimestamp bool `json:",omitempty"`

	// IsGenerated indicates if the column is a generated column. It does not
	// have to be set to true if the GeneratedExpr field is already non-empty.
	IsGenerated bool `json:",omitempty"`

	// GeneratedExpr holds the generated expression of the column if the column
	// is generated.
	GeneratedExpr string `json:",omitempty"`

	// GeneratedExprStored indicates if the generated column is STORED. If
	// false, the generated column is assumed to be VIRTUAL.
	GeneratedExprStored bool `json:",omitempty"`

	// CollationName stores the collation of the column. If empty, the column
	// collation is assumed to follow the DefaultCollation of the Catalog.
	CollationName string `json:",omitempty"`

	// Comment stores the comment on the column.
	Comment string `json:",omitempty"`

	// If Ignore is true, the column should be treated like it doesn't exist (a
	// soft delete flag).
	Ignore bool `json:",omitempty"`
}

Column represents a database column.

type Constraint

type Constraint struct {
	// TableSchema is the name of the schema that the table and constraint belong to.
	TableSchema string `json:",omitempty"`

	// TableName is the name of the table that the constraint belongs to.
	TableName string `json:",omitempty"`

	// ConstraintName is the name of the constraint.
	ConstraintName string `json:",omitempty"`

	// ConstraintType is the type of the constraint. Possible values: "PRIMARY
	// KEY", "UNIQUE", "FOREIGN KEY", "CHECK", "EXCLUDE".
	ConstraintType string `json:",omitempty"`

	// Columns holds the name of the columns participating in the constraint.
	Columns []string `json:",omitempty"`

	// ReferencesSchema stores the name of the referenced schema if the constraint
	// is a foreign key.
	ReferencesSchema string `json:",omitempty"`

	// ReferencesTable stores the name of the referenced table if the constraint
	// is a foreign key.
	ReferencesTable string `json:",omitempty"`

	// ReferencesColumns stores the name of the referenced columns if the constraint
	// is a foreign key.
	ReferencesColumns []string `json:",omitempty"`

	// UpdateRule stores the ON UPDATE rule if the constraint is a foreign key.
	// Possible values: "RESTRICT", "CASCADE", "NO ACTION", "SET NULL", "SET
	// DEFAULT".
	UpdateRule string `json:",omitempty"`

	// DeleteRule stores the ON DELETE rule if the constraint is a foreign key.
	// Possible values: "RESTRICT", "CASCADE", "NO ACTION", "SET NULL", "SET
	// DEFAULT".
	DeleteRule string `json:",omitempty"`

	// MatchOption stores the MATCH option if the constraint is a foreign key.
	MatchOption string `json:",omitempty"`

	// CheckExpr stores the CHECK expression if the constraint is a CHECK constraint.
	CheckExpr string `json:",omitempty"`

	// ExclusionOperators stores the list of exclusion operators if the
	// constraint is an EXCLUDE constraint. Postgres only.
	ExclusionOperators []string `json:",omitempty"`

	// ExclusionIndexType stores the exclusion index type if the constraint is
	// an EXCLUDE constraint. Postgres only.
	ExclusionIndexType string `json:",omitempty"`

	// ExclusionPredicate stores the exclusion predicate if the constraint is
	// an EXCLUDE constraint. Postgres only.
	ExclusionPredicate string `json:",omitempty"`

	// IsDeferrable indicates if the constraint is deferrable. Postgres only.
	IsDeferrable bool `json:",omitempty"`

	// IsInitiallyDeferred indicates if the constraint is initially deferred.
	// Postgres only.
	IsInitiallyDeferred bool `json:",omitempty"`

	// IsClustered indicates if the constraint is the clustered index of the
	// table. SQLServer only.
	IsClustered bool `json:",omitempty"`

	// IsNotValid indicates if the constraint exists but is not valid e.g. if
	// it was constructed with the NOT VALID (Postgres) or WITH NOCHECK
	// (SQLServer).
	IsNotValid bool `json:",omitempty"`

	// Comment stores the comment on the constraint.
	Comment string `json:",omitempty"`

	// If Ignore is true, the constraint should be treated like it doesn't
	// exist (a soft delete flag).
	Ignore bool `json:",omitempty"`
}

Constraint represents a database constraint.

type DB added in v0.4.7

type DB interface {
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
	PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
}

type DatabaseIntrospector

type DatabaseIntrospector struct {
	// Filter is a struct used by the DatabaseIntrospector in order to narrow
	// down its search.
	Filter

	// The dialect of the database being introspected. Possible values:
	// "sqlite", "postgres", "mysql", "sqlserver".
	Dialect string

	// DB is the database connection used to introspect the database.
	DB DB
}

DatabaseIntrospector is used to introspect a database.

func NewDatabaseIntrospector

func NewDatabaseIntrospector(dialect string, db DB) *DatabaseIntrospector

NewDatabaseIntrospector creates a new DatabaseIntrospector.

func (*DatabaseIntrospector) GetColumns

func (dbi *DatabaseIntrospector) GetColumns() ([]Column, error)

GetColumns returns the columns in the database.

To narrow down your search to a specific schema and table, pass the schema and table names into the DatabaseIntrospector.Filter.Schemas slice and the DatabaseIntrospector.Filter.Tables slice respectively.

func (*DatabaseIntrospector) GetConstraints

func (dbi *DatabaseIntrospector) GetConstraints() ([]Constraint, error)

GetConstraints returns the constraints in the database.

To search for specific constraint types, add the constraint types into the DatabaseIntrospector.Filter.ConstraintTypes slice. An empty ConstraintTypes slice means all constraint types will be included. The possible constraint types are: "PRIMARY KEY", "UNIQUE", "FOREIGN KEY", "CHECK" and "EXCLUDE".

To narrow down your search to a specific schema and table, pass the schema and table names into the DatabaseIntrospector.Filter.Schemas slice and the DatabaseIntrospector.Filter.Tables slice respectively.

func (*DatabaseIntrospector) GetCurrentSchema

func (dbi *DatabaseIntrospector) GetCurrentSchema() (currentSchema string, err error)

GetCurrentSchema returns the current schema of the database.

func (*DatabaseIntrospector) GetDatabaseName

func (dbi *DatabaseIntrospector) GetDatabaseName() (databaseName string, err error)

GetDatabaseName returns the database name.

func (*DatabaseIntrospector) GetDefaultCollation

func (dbi *DatabaseIntrospector) GetDefaultCollation() (defaultCollation string, err error)

GetDefaultCollation returns the default collation of the database.

func (*DatabaseIntrospector) GetDomains

func (dbi *DatabaseIntrospector) GetDomains() ([]Domain, error)

GetDomains returns the domains in the database. Postgres only.

To search for specific domains, add the domain names into the DatabaseIntrospector.Filter.Domains slice. To exclude specific domains from your search, add the domain names into the DatabaseIntrospector.Filter.ExcludeDomains slice.

To narrow down your search to a specific schema, pass the schema name into the DatabaseIntrospector.Filter.Schemas slice.

func (*DatabaseIntrospector) GetEnums

func (dbi *DatabaseIntrospector) GetEnums() ([]Enum, error)

GetEnums returns the enums in the database. Postgres only.

To search for specific enums, add the enum names into the DatabaseIntrospector.Filter.Enums slice. To exclude specific enums from your search, add the enum names into the DatabaseIntrospector.Filter.ExcludeEnums slice.

To narrow down your search to a specific schema, pass the schema name into the DatabaseIntrospector.Filter.Schemas slice.

func (*DatabaseIntrospector) GetExtensions

func (dbi *DatabaseIntrospector) GetExtensions() (extensions []string, err error)

GetExtensions returns the extensions in the database. Postgres only.

To search for specific extensions, add the extension names into the DatabaseIntrospector.Filter.Extensions slice. To exclude specific extensions from your search, add the extension names into the DatabaseIntrospector.Filter.ExcludeExtensions slice.

func (*DatabaseIntrospector) GetIndexes

func (dbi *DatabaseIntrospector) GetIndexes() ([]Index, error)

GetIndexes returns the indexes in the database.

To narrow down your search to a specific schema and table, pass the schema and table names into the DatabaseIntrospector.Filter.Schemas slice and the DatabaseIntrospector.Filter.Tables slice respectively.

func (*DatabaseIntrospector) GetRoutines

func (dbi *DatabaseIntrospector) GetRoutines() ([]Routine, error)

GetRoutines returns the routines (functions and procedures) in the database.

To search for specific routines, add the routine names into the DatabaseIntrospector.Filter.Routines slice. To exclude specific routines from your search, add the routine names into the DatabaseIntrospector.Filter.ExcludeRoutines slice.

To narrow down your search to a specific schema, pass the schema name into the DatabaseIntrospector.Filter.Schemas slice.

func (*DatabaseIntrospector) GetTables

func (dbi *DatabaseIntrospector) GetTables() ([]Table, error)

GetTables returns the tables in the database. It does not automatically fetch the columns, constraints and indexes for you. You must fetch them yourself. Look into CatalogCache for a streamlined way of adding columns from different tables into their corresponding table structs.

To search for specific tables, add the table names into the DatabaseIntrospector.Filter.Tables slice. To exclude specific tables from your search, add the table names into the DatabaseIntrospector.Filter.ExcludeTables slice.

To narrow down your search to a specific schema, pass the schema name into the DatabaseIntrospector.Filter.Schemas slice.

func (*DatabaseIntrospector) GetTriggers

func (dbi *DatabaseIntrospector) GetTriggers() ([]Trigger, error)

GetTriggers returns the triggers in the database.

To narrow down your search to a specific schema and table, pass the schema and table names into the DatabaseIntrospector.Filter.Schemas slice and the DatabaseIntrospector.Filter.Tables slice respectively.

func (*DatabaseIntrospector) GetVersion added in v0.4.0

func (dbi *DatabaseIntrospector) GetVersion() (version string, err error)

func (*DatabaseIntrospector) GetVersionNums

func (dbi *DatabaseIntrospector) GetVersionNums() (versionNums VersionNums, err error)

GetVersionNums returns the version numbers of the database.

func (*DatabaseIntrospector) GetViews

func (dbi *DatabaseIntrospector) GetViews() ([]View, error)

GetViews returns the views in the database.

To search for specific views, add the view names into the DatabaseIntrospector.Filter.Views slice. To exclude specific views from your search, add the view names into the DatabaseIntrospector.Filter.ExcludeViews slice.

To narrow down your search to a specific schema, pass the schema name into the DatabaseIntrospector.Filter.Schemas slice.

func (*DatabaseIntrospector) WriteCatalog

func (dbi *DatabaseIntrospector) WriteCatalog(catalog *Catalog) error

WriteCatalog populates the Catalog by introspecting the database.

type Domain

type Domain struct {
	// DomainSchema is the name of schema that the domain type belongs to.
	DomainSchema string `json:",omitempty"`

	// DomainName is the name of the domain type.
	DomainName string `json:",omitempty"`

	// UnderlyingType is the underlying type of the domain.
	UnderlyingType string `json:",omitempty"`

	// CollationName is the collation of the domain type.
	CollationName string `json:",omitempty"`

	// IsNotNull indicates if the domain type is NOT NULL.
	IsNotNull bool `json:",omitempty"`

	// ColumnDefault is the default value of the domain type.
	ColumnDefault string `json:",omitempty"`

	// CheckNames is the list of check constraint names on the domain type.
	CheckNames []string `json:",omitempty"`

	// CheckExprs is the list of check constraints expressions on the domain
	// type.
	CheckExprs []string `json:",omitempty"`

	// Comment stores the comment on the domain type.
	Comment string `json:",omitempty"`

	// If Ignore is true, the domain type should be treated like it doesn't
	// exist (a soft delete flag).
	Ignore bool `json:",omitempty"`
}

Domain represents a database domain type. Postgres only.

type Driver

type Driver struct {
	// (Required) Dialect is the database dialect. Possible values: "sqlite", "postgres",
	// "mysql", "sqlserver".
	Dialect string

	// (Required) DriverName is the driverName to be used with sql.Open().
	DriverName string

	// If not nil, IsLockTimeout is used to check if an error is a
	// database-specific lock timeout error.
	IsLockTimeout func(error) bool

	// If not nil, PreprocessDSN will be called on a dataSourceName right
	// before it is passed in to sql.Open().
	PreprocessDSN func(string) string

	// If not nil, AnnotateError will be called on an error returned by the
	// database to display to the user. The primary purpose is to annotate the
	// error with useful information like line number where an error occurred.
	AnnotateError func(originalErr error, query string) error
}

Driver represents the capabilities of the underlying database driver for a particular dialect. It is not necessary to implement all fields.

type DumpCmd

type DumpCmd struct {
	// (Required) DB is the database to dump.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// OutputDir is the output directory where the files will be created. If empty,
	// the command creates files in the current working directory.
	OutputDir string

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string

	// If non-empty, Zip specifies the name of the zip file to dump the
	// contents into. The .zip suffix is optional.
	Zip string

	// If non-empty, Tgz specifies the name of the tar gzip file to dump the
	// contents into. The .tgz suffix is optional.
	Tgz string

	// If SchemaOnly is true, DumpCmd will only dump the schema.
	SchemaOnly bool

	// If DataOnly is true, DumpCmd will only dump table data.
	DataOnly bool

	// Nullstring specifies the string that is used in CSV to represent NULL.
	// Leave blank to use `\N`.
	Nullstring string

	// Binaryprefix specifies the prefix that is used in CSV to denote a
	// hexadecimal binary literal (e.g. 0xa55cfae). Leave blank to use `0x`.
	Binaryprefix string

	// Schemas is the list of schemas that will be included in the dump.
	Schemas []string

	// ExcludeSchemas is the list of schemas that will be excluded from the
	// dump.
	ExcludeSchemas []string

	// Tables is the list of tables that will be included in the dump.
	Tables []string

	// ExcludeTables is the list of tables that will be excluded from the dump.
	ExcludeTables []string

	// SubsetQueries holds the initial subset queries.
	SubsetQueries []string

	// ExtendedSubsetQueries holds the initial extended subset queries.
	ExtendedSubsetQueries []string

	// (Postgres only) Dump arrays as JSON arrays.
	ArrayAsJSON bool

	// (Postgres only) Dump UUIDs as bytes (in hexadecimal form e.g. 0x267f4bdb50a041399704c26a16f8f019).
	UUIDAsBytes bool

	// Ctx is the command's context.
	Ctx context.Context
	// contains filtered or unexported fields
}

DumpCmd implements the `sqddl dump` subcommand.

func DumpCommand

func DumpCommand(args ...string) (*DumpCmd, error)

DumpCommand creates a new DumpCmd with the given arguments. E.g.

sqddl dump -db <DATABASE_URL> [FLAGS]

DumpCommand("-db", "postgres://user:pass@localhost:5432/sakila", "-output-dir", "./db")

func (*DumpCmd) Run

func (cmd *DumpCmd) Run() error

Run runs the DumpCmd.

type Enum

type Enum struct {
	// EnumSchema is the name of schema that the enum type belongs to.
	EnumSchema string `json:",omitempty"`

	// EnumName is the name of the enum type.
	EnumName string `json:",omitempty"`

	// EnumLabels contains the list of labels associated with the enum type.
	EnumLabels []string `json:",omitempty"`

	// Comment stores the comment on the enum type.
	Comment string `json:",omitempty"`

	// If Ignore is true, the enum type should be treated like it doesn't
	// exist (a soft delete flag).
	Ignore bool `json:",omitempty"`
}

Enum represents a database enum type. Postgres only.

type Filter

type Filter struct {
	// Version holds the raw version string returned by the database. You
	// can use DatabaseIntrospector.GetVersion to populate it.
	Version string

	// VersionNums holds the version number of the underlying database
	// connection. This is used by the query templates to output different
	// queries based on the database version. If no version number is found,
	// the highest version number possible is assumed. You can use
	// DatabaseIntrospector.GetVersionNums to populate it.
	VersionNums VersionNums

	// IncludeSystemCatalogs controls whether the DatabaseIntrospector will
	// include the system tables in its search (information_schema, pg_catalog,
	// etc). Default is false.
	IncludeSystemCatalogs bool

	// ConstraintTypes controls what constraint types will be included in the
	// search. An empty slice means all constraint types will be included. The
	// possible constraint types are: "PRIMARY KEY", "UNIQUE", "FOREIGN KEY",
	// "CHECK" and "EXCLUDE".
	ConstraintTypes []string

	// ObjectTypes controls what object types will be included in the search.
	// An empty slice means all object types will be included. The possible
	// object types are: "EXTENSIONS", "ENUMS", "DOMAINS", "ROUTINES", "VIEWS"
	// and "TABLES".
	ObjectTypes []string

	// Tables is the list of tables to be included in the search. If empty, all
	// tables will be included.
	Tables []string

	// Schemas is the list of schemas to include in the search. If empty, all
	// schemas will be included.
	Schemas []string

	// ExcludeSchemas is the list of schemas to exclude from the search.
	ExcludeSchemas []string

	// ExcludeTables is the list of tables to be excluded from the search.
	ExcludeTables []string

	// Views is the list of views to be included in the search. If empty, all
	// views will be included.
	Views []string

	// Routines is the list of routines to be included in the search. If empty,
	// all routines will be included.
	Routines []string

	// ExcludeRoutines is the list of routines to be excluded from the search.
	ExcludeRoutines []string

	// ExcludeViews is the list of views to be excluded from the search.
	ExcludeViews []string

	// Enums is the list of enums to be included in the search. If empty, all
	// enums will be included.
	Enums []string

	// ExcludeEnums is the list of enums to be excluded from the search.
	ExcludeEnums []string

	// Domains is the list of domains to be included in the search. If empty,
	// all domains will be included.
	Domains []string

	// ExcludeDomains is the list of domains to be excluded from the search.
	ExcludeDomains []string

	// Extensions is the list of extensions to include in the search by
	// DatabaseIntrospector.GetExtensions. If empty, all extensions will be
	// included.
	Extensions []string

	// ExcludeExtensions is the list of extensions to exclude from the search.
	ExcludeExtensions []string
}

Filter is a struct used by DatabaseIntrospector in order to narrow down its search.

func (*Filter) IncludeConstraintType

func (f *Filter) IncludeConstraintType(constraintType string) bool

IncludeConstraintType returns a bool indicating if the constraintType should be included in the search.

type GenerateCmd

type GenerateCmd struct {
	// SrcCatalog is the source catalog that you want to migrate from.
	SrcCatalog *Catalog

	// DestCatalog is the destination catalog that you want to migrate to.
	DestCatalog *Catalog

	// DirFS is where the Filenames will be sourced from.
	DirFS fs.FS

	// Filenames specifies the list of files (loaded from the DirFS) used to
	// build the DestCatalog. It will be ignored if the DestCatalog is already
	// non-nil.
	Filenames []string

	// OutputDir is where the migration scripts will be created.
	// Leave blank to use the current working directory.
	OutputDir string

	// Stdout is the command's standard out. If nil, the command writes to
	// os.Stdout.
	Stdout io.Writer

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string

	// Prefix is filename prefix for the migration(s). If empty, the current
	// timestamp is used.
	Prefix string

	// DropObjects controls whether statements like DROP TABLE, DROP COLUMN
	// will be generated.
	DropObjects bool

	// If DryRun is true, the SQL queries will be written to Stdout instead of
	// being written into files.
	DryRun bool

	// Dialect is the sql dialect used. This will override whatever dialect is
	// set inside the SrcCatalog and DestCatalog.
	Dialect string

	// AcceptWarnings will accept warnings when generating migrations.
	AcceptWarnings bool
}

GenerateCmd implements the `sqddl generate` subcommand.

func GenerateCommand

func GenerateCommand(args ...string) (*GenerateCmd, error)

GenerateCommand creates a new GenerateCmd with the given arguments. E.g.

sqddl generate -src <SRC_SCHEMA> -dest <DEST_SCHEMA> [FLAGS]

GenerateCommand(
  "-src", "postgres://user:pass@localhost:5432/mydatabase",
  "-dest", "tables/tables.go",
  "-output-dir", "./migrations",
)

func (*GenerateCmd) Results

func (cmd *GenerateCmd) Results() (files []fs.File, warnings []string, err error)

Results gets the results of the GenerateCmd.

Each file in the files slice should be closed once read.

func (*GenerateCmd) Run

func (cmd *GenerateCmd) Run() error

Run runs the GenerateCmd.

type Index

type Index struct {
	// TableSchema is the name of the schema that the table and index belong to.
	TableSchema string `json:",omitempty"`

	// TableName is the name of the table (or view) that the index belongs to.
	TableName string `json:",omitempty"`

	// IndexName is the name of the index.
	IndexName string `json:",omitempty"`

	// IndexType is the type of the index.
	IndexType string `json:",omitempty"`

	// IsViewIndex indicates if the index is for a view.
	IsViewIndex bool `json:",omitempty"`

	// IsUnique indicates if the index is a unique index.
	IsUnique bool `json:",omitempty"`

	// Columns holds the names of the columns participating in the index.
	Columns []string `json:",omitempty"`

	// IncludeColumns holds the names of the columns that are included by the
	// index (the INCLUDE clause).
	IncludeColumns []string `json:",omitempty"`

	// Descending indicates if each column of the index is descending.
	Descending []bool `json:",omitempty"`

	// Opclasses holds the opclass of each column of the index. Postgres only.
	Opclasses []string `json:",omitempty"`

	// Predicate stores the index predicate i.e. the index is a partial index.
	Predicate string `json:",omitempty"`

	// SQL is the SQL definition of the index.
	SQL string `json:",omitempty"`

	// Comment stores the comment on the index.
	Comment string `json:",omitempty"`

	// If Ignore is true, the index should be treated like it doesn't exist (a
	// soft delete flag).
	Ignore bool `json:",omitempty"`
}

Index represents a database index.

type LoadCmd

type LoadCmd struct {
	// (Required) DB is the database to apply migrations to.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// (Required) DirFS is where the Filenames will be sourced from.
	DirFS fs.FS

	// Filenames specifies the list of files (sql, csv, subdirectory, zip or
	// tgz) to be loaded from the DirFS.
	Filenames []string

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string

	// Batchsize controls the batch size of a single INSERT
	// statement. If 0, a default batch size of 1000 is used.
	Batchsize int

	// Nullstring specifies the string that is used in CSV to
	// represent NULL. If empty, `\N` is used.
	Nullstring string

	// Binaryprefix specifies the string prefix that is used in CSV to denote a
	// hexadecimal binary literal. If empty, `0x` is used.
	Binaryprefix string

	// NoNullstring specifies that the Nullstring should not be used
	// when reading CSV files.
	NoNullstring bool

	// NoBinaryprefix specifies that the Binaryprefix should not be
	// used when reading CSV files.
	NoBinaryprefix bool

	// Log start and end timestamps for each file loaded.
	Verbose bool

	// (SQLite only) Load timestamp strings as unix timestamp integers for
	// TIMESTAMP, DATETIME and DATE columns.
	TimestampAsInteger bool

	// Ctx is the command's context.
	Ctx context.Context
	// contains filtered or unexported fields
}

LoadCmd implements the `sqddl load` subcommand.

func LoadCommand

func LoadCommand(args ...string) (*LoadCmd, error)

LoadCommand creates a new LoadCmd with the given arguments. E.g.

sqddl load -db <DATABASE_URL> [FLAGS] [FILENAMES...]

LoadCommand(
    "-db", "postgres://user:pass@localhost:5432/sakila",
    "./db/schema.sql",
    "./db/actor.csv",
    "./db/language.csv",
    "./db/indexes.sql",
    "./db/constraints.sql",
)
LoadCommand("-db", "postgres://user:pass@localhost:5432/sakila", "./db")
LoadCommand("-db", "postgres://user:pass@localhost:5432/sakila", "./db/sakila.zip")
LoadCommand("-db", "postgres://user:pass@localhost:5432/sakila", "./db/sakila.tgz")

func (*LoadCmd) Run

func (cmd *LoadCmd) Run() error

Run runs the LoadCmd.

type LsCmd

type LsCmd struct {
	// (Required) DB is the database.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// (Required) DirFS is the migration directory.
	DirFS fs.FS

	// Stdout specifies the command's standard out. If nil, the command writes
	// to os.Stdout.
	Stdout io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string

	// Include pending migrations in output.
	IncludePending bool

	// Include applied migrations in output.
	IncludeApplied bool

	// Include failed migrations in output.
	IncludeFailed bool

	// Include missing migrations in output.
	IncludeMissing bool

	// Include all migrations in output.
	IncludeAll bool
	// contains filtered or unexported fields
}

LsCmd implements the `sqddl ls` subcommand.

func LsCommand

func LsCommand(args ...string) (*LsCmd, error)

LsCommand creates a new LsCmd with the given arguments. E.g.

sqddl ls -db <DATABASE_URL> -dir <MIGRATION_DIR> [FLAGS]

LsCommand("-db", "postgres://user:pass@localhost:5432/sakila", "-dir", "./migrations")

func (*LsCmd) Run

func (cmd *LsCmd) Run() error

type MigrateCmd

type MigrateCmd struct {
	// (Required) DB is the database to apply migrations to.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// (Required) DirFS is the migration directory.
	DirFS fs.FS

	// Filenames specifies the list of migration scripts within the migration
	// directory to be applied. If a provided filename has already been
	// applied, it will not be applied again.
	//
	// If Filenames is empty, all migration scripts in the DirFS will be added
	// to the list.
	//
	// The order in which the filenames are provided is honoured, except for
	// repeatable migrations (files inside the "repeatable/" directory) which
	// are always run after the regular migrations.
	Filenames []string

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string

	// LockTimeout specifies how long to wait to acquire a lock on a table
	// before bailing out. If empty, 1*time.Second is used.
	LockTimeout time.Duration

	// Maximum number of retries on lock timeout.
	MaxAttempts int

	// Maximum delay between retries.
	MaxDelay time.Duration

	// Base delay between retries.
	BaseDelay time.Duration

	// Verbose will include the start and end timestamps of each migration in
	// the log output.
	Verbose bool

	// Run migrations without adding them to the history table.
	SkipHistoryTable bool

	// Ctx is the command's context.
	Ctx context.Context
	// contains filtered or unexported fields
}

MigrateCmd implements the `sqddl migrate` subcommand.

func MigrateCommand

func MigrateCommand(args ...string) (*MigrateCmd, error)

MigrateCommand creates a new MigrateCmd with the given arguments. E.g.

sqddl migrate -db <DATABASE_URL> -dir <MIGRATION_DIR> [FLAGS] [FILENAMES...]

MigrateCommand("-db", "postgres://user:pass@localhost:5432/sakila", "-dir", "./migrations")

func (*MigrateCmd) Run

func (cmd *MigrateCmd) Run() error

Run runs the MigrateCmd.

type MigrationError added in v0.4.6

type MigrationError struct {
	Err       error         // Migration error.
	Filename  string        // Migration filename.
	Contents  string        // Contents of the migration script.
	StartedAt time.Time     // When the migration started at.
	TimeTaken time.Duration // How long the migration took.
}

MigrationError is returned by (*MigrateCmd).Run() if any errors were encountered when running a migration script (SQL syntax errors, violated constraints, etc).

func (*MigrationError) Error added in v0.4.6

func (migrationErr *MigrationError) Error() string

Error implements the error interface.

func (*MigrationError) Unwrap added in v0.4.6

func (migrationErr *MigrationError) Unwrap() error

Unwrap returns the underlying error when running the migration.

type Modifier

type Modifier struct {
	// Dialects is the slice of dialects that the modifier is applicable for.
	// If empty, the modifier is applicable for every dialect.
	Dialects []string

	// Name is the name of the modifier.
	Name string

	// RawValue is the raw value of the modifier.
	RawValue string

	// Value is the value of the modifier (parsed from the RawValue).
	Value string

	// Submodifiers are the submodifiers of the modifier (parsed from the
	// RawValue).
	Submodifiers []Modifier
}

Modifier represents a modifier in a ddl struct tag.

func NewModifiers

func NewModifiers(s string) ([]Modifier, error)

NewModifiers parses a string into a slice of modifiers.

func (*Modifier) ExcludesDialect

func (m *Modifier) ExcludesDialect(dialect string) bool

ExcludesDialect checks if a dialect should be excluded from the effects of the modifier.

func (*Modifier) ParseRawValue

func (m *Modifier) ParseRawValue() error

ParseRawValue parses a modifier's raw value and fills in the fields `Value` and `Submodifiers`.

type Modifiers

type Modifiers []Modifier

Modifiers is a slice of modifiers.

func (Modifiers) String

func (ms Modifiers) String() string

String converts a slice of modifiers into their string representation.

type MvCmd

type MvCmd struct {
	// (Required) DB is the database.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// (Required) SrcFilename is the source filename to be renamed from.
	SrcFilename string

	// (Required) DestFilename is the destination filename to be renamed to.
	DestFilename string

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string
	// contains filtered or unexported fields
}

MvCmd implements the `sqddl mv` subcommand.

func MvCommand

func MvCommand(args ...string) (*MvCmd, error)

MvCommand creates a new MvCmd with the given arguments. E.g.

sqddl mv -db <DATABASE_URL> <OLD_FILENAME> <NEW_FILENAME>

MvCommand(
    "-db", "postgres://user:pass@localhost:5432/sakila",
    "-dir", "./migrations",
    "old_name.sql", "new_name.sql",
)

func (*MvCmd) Run

func (cmd *MvCmd) Run() error

Run runs the MvCmd.

type RmCmd

type RmCmd struct {
	// (Required) DB is the database.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// Filenames specifies the list of files to be removed from the
	// history table.
	Filenames []string

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string
	// contains filtered or unexported fields
}

RmCmd implements the `sqddl rm` subcommand.

func RmCommand

func RmCommand(args ...string) (*RmCmd, error)

RmCommand creates a new RmCmd with the given arguments.

sqddl rm -db <DATABASE_URL> [FILENAMES...]

RmCommand(
    "-db", "postgres://user:pass@localhost:5432/sakila",
    "-dir", "./migrations",
    "02_sakila.sql", "04_extras.sql",
)

func (*RmCmd) Run

func (cmd *RmCmd) Run() error

Run runs the RmCmd.

type Routine

type Routine struct {
	// RoutineSchema is the name of schema that the routine belongs to.
	RoutineSchema string `json:",omitempty"`

	// RoutineName is the name of the routine.
	RoutineName string `json:",omitempty"`

	// IdentityArguments is a string containing the identity arguments that
	// uniquely identify routines sharing the same name. Postgres only.
	IdentityArguments string `json:",omitempty"`

	// RoutineType identifies the type of the routine. Possible values:
	// "PROCEDURE", "FUNCTION".
	RoutineType string `json:",omitempty"`

	// SQL is the SQL definition of the routine.
	SQL string `json:",omitempty"`

	// Attrs stores additional metadata about the routine.
	Attrs map[string]string `json:",omitempty"`

	// Comment stores the comment on the routine.
	Comment string `json:",omitempty"`

	// If Ignore is true, the routine should be treated like it doesn't exist
	// (a soft delete flag).
	Ignore bool `json:",omitempty"`
}

Routine represents a database routine (either a stored procedure or a function).

type Schema

type Schema struct {
	// SchemaName is the name of the schema.
	SchemaName string `json:",omitempty"`

	// Tables is the list of tables within the schema.
	Tables []Table `json:",omitempty"`

	// Views is the list of views within the schema.
	Views []View `json:",omitempty"`

	// If ViewsValid is false, the schema's views are unknown.
	ViewsValid bool `json:",omitempty"`

	// Routines is the list of routines (stored procedures and functions)
	// within the schema.
	Routines []Routine `json:",omitempty"`

	// If RoutinesValid is false, the schema's routines are unknown.
	RoutinesValid bool `json:",omitempty"`

	// The list of enum types within the schema. Postgres only.
	Enums []Enum `json:",omitempty"`

	// If EnumsValid is false, the schema's enum types are unknown.
	EnumsValid bool `json:",omitempty"`

	// The list of domain types within the schema. Postgres only.
	Domains []Domain `json:",omitempty"`

	// If DomainsValid is false, the schema's domain types are unknown.
	DomainsValid bool `json:",omitempty"`

	// Comment stores the comment on the schema object.
	Comment string `json:",omitempty"`

	// If Ignore is true, the schema should be treated like it doesn't exist (a
	// soft delete flag).
	Ignore bool `json:",omitempty"`
}

Schema represents a database schema.

type StructField

type StructField struct {
	// Name is the name of the struct field.
	Name string

	// Type is the type of the struct field.
	Type string

	// NameTag is the value for the "sq" struct tag.
	NameTag string

	// Modifiers are the parsed modifiers for the "ddl" struct tag.
	Modifiers []Modifier
	// contains filtered or unexported fields
}

StructField represents a struct field within a table struct.

type StructParser

type StructParser struct {
	TableStructs TableStructs
	// contains filtered or unexported fields
}

StructParser is used to parse Go source code into TableStructs.

func NewStructParser

func NewStructParser(fset *token.FileSet) *StructParser

NewStructParser creates a new StructParser. An existing token.FileSet can be passed in. If not, passing in nil is fine and a new token.FileSet will be instantiated.

func (*StructParser) Diagnostics

func (p *StructParser) Diagnostics() ([]token.Pos, []string)

Diagnostics returns the errors encountered after calling WriteCatalog in a structured format.

func (*StructParser) Error

func (p *StructParser) Error() error

Error returns the errors encountered after calling WriteCatalog.

func (*StructParser) ParseFile

func (p *StructParser) ParseFile(f fs.File) error

ParseFile parses an fs.File containing Go source code and populates the TableStructs.

func (*StructParser) VisitNode

func (p *StructParser) VisitNode(node ast.Node) bool

VisitNode is a callback function that populates the TableStructs when passed to ast.Inspect().

func (*StructParser) VisitStruct

func (p *StructParser) VisitStruct(node ast.Node)

VisitStruct is a callback function that populates the TableStructs when passed to inspect.Inspector.Preorder(). It expects the node to be of type *ast.TypeSpec.

func (*StructParser) WriteCatalog

func (p *StructParser) WriteCatalog(catalog *Catalog) error

WriteCatalog populates the Catalog using the StructParser's TableStructs.

type Subsetter

type Subsetter struct {
	// contains filtered or unexported fields
}

Subsetter is used to dump a referentially-intact subset of the database.

func NewInMemorySubsetter

func NewInMemorySubsetter(dialect string, db DB, filter Filter) (*Subsetter, error)

NewInMemorySubsetter creates an in-memory subsetter that holds the results of each subset query in-memory.

func (*Subsetter) ExtendedSubset

func (ss *Subsetter) ExtendedSubset(query string) error

ExtendedSubset adds a new extended subset query to the subsetter.

func (*Subsetter) Query

func (ss *Subsetter) Query(tableSchema, tableName string) string

Query returns the query needed to dump a subset of a table according to the subset queries added to the subsetter thus far.

func (*Subsetter) Subset

func (ss *Subsetter) Subset(query string) error

Subset adds a new subset query to the subsetter.

func (*Subsetter) Tables

func (ss *Subsetter) Tables() []*Table

Tables returns the tables that are involved in the subset dump.

type Table

type Table struct {
	// TableSchema is the name of the schema that the table belongs to.
	TableSchema string `json:",omitempty"`

	// TableName is the name of the table.
	TableName string `json:",omitempty"`

	// SQL is the SQL definition of the table.
	SQL string `json:",omitempty"`

	// IsVirtual indicates if the table is a virtual table. SQLite only.
	IsVirtual bool `json:",omitempty"`

	// Columns is the list of columns within the table.
	Columns []Column `json:",omitempty"`

	// Constraints is the list of constraints within the table.
	Constraints []Constraint `json:",omitempty"`

	// Indexes is the list of indexes within the table.
	Indexes []Index `json:",omitempty"`

	// Triggers is the list of triggers within the table.
	Triggers []Trigger `json:",omitempty"`

	// Comment stores the comment on the table.
	Comment string `json:",omitempty"`

	// If Ignore is true, the table should be treated like it doesn't exist (a
	// soft delete flag).
	Ignore bool `json:",omitempty"`
}

Table represents a database table.

type TableStruct

type TableStruct struct {
	// Name is the name of the table struct.
	Name string

	// Fields are the table struct fields.
	Fields []StructField
}

TableStruct represents a table struct.

type TableStructs

type TableStructs []TableStruct

TableStructs is a slice of TableStruct values.

func NewTableStructs added in v0.3.3

func NewTableStructs(dialect string, db *sql.DB, filter Filter) (TableStructs, error)

NewTableStructs introspects a database connection and returns a slice of TableStructs, each TableStruct corresponding to a table in the database. You may narrow down the list of tables by filling in the Schemas, ExcludeSchemas, Tables and ExcludeTables fields of the Filter struct. The Filter.ObjectTypes field will always be set to []string{"TABLES"}.

func (*TableStructs) MarshalText

func (s *TableStructs) MarshalText() (text []byte, err error)

MarshalText converts the TableStructs into Go source code.

func (*TableStructs) ReadCatalog

func (s *TableStructs) ReadCatalog(catalog *Catalog) error

ReadCatalog reads from a catalog and populates the TableStructs accordingly.

type TablesCmd

type TablesCmd struct {
	// (Required) DB is the database.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// PackageName is the name of the package for the generated go code. Leave
	// blank to use "_".
	PackageName string

	// Filename is the name of the file to write the table structs into. Leave
	// blank to write to Stdout instead.
	Filename string

	// SchemaQualifiedStructs controls whether the generated struct names are
	// schema qualified e.g. `type SCHEMA_TABLE struct` instead of `type TABLE
	// struct`.
	SchemaQualifiedStructs bool

	// Stdout specifies the command's standard out to write to if no Filename
	// is provided. If nil, the command writes to os.Stdout.
	Stdout io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string

	// Schemas is the list of schemas that will be included.
	Schemas []string

	// ExcludeSchemas is the list of schemas that will be excluded.
	ExcludeSchemas []string

	// Tables is the list of tables that will be included.
	Tables []string

	// ExcludeTables is the list of tables that will be excluded.
	ExcludeTables []string
	// contains filtered or unexported fields
}

TablesCmd implements the `sqddl tables` subcommand.

func TablesCommand

func TablesCommand(args ...string) (*TablesCmd, error)

func (*TablesCmd) Run

func (cmd *TablesCmd) Run() error

type Timestamp added in v0.4.7

type Timestamp struct {
	time.Time
	Valid bool
	// contains filtered or unexported fields
}

func (*Timestamp) Scan added in v0.4.7

func (ts *Timestamp) Scan(value any) error

Scan implements the sql.Scanner interface. It additionally supports scanning from int64 and text (string/[]byte) values on top of what sql.NullTime already supports. The following text timestamp formats are supported:

var timestampFormats = []string{
	"2006-01-02 15:04:05.999999999-07:00",
	"2006-01-02T15:04:05.999999999-07:00",
	"2006-01-02 15:04:05.999999999",
	"2006-01-02T15:04:05.999999999",
	"2006-01-02 15:04:05",
	"2006-01-02T15:04:05",
	"2006-01-02 15:04",
	"2006-01-02T15:04",
	"2006-01-02",
}

func (Timestamp) Value added in v0.4.7

func (ts Timestamp) Value() (driver.Value, error)

Value implements the driver.Valuer interface. It returns an int64 unix timestamp if the dialect is SQLite, otherwise it returns a time.Time (similar to sql.NullTime).

type TouchCmd

type TouchCmd struct {
	// (Required) DB is the database.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// (Required) DirFS is the migration directory.
	DirFS fs.FS

	// Filenames specifies the list of files to be touched in the history
	// table. If empty, all migrations in the migration directory will be
	// touched.
	Filenames []string

	// Stderr specifies the command's standard error. If nil, the command
	// writes to os.Stderr.
	Stderr io.Writer

	// HistoryTable is the name of the migration history table. If empty, the
	// default history table name will be "sqddl_history".
	HistoryTable string
	// contains filtered or unexported fields
}

TouchCmd implements the `sqddl touch` subcommand.

func TouchCommand

func TouchCommand(args ...string) (*TouchCmd, error)

TouchCommand creates a new TouchCmd from the given arguments.

sqddl touch -db <DATABASE_URL> -dir <MIGRATION_DIR> [FILENAMES...]

TouchCommand(
    "-db", "postgres://user:pass@localhost:5432/sakila",
    "-dir", "./migrations",
    "02_sakila.sql",
    "04_extras.sql",
)

func (*TouchCmd) Run

func (cmd *TouchCmd) Run() error

Run runs the TouchCmd.

type Trigger

type Trigger struct {
	// TableSchema is the name of the schema that the table and trigger belong to.
	TableSchema string `json:",omitempty"`

	// TableName is the name of the table (or view) that the trigger belongs to.
	TableName string `json:",omitempty"`

	// TriggerName is the name of the trigger.
	TriggerName string `json:",omitempty"`

	// IsViewTrigger indicates if the trigger belongs to a view.
	IsViewTrigger bool `json:",omitempty"`

	// SQL is the SQL definition of the trigger.
	SQL string `json:",omitempty"`

	// Attrs stores additional metadata about the trigger.
	Attrs map[string]string `json:",omitempty"`

	// Comment stores the comment on the trigger.
	Comment string `json:",omitempty"`

	// If Ignore is true, the trigger should be treated like it doesn't exist
	// (a soft delete flag).
	Ignore bool `json:",omitempty"`
}

Trigger represents a database trigger.

type VersionNums

type VersionNums []int

VersionNums holds a database's version.

func (VersionNums) GreaterOrEqualTo added in v0.4.3

func (v VersionNums) GreaterOrEqualTo(nums ...int) bool

GreaterOrEqualTo checks if the database version is greater or equal to the given version numbers. You can provide just one number (the major version) or multiple numbers (the major, minor and patch versions). E.g.

version.GreaterOrEqualTo(12)   # $version >= 12
version.GreaterOrEqualTo(8, 5) # $version >= 8.5

func (VersionNums) LowerThan

func (v VersionNums) LowerThan(nums ...int) bool

LowerThan checks if the database version is lower than the given version numbers. You can provide just one number (the major version) or multiple numbers (the major, minor and patch versions). E.g.

version.LowerThan(12)   # $version < 12
version.LowerThan(8, 5) # $version < 8.5

type View

type View struct {
	// ViewSchema is the name of the schema that the view belongs to.
	ViewSchema string `json:",omitempty"`

	// ViewName is the name of the view.
	ViewName string `json:",omitempty"`

	// IsMaterialized indicates if the view is a materialized view.
	IsMaterialized bool `json:",omitempty"`

	// SQL is the SQL definition of the view.
	SQL string `json:",omitempty"`

	// Columns is the list of columns in the view.
	Columns []string `json:",omitempty"`

	// ColumnTypes is the list of column types in the view.
	ColumnTypes []string `json:",omitempty"`

	// EnumColumns is the list of columns in the view whose column type is an
	// enum.
	EnumColumns []string `json:",omitempty"`

	// Indexes is the list of indexes belonging to the view.
	Indexes []Index `json:",omitempty"`

	// Triggers is the list of triggers belonging to the view.
	Triggers []Trigger `json:",omitempty"`

	// Comment stores the comment on the view.
	Comment string `json:",omitempty"`

	// If Ignore is true, the view should be treated like it doesn't exist (a
	// soft delete flag).
	Ignore bool `json:",omitempty"`
}

View represents a database view.

type ViewStruct

type ViewStruct struct {
	Name   string
	Fields []StructField
}

type ViewStructs

type ViewStructs []ViewStruct

func NewViewStructs added in v0.3.3

func NewViewStructs(dialect string, db *sql.DB, filter Filter) (ViewStructs, error)

NewViewStructs introspects a database connection and returns a slice of ViewStructs, each ViewStruct corresponding to a view in the database. You may narrow down the list of views by filling in the Schemas, ExcludeSchemas, Views and ExcludeViews fields of the Filter struct. The Filter.ObjectTypes field will always be set to []string{"VIEWS"}.

func (*ViewStructs) MarshalText

func (s *ViewStructs) MarshalText() (text []byte, err error)

MarshalText converts the ViewStructs into Go source code.

func (*ViewStructs) ReadCatalog

func (s *ViewStructs) ReadCatalog(catalog *Catalog) error

ReadCatalog reads from a catalog and populates the ViewStructs accordingly.

type ViewsCmd

type ViewsCmd struct {
	// (Required) DB is the database.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// PackageName is the name of the package for the generated go code. Leave
	// blank to use "_".
	PackageName string

	// Filename is the name of the file to write the view structs into. Leave
	// blank to write to Stdout instead.
	Filename string

	// Stdout specifies the command's standard out to write to if no Filename
	// is provided. If nil, the command writes to os.Stdout.
	Stdout io.Writer

	// Schemas is the list of schemas that will be included.
	Schemas []string

	// ExcludeSchemas is the list of schemas that will be excluded.
	ExcludeSchemas []string

	// Views is the list of views that will be included.
	Views []string

	// ExcludeViews is the list of views that will be excluded.
	ExcludeViews []string
	// contains filtered or unexported fields
}

ViewsCmd implements the `sqddl views` subcommand.

func ViewsCommand

func ViewsCommand(args ...string) (*ViewsCmd, error)

func (*ViewsCmd) Run

func (cmd *ViewsCmd) Run() error

type WipeCmd

type WipeCmd struct {
	// (Required) DB is the database to wipe.
	DB *sql.DB

	// (Required) Dialect is the database dialect.
	Dialect string

	// If DryRun is true, the SQL queries will be written to Stdout instead of
	// being run against the database.
	DryRun bool

	// Stdout is the command's standard out. If nil, the command writes to
	// os.Stdout.
	Stdout io.Writer

	// Ctx is the command's context.
	Ctx context.Context
	// contains filtered or unexported fields
}

WipeCmd implements the `sqddl wipe` subcommand.

func WipeCommand

func WipeCommand(args ...string) (*WipeCmd, error)

WipeCommand creates a new WipeCmd with the given arguments. E.g.

sqddl wipe -db <DATABASE_URL> [FLAGS]

WipeCommand("-db", "postgres://user:pass@localhost:5432/sakila")

func (*WipeCmd) Run

func (cmd *WipeCmd) Run() error

Run runs the WipeCmd.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL