cockroach: github.com/cockroachdb/cockroach/pkg/sql/sqlbase

package sqlbase

import "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"

Package Files

cancel_checker.go check.go col_type_info.go column_resolver.go column_type_encoding.go column_type_properties.go computed_exprs.go constants.go data_source.go datum_alloc.go default_exprs.go encoded_datum.go encoded_datum.pb.go errors.go evalctx.go expr_filter.go formatversion_string.go index_encoding.go join_type.go join_type.pb.go keys.go metadata.go ordering.go partition.go prepared_statement.go privilege.go privilege.pb.go result_columns.go select_name_resolution.go settings.go sort.go structured.go structured.pb.go system.go table.go testutils.go

Constants

const (
    CrdbInternalID = math.MaxUint32 - iota
    CrdbInternalBackwardDependenciesTableID
    CrdbInternalBuildInfoTableID
    CrdbInternalBuiltinFunctionsTableID
    CrdbInternalClusterQueriesTableID
    CrdbInternalClusterSessionsTableID
    CrdbInternalClusterSettingsTableID
    CrdbInternalCreateStmtsTableID
    CrdbInternalFeatureUsageID
    CrdbInternalForwardDependenciesTableID
    CrdbInternalGossipNodesTableID
    CrdbInternalGossipAlertsTableID
    CrdbInternalGossipLivenessTableID
    CrdbInternalGossipNetworkTableID
    CrdbInternalIndexColumnsTableID
    CrdbInternalJobsTableID
    CrdbInternalKVNodeStatusTableID
    CrdbInternalKVStoreStatusTableID
    CrdbInternalLeasesTableID
    CrdbInternalLocalQueriesTableID
    CrdbInternalLocalSessionsTableID
    CrdbInternalLocalMetricsTableID
    CrdbInternalPartitionsTableID
    CrdbInternalPredefinedCommentsTableID
    CrdbInternalRangesNoLeasesTableID
    CrdbInternalRangesViewID
    CrdbInternalRuntimeInfoTableID
    CrdbInternalSchemaChangesTableID
    CrdbInternalSessionTraceTableID
    CrdbInternalSessionVariablesTableID
    CrdbInternalStmtStatsTableID
    CrdbInternalTableColumnsTableID
    CrdbInternalTableIndexesTableID
    CrdbInternalTablesTableID
    CrdbInternalTxnStatsTableID
    CrdbInternalZonesTableID
    InformationSchemaID
    InformationSchemaAdministrableRoleAuthorizationsID
    InformationSchemaApplicableRolesID
    InformationSchemaCheckConstraints
    InformationSchemaColumnPrivilegesID
    InformationSchemaColumnsTableID
    InformationSchemaConstraintColumnUsageTableID
    InformationSchemaEnabledRolesID
    InformationSchemaKeyColumnUsageTableID
    InformationSchemaParametersTableID
    InformationSchemaReferentialConstraintsTableID
    InformationSchemaRoleTableGrantsID
    InformationSchemaRoutineTableID
    InformationSchemaSchemataTableID
    InformationSchemaSchemataTablePrivilegesID
    InformationSchemaSequencesID
    InformationSchemaStatisticsTableID
    InformationSchemaTableConstraintTableID
    InformationSchemaTablePrivilegesID
    InformationSchemaTablesTableID
    InformationSchemaViewsTableID
    InformationSchemaUserPrivilegesID
    PgCatalogID
    PgCatalogAmTableID
    PgCatalogAttrDefTableID
    PgCatalogAttributeTableID
    PgCatalogAuthMembersTableID
    PgCatalogAvailableExtensionsTableID
    PgCatalogCastTableID
    PgCatalogClassTableID
    PgCatalogCollationTableID
    PgCatalogConstraintTableID
    PgCatalogConversionTableID
    PgCatalogDatabaseTableID
    PgCatalogDefaultACLTableID
    PgCatalogDependTableID
    PgCatalogDescriptionTableID
    PgCatalogSharedDescriptionTableID
    PgCatalogEnumTableID
    PgCatalogExtensionTableID
    PgCatalogForeignDataWrapperTableID
    PgCatalogForeignServerTableID
    PgCatalogForeignTableTableID
    PgCatalogIndexTableID
    PgCatalogIndexesTableID
    PgCatalogInheritsTableID
    PgCatalogLanguageTableID
    PgCatalogLocksTableID
    PgCatalogMatViewsTableID
    PgCatalogNamespaceTableID
    PgCatalogOperatorTableID
    PgCatalogPreparedStatementsTableID
    PgCatalogPreparedXactsTableID
    PgCatalogProcTableID
    PgCatalogRangeTableID
    PgCatalogRewriteTableID
    PgCatalogRolesTableID
    PgCatalogSecLabelsTableID
    PgCatalogSequencesTableID
    PgCatalogSettingsTableID
    PgCatalogShdependTableID
    PgCatalogUserTableID
    PgCatalogUserMappingTableID
    PgCatalogTablesTableID
    PgCatalogTablespaceTableID
    PgCatalogTriggerTableID
    PgCatalogTypeTableID
    PgCatalogViewsTableID
    PgCatalogStatActivityTableID
    PgCatalogSecurityLabelTableID
    PgCatalogSharedSecurityLabelTableID
    MinVirtualID = PgCatalogSharedSecurityLabelTableID
)

OIDs for the virtual databases and tables.

const (
    InnerJoin        = JoinType_INNER
    LeftOuterJoin    = JoinType_LEFT_OUTER
    RightOuterJoin   = JoinType_RIGHT_OUTER
    FullOuterJoin    = JoinType_FULL_OUTER
    LeftSemiJoin     = JoinType_LEFT_SEMI
    LeftAntiJoin     = JoinType_LEFT_ANTI
    IntersectAllJoin = JoinType_INTERSECT_ALL
    ExceptAllJoin    = JoinType_EXCEPT_ALL
)

Prettier aliases for JoinType values. See the original types for descriptions.
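
As a small illustration (not taken from the package), the aliases interoperate with the generated JoinType machinery documented further below, e.g. the JoinType_name map:

package main

import (
    "fmt"

    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

func main() {
    // LeftOuterJoin is an alias for JoinType_LEFT_OUTER, so the generated
    // name map resolves it to "LEFT_OUTER".
    jt := sqlbase.LeftOuterJoin
    fmt.Println(sqlbase.JoinType_name[int32(jt)])
}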

const (
    NamespaceTableSchema = "" /* 129 byte string literal not displayed */

    DescriptorTableSchema = `
CREATE TABLE system.descriptor (
  id         INT8 PRIMARY KEY,
  descriptor BYTES
);`

    UsersTableSchema = "" /* 145 byte string literal not displayed */

    ZonesTableSchema = `
CREATE TABLE system.zones (
  id     INT8 PRIMARY KEY,
  config BYTES
);`

    SettingsTableSchema = "" /* 256 byte string literal not displayed */

)

These system tables are part of the system config.

const (
    LeaseTableSchema = "" /* 170 byte string literal not displayed */

    EventLogTableSchema = "" /* 292 byte string literal not displayed */

    RangeEventTableSchema = "" /* 326 byte string literal not displayed */

    UITableSchema = `
CREATE TABLE system.ui (
	key           STRING PRIMARY KEY,
	value         BYTES,
	"lastUpdated" TIMESTAMP NOT NULL
);`

    JobsTableSchema = "" /* 293 byte string literal not displayed */

    WebSessionsTableSchema = "" /* 525 byte string literal not displayed */

    TableStatisticsTableSchema = "" /* 557 byte string literal not displayed */

    LocationsTableSchema = "" /* 286 byte string literal not displayed */

    RoleMembersTableSchema = "" /* 196 byte string literal not displayed */

    CommentsTableSchema = "" /* 398 byte string literal not displayed */

)

These system tables are not part of the system config.

const DelegatedAppNamePrefix = "$$ "

DelegatedAppNamePrefix is added to a regular client application name for SQL queries that are run internally on behalf of other SQL queries inside that application. This is not the same as ReportableAppNamePrefix; in particular, an application name with DelegatedAppNamePrefix should be scrubbed in reporting.

const EncDatumOverhead = unsafe.Sizeof(EncDatum{})

EncDatumOverhead is the overhead of EncDatum in bytes.

const EncDatumRowOverhead = unsafe.Sizeof(EncDatumRow{})

EncDatumRowOverhead is the overhead of EncDatumRow in bytes.
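
For example, a memory accountant might charge for the fixed struct overhead of a row separately from the datum payloads. The helper below is a minimal sketch, not part of the package:

package main

import (
    "fmt"

    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// rowStructOverhead estimates only the fixed struct overhead of an
// EncDatumRow with numCols entries; the encoded/decoded datum payloads
// would be accounted for separately.
func rowStructOverhead(numCols int) uintptr {
    return sqlbase.EncDatumRowOverhead + uintptr(numCols)*sqlbase.EncDatumOverhead
}

func main() {
    fmt.Println("overhead of a 4-column row:", rowStructOverhead(4), "bytes")
}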

const FamilyHeuristicTargetBytes = 256

FamilyHeuristicTargetBytes is the target total byte size of columns that the current heuristic will assign to a family.

const IgnoreConstraints = false

IgnoreConstraints is used in MakeFirstMutationPublic to indicate that the returned table descriptor should not include newly added constraints, which is useful when the returned descriptor is used to validate constraints that are about to be added.

const IncludeConstraints = true

IncludeConstraints is used in MakeFirstMutationPublic to indicate that the table descriptor returned should include newly added constraints.

const InternalAppNamePrefix = ReportableAppNamePrefix + "internal"

InternalAppNamePrefix indicates that the application name identifies an internal task / query / job to CockroachDB. Different application names are used to classify queries in different categories.

const (
    // PrimaryKeyIndexName is the name of the index for the primary key.
    PrimaryKeyIndexName = "primary"
)
const ReportableAppNamePrefix = "$ "

ReportableAppNamePrefix indicates that the application name can be reported in telemetry without scrubbing. (Note this only applies to the application name itself. Query data is still scrubbed as usual.)
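
A hedged sketch of how a telemetry consumer might classify application names using the three prefixes above; the helper is illustrative and not part of the package:

package main

import (
    "fmt"
    "strings"

    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// classifyAppName is an illustrative helper: names carrying
// ReportableAppNamePrefix (which includes InternalAppNamePrefix) may be
// reported as-is, while delegated and customer names should be scrubbed.
func classifyAppName(appName string) string {
    switch {
    case strings.HasPrefix(appName, sqlbase.InternalAppNamePrefix):
        return "internal"
    case strings.HasPrefix(appName, sqlbase.DelegatedAppNamePrefix):
        return "delegated (scrub before reporting)"
    case strings.HasPrefix(appName, sqlbase.ReportableAppNamePrefix):
        return "reportable"
    default:
        return "customer application (scrub before reporting)"
    }
}

func main() {
    fmt.Println(classifyAppName(sqlbase.InternalAppNamePrefix + "-job"))
    fmt.Println(classifyAppName("my_app"))
}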

Variables

var (
    ErrInvalidLengthPrivilege = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowPrivilege   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthStructured = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowStructured   = fmt.Errorf("proto: integer overflow")
)
var (
    // SystemDB is the descriptor for the system database.
    SystemDB = MakeSystemDatabaseDesc()

    // NamespaceTable is the descriptor for the namespace table.
    NamespaceTable = TableDescriptor{
        Name:     "namespace",
        ID:       keys.NamespaceTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "parentID", ID: 1, Type: *types.Int},
            {Name: "name", ID: 2, Type: *types.String},
            {Name: "id", ID: 3, Type: *types.Int, Nullable: true},
        },
        NextColumnID: 4,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"parentID", "name"}, ColumnIDs: []ColumnID{1, 2}},
            {Name: "fam_3_id", ID: 3, ColumnNames: []string{"id"}, ColumnIDs: []ColumnID{3}, DefaultColumnID: 3},
        },
        NextFamilyID: 4,
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"parentID", "name"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 2},
        },
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.NamespaceTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // DescriptorTable is the descriptor for the descriptor table.
    DescriptorTable = TableDescriptor{
        Name:       "descriptor",
        ID:         keys.DescriptorTableID,
        Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.DescriptorTableID]),
        ParentID:   keys.SystemDatabaseID,
        Version:    1,
        Columns: []ColumnDescriptor{
            {Name: "id", ID: 1, Type: *types.Int},
            {Name: "descriptor", ID: 2, Type: *types.Bytes, Nullable: true},
        },
        NextColumnID: 3,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"id"}, ColumnIDs: singleID1},
            {Name: "fam_2_descriptor", ID: 2, ColumnNames: []string{"descriptor"}, ColumnIDs: []ColumnID{2}, DefaultColumnID: 2},
        },
        PrimaryIndex:   pk("id"),
        NextFamilyID:   3,
        NextIndexID:    2,
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // UsersTable is the descriptor for the users table.
    UsersTable = TableDescriptor{
        Name:     "users",
        ID:       keys.UsersTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "username", ID: 1, Type: *types.String},
            {Name: "hashedPassword", ID: 2, Type: *types.Bytes, Nullable: true},
            {Name: "isRole", ID: 3, Type: *types.Bool, DefaultExpr: &falseBoolString},
        },
        NextColumnID: 4,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"username"}, ColumnIDs: singleID1},
            {Name: "fam_2_hashedPassword", ID: 2, ColumnNames: []string{"hashedPassword"}, ColumnIDs: []ColumnID{2}, DefaultColumnID: 2},
            {Name: "fam_3_isRole", ID: 3, ColumnNames: []string{"isRole"}, ColumnIDs: []ColumnID{3}, DefaultColumnID: 3},
        },
        PrimaryIndex:   pk("username"),
        NextFamilyID:   4,
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.UsersTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // ZonesTable is the descriptor for the zones table.
    ZonesTable = TableDescriptor{
        Name:     "zones",
        ID:       keys.ZonesTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "id", ID: 1, Type: *types.Int},
            {Name: "config", ID: keys.ZonesTableConfigColumnID, Type: *types.Bytes, Nullable: true},
        },
        NextColumnID: 3,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"id"}, ColumnIDs: singleID1},
            {Name: "fam_2_config", ID: keys.ZonesTableConfigColFamID, ColumnNames: []string{"config"},
                ColumnIDs: []ColumnID{keys.ZonesTableConfigColumnID}, DefaultColumnID: keys.ZonesTableConfigColumnID},
        },
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               keys.ZonesTablePrimaryIndexID,
            Unique:           true,
            ColumnNames:      []string{"id"},
            ColumnDirections: singleASC,
            ColumnIDs:        []ColumnID{keys.ZonesTablePrimaryIndexID},
        },
        NextFamilyID:   3,
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ZonesTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }
    // SettingsTable is the descriptor for the settings table.
    SettingsTable = TableDescriptor{
        Name:     "settings",
        ID:       keys.SettingsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "name", ID: 1, Type: *types.String},
            {Name: "value", ID: 2, Type: *types.String},
            {Name: "lastUpdated", ID: 3, Type: *types.Timestamp, DefaultExpr: &nowString},
            {Name: "valueType", ID: 4, Type: *types.String, Nullable: true},
        },
        NextColumnID: 5,
        Families: []ColumnFamilyDescriptor{
            {
                Name:        "fam_0_name_value_lastUpdated_valueType",
                ID:          0,
                ColumnNames: []string{"name", "value", "lastUpdated", "valueType"},
                ColumnIDs:   []ColumnID{1, 2, 3, 4},
            },
        },
        NextFamilyID:   1,
        PrimaryIndex:   pk("name"),
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.SettingsTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }
)

These system config TableDescriptor literals should match the descriptors that would be produced by evaluating the corresponding `CREATE TABLE` statements above. See `TestSystemTableLiterals`, which checks that they do indeed match and has suggestions on writing and maintaining them.
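
The literals can be inspected like any other Go values. The following minimal sketch (not part of the package) prints the column layout of NamespaceTable using only fields shown above:

package main

import (
    "fmt"

    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

func main() {
    desc := sqlbase.NamespaceTable
    fmt.Printf("table %q (id %d)\n", desc.Name, desc.ID)
    for _, col := range desc.Columns {
        // Column names, IDs and nullability mirror the literal above.
        fmt.Printf("  column %q id=%d nullable=%t\n", col.Name, col.ID, col.Nullable)
    }
    fmt.Printf("  primary index columns: %v\n", desc.PrimaryIndex.ColumnNames)
}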

var (
    // LeaseTable is the descriptor for the leases table.
    LeaseTable = TableDescriptor{
        Name:     "lease",
        ID:       keys.LeaseTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "descID", ID: 1, Type: *types.Int},
            {Name: "version", ID: 2, Type: *types.Int},
            {Name: "nodeID", ID: 3, Type: *types.Int},
            {Name: "expiration", ID: 4, Type: *types.Timestamp},
        },
        NextColumnID: 5,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"descID", "version", "nodeID", "expiration"}, ColumnIDs: []ColumnID{1, 2, 3, 4}},
        },
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"descID", "version", "expiration", "nodeID"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC, IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 2, 4, 3},
        },
        NextFamilyID:   1,
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.LeaseTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // EventLogTable is the descriptor for the event log table.
    EventLogTable = TableDescriptor{
        Name:     "eventlog",
        ID:       keys.EventLogTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "timestamp", ID: 1, Type: *types.Timestamp},
            {Name: "eventType", ID: 2, Type: *types.String},
            {Name: "targetID", ID: 3, Type: *types.Int},
            {Name: "reportingID", ID: 4, Type: *types.Int},
            {Name: "info", ID: 5, Type: *types.String, Nullable: true},
            {Name: "uniqueID", ID: 6, Type: *types.Bytes, DefaultExpr: &uuidV4String},
        },
        NextColumnID: 7,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"timestamp", "uniqueID"}, ColumnIDs: []ColumnID{1, 6}},
            {Name: "fam_2_eventType", ID: 2, ColumnNames: []string{"eventType"}, ColumnIDs: []ColumnID{2}, DefaultColumnID: 2},
            {Name: "fam_3_targetID", ID: 3, ColumnNames: []string{"targetID"}, ColumnIDs: []ColumnID{3}, DefaultColumnID: 3},
            {Name: "fam_4_reportingID", ID: 4, ColumnNames: []string{"reportingID"}, ColumnIDs: []ColumnID{4}, DefaultColumnID: 4},
            {Name: "fam_5_info", ID: 5, ColumnNames: []string{"info"}, ColumnIDs: []ColumnID{5}, DefaultColumnID: 5},
        },
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"timestamp", "uniqueID"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 6},
        },
        NextFamilyID:   6,
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.EventLogTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // RangeEventTable is the descriptor for the range log table.
    RangeEventTable = TableDescriptor{
        Name:     "rangelog",
        ID:       keys.RangeEventTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "timestamp", ID: 1, Type: *types.Timestamp},
            {Name: "rangeID", ID: 2, Type: *types.Int},
            {Name: "storeID", ID: 3, Type: *types.Int},
            {Name: "eventType", ID: 4, Type: *types.String},
            {Name: "otherRangeID", ID: 5, Type: *types.Int, Nullable: true},
            {Name: "info", ID: 6, Type: *types.String, Nullable: true},
            {Name: "uniqueID", ID: 7, Type: *types.Int, DefaultExpr: &uniqueRowIDString},
        },
        NextColumnID: 8,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"timestamp", "uniqueID"}, ColumnIDs: []ColumnID{1, 7}},
            {Name: "fam_2_rangeID", ID: 2, ColumnNames: []string{"rangeID"}, ColumnIDs: []ColumnID{2}, DefaultColumnID: 2},
            {Name: "fam_3_storeID", ID: 3, ColumnNames: []string{"storeID"}, ColumnIDs: []ColumnID{3}, DefaultColumnID: 3},
            {Name: "fam_4_eventType", ID: 4, ColumnNames: []string{"eventType"}, ColumnIDs: []ColumnID{4}, DefaultColumnID: 4},
            {Name: "fam_5_otherRangeID", ID: 5, ColumnNames: []string{"otherRangeID"}, ColumnIDs: []ColumnID{5}, DefaultColumnID: 5},
            {Name: "fam_6_info", ID: 6, ColumnNames: []string{"info"}, ColumnIDs: []ColumnID{6}, DefaultColumnID: 6},
        },
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"timestamp", "uniqueID"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 7},
        },
        NextFamilyID:   7,
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.RangeEventTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // UITable is the descriptor for the ui table.
    UITable = TableDescriptor{
        Name:     "ui",
        ID:       keys.UITableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "key", ID: 1, Type: *types.String},
            {Name: "value", ID: 2, Type: *types.Bytes, Nullable: true},
            {Name: "lastUpdated", ID: 3, Type: *types.Timestamp},
        },
        NextColumnID: 4,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"key"}, ColumnIDs: singleID1},
            {Name: "fam_2_value", ID: 2, ColumnNames: []string{"value"}, ColumnIDs: []ColumnID{2}, DefaultColumnID: 2},
            {Name: "fam_3_lastUpdated", ID: 3, ColumnNames: []string{"lastUpdated"}, ColumnIDs: []ColumnID{3}, DefaultColumnID: 3},
        },
        NextFamilyID:   4,
        PrimaryIndex:   pk("key"),
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.UITableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // JobsTable is the descriptor for the jobs table.
    JobsTable = TableDescriptor{
        Name:     "jobs",
        ID:       keys.JobsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "id", ID: 1, Type: *types.Int, DefaultExpr: &uniqueRowIDString},
            {Name: "status", ID: 2, Type: *types.String},
            {Name: "created", ID: 3, Type: *types.Timestamp, DefaultExpr: &nowString},
            {Name: "payload", ID: 4, Type: *types.Bytes},
        },
        NextColumnID: 5,
        Families: []ColumnFamilyDescriptor{
            {
                Name:        "fam_0_id_status_created_payload",
                ID:          0,
                ColumnNames: []string{"id", "status", "created", "payload"},
                ColumnIDs:   []ColumnID{1, 2, 3, 4},
            },
        },
        NextFamilyID: 1,
        PrimaryIndex: pk("id"),
        Indexes: []IndexDescriptor{
            {
                Name:             "jobs_status_created_idx",
                ID:               2,
                Unique:           false,
                ColumnNames:      []string{"status", "created"},
                ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
                ColumnIDs:        []ColumnID{2, 3},
                ExtraColumnIDs:   []ColumnID{1},
            },
        },
        NextIndexID:    3,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.JobsTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // WebSessions table to authenticate sessions over stateless connections.
    WebSessionsTable = TableDescriptor{
        Name:     "web_sessions",
        ID:       keys.WebSessionsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "id", ID: 1, Type: *types.Int, DefaultExpr: &uniqueRowIDString},
            {Name: "hashedSecret", ID: 2, Type: *types.Bytes},
            {Name: "username", ID: 3, Type: *types.String},
            {Name: "createdAt", ID: 4, Type: *types.Timestamp, DefaultExpr: &nowString},
            {Name: "expiresAt", ID: 5, Type: *types.Timestamp},
            {Name: "revokedAt", ID: 6, Type: *types.Timestamp, Nullable: true},
            {Name: "lastUsedAt", ID: 7, Type: *types.Timestamp, DefaultExpr: &nowString},
            {Name: "auditInfo", ID: 8, Type: *types.String, Nullable: true},
        },
        NextColumnID: 9,
        Families: []ColumnFamilyDescriptor{
            {
                Name: "fam_0_id_hashedSecret_username_createdAt_expiresAt_revokedAt_lastUsedAt_auditInfo",
                ID:   0,
                ColumnNames: []string{
                    "id",
                    "hashedSecret",
                    "username",
                    "createdAt",
                    "expiresAt",
                    "revokedAt",
                    "lastUsedAt",
                    "auditInfo",
                },
                ColumnIDs: []ColumnID{1, 2, 3, 4, 5, 6, 7, 8},
            },
        },
        NextFamilyID: 1,
        PrimaryIndex: pk("id"),
        Indexes: []IndexDescriptor{
            {
                Name:             "web_sessions_expiresAt_idx",
                ID:               2,
                Unique:           false,
                ColumnNames:      []string{"expiresAt"},
                ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC},
                ColumnIDs:        []ColumnID{5},
                ExtraColumnIDs:   []ColumnID{1},
            },
            {
                Name:             "web_sessions_createdAt_idx",
                ID:               3,
                Unique:           false,
                ColumnNames:      []string{"createdAt"},
                ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC},
                ColumnIDs:        []ColumnID{4},
                ExtraColumnIDs:   []ColumnID{1},
            },
        },
        NextIndexID:    4,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.WebSessionsTableID]),
        NextMutationID: 1,
        FormatVersion:  3,
    }

    // TableStatistics table to hold statistics about columns and column groups.
    TableStatisticsTable = TableDescriptor{
        Name:     "table_statistics",
        ID:       keys.TableStatisticsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "tableID", ID: 1, Type: *types.Int},
            {Name: "statisticID", ID: 2, Type: *types.Int, DefaultExpr: &uniqueRowIDString},
            {Name: "name", ID: 3, Type: *types.String, Nullable: true},
            {Name: "columnIDs", ID: 4, Type: *types.IntArray},
            {Name: "createdAt", ID: 5, Type: *types.Timestamp, DefaultExpr: &nowString},
            {Name: "rowCount", ID: 6, Type: *types.Int},
            {Name: "distinctCount", ID: 7, Type: *types.Int},
            {Name: "nullCount", ID: 8, Type: *types.Int},
            {Name: "histogram", ID: 9, Type: *types.Bytes, Nullable: true},
        },
        NextColumnID: 10,
        Families: []ColumnFamilyDescriptor{
            {
                Name: "fam_0_tableID_statisticID_name_columnIDs_createdAt_rowCount_distinctCount_nullCount_histogram",
                ID:   0,
                ColumnNames: []string{
                    "tableID",
                    "statisticID",
                    "name",
                    "columnIDs",
                    "createdAt",
                    "rowCount",
                    "distinctCount",
                    "nullCount",
                    "histogram",
                },
                ColumnIDs: []ColumnID{1, 2, 3, 4, 5, 6, 7, 8, 9},
            },
        },
        NextFamilyID: 1,
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"tableID", "statisticID"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 2},
        },
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.TableStatisticsTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // LocationsTable is the descriptor for the locations table.
    LocationsTable = TableDescriptor{
        Name:     "locations",
        ID:       keys.LocationsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "localityKey", ID: 1, Type: *types.String},
            {Name: "localityValue", ID: 2, Type: *types.String},
            {Name: "latitude", ID: 3, Type: *latLonDecimal},
            {Name: "longitude", ID: 4, Type: *latLonDecimal},
        },
        NextColumnID: 5,
        Families: []ColumnFamilyDescriptor{
            {
                Name:        "fam_0_localityKey_localityValue_latitude_longitude",
                ID:          0,
                ColumnNames: []string{"localityKey", "localityValue", "latitude", "longitude"},
                ColumnIDs:   []ColumnID{1, 2, 3, 4},
            },
        },
        NextFamilyID: 1,
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"localityKey", "localityValue"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 2},
        },
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.LocationsTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // RoleMembersTable is the descriptor for the role_members table.
    RoleMembersTable = TableDescriptor{
        Name:     "role_members",
        ID:       keys.RoleMembersTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "role", ID: 1, Type: *types.String},
            {Name: "member", ID: 2, Type: *types.String},
            {Name: "isAdmin", ID: 3, Type: *types.Bool},
        },
        NextColumnID: 4,
        Families: []ColumnFamilyDescriptor{
            {
                Name:        "primary",
                ID:          0,
                ColumnNames: []string{"role", "member"},
                ColumnIDs:   []ColumnID{1, 2},
            },
            {
                Name:            "fam_3_isAdmin",
                ID:              3,
                ColumnNames:     []string{"isAdmin"},
                ColumnIDs:       []ColumnID{3},
                DefaultColumnID: 3,
            },
        },
        NextFamilyID: 4,
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"role", "member"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 2},
        },
        Indexes: []IndexDescriptor{
            {
                Name:             "role_members_role_idx",
                ID:               2,
                Unique:           false,
                ColumnNames:      []string{"role"},
                ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC},
                ColumnIDs:        []ColumnID{1},

                ExtraColumnIDs: []ColumnID{2},
            },
            {
                Name:             "role_members_member_idx",
                ID:               3,
                Unique:           false,
                ColumnNames:      []string{"member"},
                ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC},
                ColumnIDs:        []ColumnID{2},
                ExtraColumnIDs:   []ColumnID{1},
            },
        },
        NextIndexID:    4,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.RoleMembersTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // CommentsTable is the descriptor for the comments table.
    CommentsTable = TableDescriptor{
        Name:     "comments",
        ID:       keys.CommentsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "type", ID: 1, Type: *types.Int},
            {Name: "object_id", ID: 2, Type: *types.Int},
            {Name: "sub_id", ID: 3, Type: *types.Int},
            {Name: "comment", ID: 4, Type: *types.String},
        },
        NextColumnID: 5,
        Families: []ColumnFamilyDescriptor{
            {Name: "primary", ID: 0, ColumnNames: []string{"type", "object_id", "sub_id"}, ColumnIDs: []ColumnID{1, 2, 3}},
            {Name: "fam_4_comment", ID: 4, ColumnNames: []string{"comment"}, ColumnIDs: []ColumnID{4}, DefaultColumnID: 4},
        },
        NextFamilyID: 5,
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"type", "object_id", "sub_id"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 2, 3},
        },
        NextIndexID:    2,
        Privileges:     newCommentPrivilegeDescriptor(SystemAllowedPrivileges[keys.CommentsTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    ReportsMetaTable = TableDescriptor{
        Name:     "reports_meta",
        ID:       keys.ReportsMetaTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "id", ID: 1, Type: *types.Int},
            {Name: "generated", ID: 2, Type: *types.TimestampTZ},
        },
        NextColumnID: 3,
        Families: []ColumnFamilyDescriptor{
            {
                Name:        "primary",
                ID:          0,
                ColumnNames: []string{"id", "generated"},
                ColumnIDs:   []ColumnID{1, 2},
            },
        },
        NextFamilyID: 1,
        PrimaryIndex: IndexDescriptor{
            Name:        "primary",
            ID:          1,
            Unique:      true,
            ColumnNames: []string{"id"},
            ColumnDirections: []IndexDescriptor_Direction{
                IndexDescriptor_ASC,
            },
            ColumnIDs: []ColumnID{1},
        },
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReportsMetaTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    ReplicationConstraintStatsTableTTL = time.Minute * 10
    // TODO(andrei): In 20.1 we should add a foreign key reference to the
    // reports_meta table. Until then, it would cost us having to create an index
    // on report_id.
    ReplicationConstraintStatsTable = TableDescriptor{
        Name:     "replication_constraint_stats",
        ID:       keys.ReplicationConstraintStatsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "zone_id", ID: 1, Type: *types.Int},
            {Name: "subzone_id", ID: 2, Type: *types.Int},
            {Name: "type", ID: 3, Type: *types.String},
            {Name: "config", ID: 4, Type: *types.String},
            {Name: "report_id", ID: 5, Type: *types.Int},
            {Name: "violation_start", ID: 6, Type: *types.TimestampTZ, Nullable: true},
            {Name: "violating_ranges", ID: 7, Type: *types.Int},
        },
        NextColumnID: 8,
        Families: []ColumnFamilyDescriptor{
            {
                Name: "primary",
                ID:   0,
                ColumnNames: []string{
                    "zone_id",
                    "subzone_id",
                    "type",
                    "config",
                    "report_id",
                    "violation_start",
                    "violating_ranges",
                },
                ColumnIDs: []ColumnID{1, 2, 3, 4, 5, 6, 7},
            },
        },
        NextFamilyID: 1,
        PrimaryIndex: IndexDescriptor{
            Name:        "primary",
            ID:          1,
            Unique:      true,
            ColumnNames: []string{"zone_id", "subzone_id", "type", "config"},
            ColumnDirections: []IndexDescriptor_Direction{
                IndexDescriptor_ASC, IndexDescriptor_ASC, IndexDescriptor_ASC, IndexDescriptor_ASC,
            },
            ColumnIDs: []ColumnID{1, 2, 3, 4},
        },
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReplicationConstraintStatsTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    // TODO(andrei): In 20.1 we should add a foreign key reference to the
    // reports_meta table. Until then, it would cost us having to create an index
    // on report_id.
    ReplicationCriticalLocalitiesTable = TableDescriptor{
        Name:     "replication_critical_localities",
        ID:       keys.ReplicationCriticalLocalitiesTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "zone_id", ID: 1, Type: *types.Int},
            {Name: "subzone_id", ID: 2, Type: *types.Int},
            {Name: "locality", ID: 3, Type: *types.String},
            {Name: "report_id", ID: 4, Type: *types.Int},
            {Name: "at_risk_ranges", ID: 5, Type: *types.Int},
        },
        NextColumnID: 6,
        Families: []ColumnFamilyDescriptor{
            {
                Name: "primary",
                ID:   0,
                ColumnNames: []string{
                    "zone_id",
                    "subzone_id",
                    "locality",
                    "report_id",
                    "at_risk_ranges",
                },
                ColumnIDs: []ColumnID{1, 2, 3, 4, 5},
            },
        },
        NextFamilyID: 1,
        PrimaryIndex: IndexDescriptor{
            Name:        "primary",
            ID:          1,
            Unique:      true,
            ColumnNames: []string{"zone_id", "subzone_id", "locality"},
            ColumnDirections: []IndexDescriptor_Direction{
                IndexDescriptor_ASC, IndexDescriptor_ASC, IndexDescriptor_ASC,
            },
            ColumnIDs: []ColumnID{1, 2, 3},
        },
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReplicationCriticalLocalitiesTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }

    ReplicationStatsTableTTL = time.Minute * 10
    // TODO(andrei): In 20.1 we should add a foreign key reference to the
    // reports_meta table. Until then, it would cost us having to create an index
    // on report_id.
    ReplicationStatsTable = TableDescriptor{
        Name:     "replication_stats",
        ID:       keys.ReplicationStatsTableID,
        ParentID: keys.SystemDatabaseID,
        Version:  1,
        Columns: []ColumnDescriptor{
            {Name: "zone_id", ID: 1, Type: *types.Int},
            {Name: "subzone_id", ID: 2, Type: *types.Int},
            {Name: "report_id", ID: 3, Type: *types.Int},
            {Name: "total_ranges", ID: 4, Type: *types.Int},
            {Name: "unavailable_ranges", ID: 5, Type: *types.Int},
            {Name: "under_replicated_ranges", ID: 6, Type: *types.Int},
            {Name: "over_replicated_ranges", ID: 7, Type: *types.Int},
        },
        NextColumnID: 8,
        Families: []ColumnFamilyDescriptor{
            {
                Name: "primary",
                ID:   0,
                ColumnNames: []string{
                    "zone_id",
                    "subzone_id",
                    "report_id",
                    "total_ranges",
                    "unavailable_ranges",
                    "under_replicated_ranges",
                    "over_replicated_ranges",
                },
                ColumnIDs: []ColumnID{1, 2, 3, 4, 5, 6, 7},
            },
        },
        NextFamilyID: 2,
        PrimaryIndex: IndexDescriptor{
            Name:             "primary",
            ID:               1,
            Unique:           true,
            ColumnNames:      []string{"zone_id", "subzone_id"},
            ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
            ColumnIDs:        []ColumnID{1, 2},
        },
        NextIndexID:    2,
        Privileges:     NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReplicationStatsTableID]),
        FormatVersion:  InterleavedFormatVersion,
        NextMutationID: 1,
    }
)

These system TableDescriptor literals should match the descriptors that would be produced by evaluating the corresponding `CREATE TABLE` statements above for system tables that are not part of the system config. See `TestSystemTableLiterals`, which checks that they do indeed match and has suggestions on writing and maintaining them.

var (
    // OneIntCol is a slice of one IntType.
    OneIntCol = []types.T{*types.Int}
    // TwoIntCols is a slice of two IntTypes.
    TwoIntCols = []types.T{*types.Int, *types.Int}
    // ThreeIntCols is a slice of three IntTypes.
    ThreeIntCols = []types.T{*types.Int, *types.Int, *types.Int}
    // FourIntCols is a slice of four IntTypes.
    FourIntCols = []types.T{*types.Int, *types.Int, *types.Int, *types.Int}
)

The following variables are useful for testing.

var AdminRole = "admin"

AdminRole is the default (and non-droppable) role with superuser privileges.

var AlterTableRelocateColumns = ResultColumns{
    {Name: "key", Typ: types.Bytes},
    {Name: "pretty", Typ: types.String},
}

AlterTableRelocateColumns are the result columns of an ALTER TABLE/INDEX .. EXPERIMENTAL_RELOCATE statement.

var AlterTableScatterColumns = ResultColumns{
    {Name: "key", Typ: types.Bytes},
    {Name: "pretty", Typ: types.String},
}

AlterTableScatterColumns are the result columns of an ALTER TABLE/INDEX .. SCATTER statement.

var AlterTableSplitColumns = ResultColumns{
    {Name: "key", Typ: types.Bytes},
    {Name: "pretty", Typ: types.String},
    {Name: "split_enforced_until", Typ: types.Timestamp},
}

AlterTableSplitColumns are the result columns of an ALTER TABLE/INDEX .. SPLIT AT statement.

var AlterTableUnsplitColumns = ResultColumns{
    {Name: "key", Typ: types.Bytes},
    {Name: "pretty", Typ: types.String},
}

AlterTableUnsplitColumns are the result columns of an ALTER TABLE/INDEX .. UNSPLIT statement.

var AnonymousTable = tree.TableName{}

AnonymousTable is the empty table name, used when a data source has no name of its own, e.g. VALUES, subqueries or the empty source.

var CompositeKeyMatchMethodValue = [...]ForeignKeyReference_Match{
    tree.MatchSimple:  ForeignKeyReference_SIMPLE,
    tree.MatchFull:    ForeignKeyReference_FULL,
    tree.MatchPartial: ForeignKeyReference_PARTIAL,
}

CompositeKeyMatchMethodValue allows the conversion from a tree.CompositeKeyMatchMethod to a ForeignKeyReference_Match.

var ConstraintToUpdate_ConstraintType_name = map[int32]string{
    0:  "CHECK",
    1:  "FOREIGN_KEY",
    2:  "NOT_NULL",
}
var ConstraintToUpdate_ConstraintType_value = map[string]int32{
    "CHECK":       0,
    "FOREIGN_KEY": 1,
    "NOT_NULL":    2,
}
var ConstraintValidity_name = map[int32]string{
    0:  "Validated",
    1:  "Unvalidated",
    2:  "Validating",
    3:  "Dropping",
}
var ConstraintValidity_value = map[string]int32{
    "Validated":   0,
    "Unvalidated": 1,
    "Validating":  2,
    "Dropping":    3,
}
var DatumEncoding_name = map[int32]string{
    0:  "ASCENDING_KEY",
    1:  "DESCENDING_KEY",
    2:  "VALUE",
}
var DatumEncoding_value = map[string]int32{
    "ASCENDING_KEY":  0,
    "DESCENDING_KEY": 1,
    "VALUE":          2,
}
var DefaultSearchPath = sessiondata.MakeSearchPath([]string{"public"})

DefaultSearchPath is the search path used by virgin sessions.

var DefaultSuperuserPrivileges = privilege.List{privilege.ALL}

DefaultSuperuserPrivileges is the list of privileges for super users on non-system objects.

var DescriptorMutation_Direction_name = map[int32]string{
    0:  "NONE",
    1:  "ADD",
    2:  "DROP",
}
var DescriptorMutation_Direction_value = map[string]int32{
    "NONE": 0,
    "ADD":  1,
    "DROP": 2,
}
var DescriptorMutation_State_name = map[int32]string{
    0:  "UNKNOWN",
    1:  "DELETE_ONLY",
    2:  "DELETE_AND_WRITE_ONLY",
}
var DescriptorMutation_State_value = map[string]int32{
    "UNKNOWN":               0,
    "DELETE_ONLY":           1,
    "DELETE_AND_WRITE_ONLY": 2,
}
var ErrDescriptorNotFound = errors.New("descriptor not found")

ErrDescriptorNotFound is returned by GetTableDescFromID to signal that a descriptor could not be found with the given id.

var ErrIndexGCMutationsList = errors.New("index in GC mutations list")

ErrIndexGCMutationsList is returned by FindIndexByID to signal that the index with the given ID does not have a descriptor and is in the garbage collected mutations list.

var ErrMissingColumns = errors.New("table must contain at least 1 column")

ErrMissingColumns indicates a table with no columns.

var ErrMissingPrimaryKey = errors.New("table must contain a primary key")

ErrMissingPrimaryKey indicates a table with no primary key.

var ExplainDistSQLColumns = ResultColumns{
    {Name: "automatic", Typ: types.Bool},
    {Name: "url", Typ: types.String},
    {Name: "json", Typ: types.String, Hidden: true},
}

ExplainDistSQLColumns are the result columns of an EXPLAIN (DISTSQL) statement.

var ExplainOptColumns = ResultColumns{
    {Name: "text", Typ: types.String},
}

ExplainOptColumns are the result columns of an EXPLAIN (OPT) statement.

var ExplainPlanColumns = ResultColumns{

    {Name: "tree", Typ: types.String},

    {Name: "field", Typ: types.String},

    {Name: "description", Typ: types.String},
}

ExplainPlanColumns are the result columns of an EXPLAIN (PLAN) ... statement.

var ExplainPlanVerboseColumns = ResultColumns{

    {Name: "tree", Typ: types.String},

    {Name: "level", Typ: types.Int, Hidden: true},

    {Name: "node_type", Typ: types.String, Hidden: true},

    {Name: "field", Typ: types.String},

    {Name: "description", Typ: types.String},

    {Name: "columns", Typ: types.String},

    {Name: "ordering", Typ: types.String},
}

ExplainPlanVerboseColumns are the result columns of an EXPLAIN (PLAN, ...) ... statement when a flag like VERBOSE or TYPES is passed.

var ExplainVecColumns = ResultColumns{
    {Name: "text", Typ: types.String},
}

ExplainVecColumns are the result columns of an EXPLAIN (VEC) statement.

var ExportColumns = ResultColumns{
    {Name: "filename", Typ: types.String},
    {Name: "rows", Typ: types.Int},
    {Name: "bytes", Typ: types.Int},
}

ExportColumns are the result columns of an EXPORT statement.

var ForeignKeyReferenceActionType = [...]tree.ReferenceAction{
    ForeignKeyReference_NO_ACTION:   tree.NoAction,
    ForeignKeyReference_RESTRICT:    tree.Restrict,
    ForeignKeyReference_SET_DEFAULT: tree.SetDefault,
    ForeignKeyReference_SET_NULL:    tree.SetNull,
    ForeignKeyReference_CASCADE:     tree.Cascade,
}

ForeignKeyReferenceActionType allows the conversion from a ForeignKeyReference_Action to a tree.ReferenceAction.

var ForeignKeyReferenceActionValue = [...]ForeignKeyReference_Action{
    tree.NoAction:   ForeignKeyReference_NO_ACTION,
    tree.Restrict:   ForeignKeyReference_RESTRICT,
    tree.SetDefault: ForeignKeyReference_SET_DEFAULT,
    tree.SetNull:    ForeignKeyReference_SET_NULL,
    tree.Cascade:    ForeignKeyReference_CASCADE,
}

ForeignKeyReferenceActionValue allows the conversion from a tree.ReferenceAction to a ForeignKeyReference_Action.

var ForeignKeyReferenceMatchValue = [...]tree.CompositeKeyMatchMethod{
    ForeignKeyReference_SIMPLE:  tree.MatchSimple,
    ForeignKeyReference_FULL:    tree.MatchFull,
    ForeignKeyReference_PARTIAL: tree.MatchPartial,
}

ForeignKeyReferenceMatchValue allows the conversion from a ForeignKeyReference_Match to a tree.CompositeKeyMatchMethod. It is the inverse of CompositeKeyMatchMethodValue and should stay in sync with it.
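
A hedged sketch of the round-trip conversions these lookup arrays provide, using only names documented in this section:

package main

import (
    "fmt"

    "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

func main() {
    // tree.ReferenceAction -> ForeignKeyReference_Action and back.
    fkAction := sqlbase.ForeignKeyReferenceActionValue[tree.Cascade]
    fmt.Println(sqlbase.ForeignKeyReference_Action_name[int32(fkAction)]) // CASCADE
    fmt.Println(sqlbase.ForeignKeyReferenceActionType[fkAction] == tree.Cascade)

    // tree.CompositeKeyMatchMethod -> ForeignKeyReference_Match and back.
    fkMatch := sqlbase.CompositeKeyMatchMethodValue[tree.MatchFull]
    fmt.Println(sqlbase.ForeignKeyReferenceMatchValue[fkMatch] == tree.MatchFull)
}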

var ForeignKeyReference_Action_name = map[int32]string{
    0:  "NO_ACTION",
    1:  "RESTRICT",
    2:  "SET_NULL",
    3:  "SET_DEFAULT",
    4:  "CASCADE",
}
var ForeignKeyReference_Action_value = map[string]int32{
    "NO_ACTION":   0,
    "RESTRICT":    1,
    "SET_NULL":    2,
    "SET_DEFAULT": 3,
    "CASCADE":     4,
}
var ForeignKeyReference_Match_name = map[int32]string{
    0:  "SIMPLE",
    1:  "FULL",
    2:  "PARTIAL",
}
var ForeignKeyReference_Match_value = map[string]int32{
    "SIMPLE":  0,
    "FULL":    1,
    "PARTIAL": 2,
}
var IndexDescriptor_Direction_name = map[int32]string{
    0:  "ASC",
    1:  "DESC",
}
var IndexDescriptor_Direction_value = map[string]int32{
    "ASC":  0,
    "DESC": 1,
}
var IndexDescriptor_Type_name = map[int32]string{
    0:  "FORWARD",
    1:  "INVERTED",
}
var IndexDescriptor_Type_value = map[string]int32{
    "FORWARD":  0,
    "INVERTED": 1,
}
var JoinType_name = map[int32]string{
    0:  "INNER",
    1:  "LEFT_OUTER",
    2:  "RIGHT_OUTER",
    3:  "FULL_OUTER",
    4:  "LEFT_SEMI",
    5:  "LEFT_ANTI",
    6:  "INTERSECT_ALL",
    7:  "EXCEPT_ALL",
}
var JoinType_value = map[string]int32{
    "INNER":         0,
    "LEFT_OUTER":    1,
    "RIGHT_OUTER":   2,
    "FULL_OUTER":    3,
    "LEFT_SEMI":     4,
    "LEFT_ANTI":     5,
    "INTERSECT_ALL": 6,
    "EXCEPT_ALL":    7,
}
var ParallelScans = settings.RegisterBoolSetting(
    "sql.parallel_scans.enabled",
    "parallelizes scanning different ranges when the maximum result size can be deduced",
    true,
)

ParallelScans controls parallelizing multi-range scans when the maximum size of the result set is known.

var PublicRole = "public"

PublicRole is the special "public" pseudo-role. All users are implicit members of "public". The role cannot be created, dropped, assigned to another role, and is generally not listed. It can be granted privileges, implicitly granting them to all users (current and future).

var QueryCanceledError = pgerror.New(
    pgcode.QueryCanceled, "query execution canceled")

QueryCanceledError is an error representing query cancellation.

var QueryTimeoutError = pgerror.New(
    pgcode.QueryCanceled, "query execution canceled due to statement timeout")

QueryTimeoutError is an error representing a query timeout.

var ScrubColumns = ResultColumns{
    {Name: "job_uuid", Typ: types.Uuid},
    {Name: "error_type", Typ: types.String},
    {Name: "database", Typ: types.String},
    {Name: "table", Typ: types.String},
    {Name: "primary_key", Typ: types.String},
    {Name: "timestamp", Typ: types.Timestamp},
    {Name: "repaired", Typ: types.Bool},
    {Name: "details", Typ: types.Jsonb},
}

ScrubColumns are the result columns of a SCRUB statement.

var SequenceSelectColumns = ResultColumns{
    {Name: `last_value`, Typ: types.Int},
    {Name: `log_cnt`, Typ: types.Int},
    {Name: `is_called`, Typ: types.Bool},
}

SequenceSelectColumns are the result columns of a sequence data source.

var ShowCompactTraceColumns = ResultColumns{
    {Name: "age", Typ: types.Interval},
    {Name: "message", Typ: types.String},
    {Name: "tag", Typ: types.String},
    {Name: "operation", Typ: types.String},
}

ShowCompactTraceColumns are the result columns of a SHOW COMPACT [KV] TRACE statement.

var ShowFingerprintsColumns = ResultColumns{
    {Name: "index_name", Typ: types.String},
    {Name: "fingerprint", Typ: types.String},
}

ShowFingerprintsColumns are the result columns of a SHOW EXPERIMENTAL_FINGERPRINTS statement.

var ShowReplicaTraceColumns = ResultColumns{
    {Name: "timestamp", Typ: types.TimestampTZ},
    {Name: "node_id", Typ: types.Int},
    {Name: "store_id", Typ: types.Int},
    {Name: "replica_id", Typ: types.Int},
}

ShowReplicaTraceColumns are the result columns of a SHOW EXPERIMENTAL_REPLICA TRACE statement.

var ShowSyntaxColumns = ResultColumns{
    {Name: "field", Typ: types.String},
    {Name: "message", Typ: types.String},
}

ShowSyntaxColumns are the columns of a SHOW SYNTAX statement.

var ShowTraceColumns = ResultColumns{
    {Name: "timestamp", Typ: types.TimestampTZ},
    {Name: "age", Typ: types.Interval},
    {Name: "message", Typ: types.String},
    {Name: "tag", Typ: types.String},
    {Name: "location", Typ: types.String},
    {Name: "operation", Typ: types.String},
    {Name: "span", Typ: types.Int},
}

ShowTraceColumns are the result columns of a SHOW [KV] TRACE statement.

var SystemAllowedPrivileges = map[ID]privilege.List{
    keys.SystemDatabaseID:  privilege.ReadData,
    keys.NamespaceTableID:  privilege.ReadData,
    keys.DescriptorTableID: privilege.ReadData,
    keys.UsersTableID:      privilege.ReadWriteData,
    keys.ZonesTableID:      privilege.ReadWriteData,

    keys.SettingsTableID:   privilege.ReadWriteData,
    keys.LeaseTableID:      privilege.ReadWriteData,
    keys.EventLogTableID:   privilege.ReadWriteData,
    keys.RangeEventTableID: privilege.ReadWriteData,
    keys.UITableID:         privilege.ReadWriteData,

    keys.JobsTableID:                          privilege.ReadWriteData,
    keys.WebSessionsTableID:                   privilege.ReadWriteData,
    keys.TableStatisticsTableID:               privilege.ReadWriteData,
    keys.LocationsTableID:                     privilege.ReadWriteData,
    keys.RoleMembersTableID:                   privilege.ReadWriteData,
    keys.CommentsTableID:                      privilege.ReadWriteData,
    keys.ReplicationConstraintStatsTableID:    privilege.ReadWriteData,
    keys.ReplicationCriticalLocalitiesTableID: privilege.ReadWriteData,
    keys.ReplicationStatsTableID:              privilege.ReadWriteData,
    keys.ReportsMetaTableID:                   privilege.ReadWriteData,
}

SystemAllowedPrivileges describes the allowable privilege list for each system object. Super users (root and admin) must have exactly the specified privileges; other users must not exceed them.
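
For example, the system table descriptor literals above build their Privileges field from this map; a minimal sketch of that pattern:

package main

import (
    "fmt"

    "github.com/cockroachdb/cockroach/pkg/keys"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

func main() {
    // The allowed privilege list for a system table, keyed by its fixed ID.
    allowed := sqlbase.SystemAllowedPrivileges[keys.JobsTableID]
    fmt.Println("system.jobs allows:", allowed)

    // The same lookup feeds the Privileges field of the descriptor literals.
    privs := sqlbase.NewCustomSuperuserPrivilegeDescriptor(allowed)
    _ = privs
}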

var TableDescriptor_AuditMode_name = map[int32]string{
    0:  "DISABLED",
    1:  "READWRITE",
}
var TableDescriptor_AuditMode_value = map[string]int32{
    "DISABLED":  0,
    "READWRITE": 1,
}
var TableDescriptor_State_name = map[int32]string{
    0:  "PUBLIC",
    1:  "ADD",
    2:  "DROP",
    3:  "OFFLINE",
}
var TableDescriptor_State_value = map[string]int32{
    "PUBLIC":  0,
    "ADD":     1,
    "DROP":    2,
    "OFFLINE": 3,
}

func AdjustEndKeyForInterleave

func AdjustEndKeyForInterleave(
    table *TableDescriptor, index *IndexDescriptor, end roachpb.Key, inclusive bool,
) (roachpb.Key, error)

AdjustEndKeyForInterleave returns an exclusive end key. It does two things:

- determines the end key based on whether the given end key is inclusive or exclusive
- adjusts the end key to skip unnecessary interleaved sections

For example, the parent span composed from the filter PK >= 1 and PK < 3 is

/1 - /3

This reads all keys up to the first parent key for PK = 3. If the parent had interleaved tables and keys, it would unnecessarily scan over interleaved rows under PK = 2 (e.g. /2/#/5). We can instead "tighten" or adjust the end key from /3 to /2/#. DO NOT pass in any keys that have been invoked with PrefixEnd: this may cause issues when trying to decode the key tokens. AdjustEndKeyForInterleave is idempotent upon successive invocation(s).

func AdjustStartKeyForInterleave

func AdjustStartKeyForInterleave(index *IndexDescriptor, start roachpb.Key) (roachpb.Key, error)

AdjustStartKeyForInterleave adjusts the start key to skip unnecessary interleaved sections.

For example, if child is interleaved into parent, a typical parent span might look like

/1 - /3

and a typical child span might look like

/1/#/2 - /2/#/5

Suppose the parent span is

/1/#/2 - /3

where the start key is a child's index key. Notice that the first parent key read actually starts at /2 since all the parent keys with the prefix /1 come before the child key /1/#/2 (and are not read in the span). We can thus push the start key forward from /1/#/2 to /2. If the start key were /1, we could not push it forward since that is the first key we want to read.
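
A hedged sketch (not part of the package) that tightens both ends of a parent-table span with the two Adjust*ForInterleave functions; the descriptor, index and span are assumed to come from the caller, with an exclusive end key:

package example

import (
    "github.com/cockroachdb/cockroach/pkg/roachpb"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// tightenSpanForInterleave is an illustrative helper that trims a parent-table
// span so it skips interleaved child sections on both ends.
func tightenSpanForInterleave(
    table *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor, span roachpb.Span,
) (roachpb.Span, error) {
    start, err := sqlbase.AdjustStartKeyForInterleave(index, span.Key)
    if err != nil {
        return roachpb.Span{}, err
    }
    // The span's end key is treated as exclusive here, hence inclusive=false.
    end, err := sqlbase.AdjustEndKeyForInterleave(table, index, span.EndKey, false /* inclusive */)
    if err != nil {
        return roachpb.Span{}, err
    }
    return roachpb.Span{Key: start, EndKey: end}, nil
}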

func CannotWriteToComputedColError

func CannotWriteToComputedColError(colName string) error

CannotWriteToComputedColError constructs a write error for a computed column.

func CheckDatumTypeFitsColumnType Uses

func CheckDatumTypeFitsColumnType(col *ColumnDescriptor, typ *types.T) error

CheckDatumTypeFitsColumnType verifies that a given scalar value type is valid to be stored in a column of the given column type.

For the purpose of this analysis, column type aliases are not considered to be different (e.g. TEXT and VARCHAR will fit the same scalar type String).

This is used by the UPDATE, INSERT and UPSERT code.

func ColumnNeedsBackfill Uses

func ColumnNeedsBackfill(desc *ColumnDescriptor) bool

ColumnNeedsBackfill returns true if adding the given column requires a backfill (dropping a column always requires a backfill).

func ColumnsSelectors Uses

func ColumnsSelectors(cols []ColumnDescriptor, forUpdateOrDelete bool) tree.SelectExprs

ColumnsSelectors generates Select expressions for cols.

func CompareDatums Uses

func CompareDatums(ordering ColumnOrdering, evalCtx *tree.EvalContext, lhs, rhs tree.Datums) int

CompareDatums compares two datum rows according to a column ordering. Returns:

- 0 if lhs and rhs are equal on the ordering columns;
- less than 0 if lhs comes first;
- greater than 0 if rhs comes first.
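
As an illustrative sketch (not part of the package), CompareDatums can back a less-than predicate over an explicit ColumnOrdering; the EvalContext is assumed to be supplied by the caller:

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
//   "github.com/cockroachdb/cockroach/pkg/util/encoding"
func rowLess(evalCtx *tree.EvalContext, lhs, rhs tree.Datums) bool {
    // Order by column 1 descending, then by column 0 ascending.
    ordering := sqlbase.ColumnOrdering{
        {ColIdx: 1, Direction: encoding.Descending},
        {ColIdx: 0, Direction: encoding.Ascending},
    }
    return sqlbase.CompareDatums(ordering, evalCtx, lhs, rhs) < 0
}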

func ConditionalGetTableDescFromTxn Uses

func ConditionalGetTableDescFromTxn(
    ctx context.Context, txn *client.Txn, expectation *TableDescriptor,
) (*roachpb.Value, error)

ConditionalGetTableDescFromTxn validates that the supplied TableDescriptor matches the one currently stored in kv. This simulates a CPut and returns a ConditionFailedError on mismatch. We don't directly use CPut with protos because the marshaling is not guaranteed to be stable and also because it's sensitive to things like missing vs default values of fields.

func DatumTypeHasCompositeKeyEncoding Uses

func DatumTypeHasCompositeKeyEncoding(typ *types.T) bool

DatumTypeHasCompositeKeyEncoding is a version of HasCompositeKeyEncoding which works on datum types.

func DecodeIndexKey Uses

func DecodeIndexKey(
    desc *TableDescriptor,
    index *IndexDescriptor,
    types []types.T,
    vals []EncDatum,
    colDirs []IndexDescriptor_Direction,
    key []byte,
) (remainingKey []byte, matches bool, _ error)

DecodeIndexKey decodes the values that are a part of the specified index key (setting vals).

The remaining bytes of the index key are also returned: they are either an encoded column ID (for the primary key index), the primary key suffix (for non-unique secondary indexes, or for unique secondary indexes containing NULL), or empty. If the given descriptor does not match the key, false is returned with no error.

func DecodeIndexKeyWithoutTableIDIndexIDPrefix Uses

func DecodeIndexKeyWithoutTableIDIndexIDPrefix(
    desc *TableDescriptor,
    index *IndexDescriptor,
    types []types.T,
    vals []EncDatum,
    colDirs []IndexDescriptor_Direction,
    key []byte,
) (remainingKey []byte, matches bool, _ error)

DecodeIndexKeyWithoutTableIDIndexIDPrefix is the same as DecodeIndexKey, except it expects its index key is missing its first table id / index id key prefix.

func DecodeKeyVals Uses

func DecodeKeyVals(
    types []types.T, vals []EncDatum, directions []IndexDescriptor_Direction, key []byte,
) ([]byte, error)

DecodeKeyVals decodes the values that are part of the key. The decoded values are stored in vals. If the directions slice is nil, the direction used defaults to encoding.Ascending.

func DecodeTableIDIndexID Uses

func DecodeTableIDIndexID(key []byte) ([]byte, ID, IndexID, error)

DecodeTableIDIndexID decodes a table id followed by an index id.

func DecodeTableKey Uses

func DecodeTableKey(
    a *DatumAlloc, valType *types.T, key []byte, dir encoding.Direction,
) (tree.Datum, []byte, error)

DecodeTableKey decodes a value encoded by EncodeTableKey.

func DecodeTableValue Uses

func DecodeTableValue(a *DatumAlloc, valType *types.T, b []byte) (tree.Datum, []byte, error)

DecodeTableValue decodes a value encoded by EncodeTableValue.

func EncDatumRowToDatums Uses

func EncDatumRowToDatums(
    types []types.T, datums tree.Datums, row EncDatumRow, da *DatumAlloc,
) error

EncDatumRowToDatums converts a given EncDatumRow to a Datums.

func EncodeColumns Uses

func EncodeColumns(
    columnIDs []ColumnID,
    directions directions,
    colMap map[ColumnID]int,
    values []tree.Datum,
    keyPrefix []byte,
) (key []byte, containsNull bool, err error)

EncodeColumns is a version of EncodePartialIndexKey that takes ColumnIDs and directions explicitly. WARNING: unlike EncodePartialIndexKey, EncodeColumns appends directly to keyPrefix.

func EncodeDatumKeyAscending Uses

func EncodeDatumKeyAscending(b []byte, d tree.Datum) ([]byte, error)

EncodeDatumKeyAscending encodes a datum using an order-preserving encoding. The encoding is lossy: some datums need composite encoding where the key part only contains part of the datum's information.

func EncodeDatumsKeyAscending Uses

func EncodeDatumsKeyAscending(b []byte, d tree.Datums) ([]byte, error)

EncodeDatumsKeyAscending encodes a Datums (tuple) using an order-preserving encoding. The encoding is lossy: some datums need composite encoding where the key part only contains part of the datum's information.

func EncodeIndexKey Uses

func EncodeIndexKey(
    tableDesc *TableDescriptor,
    index *IndexDescriptor,
    colMap map[ColumnID]int,
    values []tree.Datum,
    keyPrefix []byte,
) (key []byte, containsNull bool, err error)

EncodeIndexKey creates a key by concatenating keyPrefix with the encodings of the columns in the index, and returns the key and whether any of the encoded values were NULLs.

If a table or index is interleaved, `encoding.interleavedSentinel` is used in place of the family id (a varint) to signal the next component of the key. An example of one level of interleaving (a parent): /<parent_table_id>/<parent_index_id>/<field_1>/<field_2>/NullDesc/<table_id>/<index_id>/<field_3>/<family>

Note that ExtraColumnIDs are not encoded, so the result isn't always a full index key.
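
A minimal sketch of building a primary index key (hypothetical helper; it assumes the primary key columns have IDs 1 and 2 and that values holds their datums in that order):

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func encodePrimaryKey(tbl *sqlbase.TableDescriptor, values []tree.Datum) ([]byte, error) {
    idx := &tbl.PrimaryIndex
    // colMap maps column IDs (assumed to be 1 and 2 here) to positions in values.
    colMap := map[sqlbase.ColumnID]int{1: 0, 2: 1}
    prefix := sqlbase.MakeIndexKeyPrefix(tbl, idx.ID)
    key, _, err := sqlbase.EncodeIndexKey(tbl, idx, colMap, values, prefix)
    return key, err
}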

func EncodeInvertedIndexKeys Uses

func EncodeInvertedIndexKeys(
    tableDesc *TableDescriptor,
    index *IndexDescriptor,
    colMap map[ColumnID]int,
    values []tree.Datum,
    keyPrefix []byte,
) (key [][]byte, err error)

EncodeInvertedIndexKeys creates a list of inverted index keys by concatenating keyPrefix with the encodings of the column in the index, and returns the resulting keys.

func EncodeInvertedIndexTableKeys Uses

func EncodeInvertedIndexTableKeys(val tree.Datum, inKey []byte) (key [][]byte, err error)

EncodeInvertedIndexTableKeys encodes the paths in a JSON `val` and concatenates them with `inKey`, returning a list of buffers, one per path. The encoded values are guaranteed to be lexicographically sortable, but not guaranteed to be round-trippable during decoding.

func EncodePartialIndexKey Uses

func EncodePartialIndexKey(
    tableDesc *TableDescriptor,
    index *IndexDescriptor,
    numCols int,
    colMap map[ColumnID]int,
    values []tree.Datum,
    keyPrefix []byte,
) (key []byte, containsNull bool, err error)

EncodePartialIndexKey encodes a partial index key; only the first numCols of index.ColumnIDs are encoded.

func EncodePartialIndexSpan Uses

func EncodePartialIndexSpan(
    tableDesc *TableDescriptor,
    index *IndexDescriptor,
    numCols int,
    colMap map[ColumnID]int,
    values []tree.Datum,
    keyPrefix []byte,
) (span roachpb.Span, containsNull bool, err error)

EncodePartialIndexSpan creates the minimal key span for the key specified by the given table, index, and values, with the same method as EncodePartialIndexKey.

func EncodeTableIDIndexID Uses

func EncodeTableIDIndexID(key []byte, tableID ID, indexID IndexID) []byte

EncodeTableIDIndexID encodes a table id followed by an index id.

func EncodeTableKey Uses

func EncodeTableKey(b []byte, val tree.Datum, dir encoding.Direction) ([]byte, error)

EncodeTableKey encodes `val` into `b` and returns the new buffer. This is suitable to generate index/lookup keys in KV.

The encoded value is guaranteed to be lexicographically sortable, but not guaranteed to be round-trippable during decoding: some values like decimals or collated strings have composite encoding where part of their value lies in the value part of the key/value pair.

See also: docs/tech-notes/encoding.md, EncodeTableValue().
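
For example (sketch only, a hypothetical helper), appending the ascending key encoding of a string datum to an existing buffer:

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
//   "github.com/cockroachdb/cockroach/pkg/util/encoding"
func appendStringKey(buf []byte, s string) ([]byte, error) {
    return sqlbase.EncodeTableKey(buf, tree.NewDString(s), encoding.Ascending)
}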

func EncodeTableValue Uses

func EncodeTableValue(
    appendTo []byte, colID ColumnID, val tree.Datum, scratch []byte,
) ([]byte, error)

EncodeTableValue encodes `val` into `appendTo` using DatumEncoding_VALUE and returns the new buffer.

This is suitable for generating the value part of individual columns in a column family.

The encoded value is guaranteed to round trip and decode exactly to its input, but is not guaranteed to be lexicographically sortable.

See also: docs/tech-notes/encoding.md, EncodeTableKey().
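
A corresponding sketch for the value side (hypothetical helper), appending the value encoding of an integer datum for a column ID chosen by the caller:

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func appendIntValue(buf []byte, colID sqlbase.ColumnID, v int64) ([]byte, error) {
    var scratch []byte
    return sqlbase.EncodeTableValue(buf, colID, tree.NewDInt(tree.DInt(v)), scratch)
}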

func ExtractIndexKey Uses

func ExtractIndexKey(
    a *DatumAlloc, tableDesc *TableDescriptor, entry client.KeyValue,
) (roachpb.Key, error)

ExtractIndexKey constructs the index (primary) key for a row from any index key/value entry, including secondary indexes.

Don't use this function in the scan "hot path".

func FillColumnRange Uses

func FillColumnRange(firstIdx, lastIdx int) util.FastIntSet

FillColumnRange creates a single range that refers to all the columns between firstIdx and lastIdx, inclusive.

func GetColumnTypes Uses

func GetColumnTypes(desc *TableDescriptor, columnIDs []ColumnID) ([]types.T, error)

GetColumnTypes returns the types of the columns with the given IDs.

func GetTraceAgeColumnIdx Uses

func GetTraceAgeColumnIdx(compact bool) int

GetTraceAgeColumnIdx retrieves the index of the age column depending on whether the compact format is used.

func HasCompositeKeyEncoding Uses

func HasCompositeKeyEncoding(semanticType types.Family) bool

HasCompositeKeyEncoding returns true if key columns of the given kind can have a composite encoding. For such types, it can be decided on a case-by-case basis whether a given Datum requires the composite encoding.

As an example of a composite encoding, collated string key columns are encoded partly as a key and partly as a value. The key part is the collation key, so that different strings that collate equal cannot both be used as keys. The value part is the usual UTF-8 encoding of the string, stored so that it can be recovered later for inspection/display.
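
For instance (sketch, assuming the types.Family constants for collated strings and integers), collated string columns report composite encoding while plain integer columns do not:

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
//   "github.com/cockroachdb/cockroach/pkg/sql/types"
func compositeEncodingExamples() (bool, bool) {
    collated := sqlbase.HasCompositeKeyEncoding(types.CollatedStringFamily) // true
    ints := sqlbase.HasCompositeKeyEncoding(types.IntFamily)                // false
    return collated, ints
}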

func IndexKeyEquivSignature Uses

func IndexKeyEquivSignature(
    key []byte, validEquivSignatures map[string]int, signatureBuf []byte, restBuf []byte,
) (tableIdx int, restResult []byte, success bool, err error)

IndexKeyEquivSignature parses an index key if and only if the index key belongs to a table where its equivalence signature and all its interleave ancestors' signatures can be found in validEquivSignatures.

Its validEquivSignatures argument is a map containing equivalence signatures of valid ancestors of the desired table and of the desired table itself.

IndexKeyEquivSignature returns whether or not the index key satisfies the above condition, the value mapped to by the desired table (could be a table index), and the rest of the key that's not part of the signature.

It also requires two []byte buffers: one for the signature (signatureBuf) and one for the rest of the key (restBuf).

The equivalence signature defines the equivalence classes for the signature of potentially interleaved tables. For example, the equivalence signatures for the following interleaved indexes:

<parent@primary>
<child@secondary>

and index keys

<parent index key>:   /<parent table id>/<parent index id>/<val 1>/<val 2>
<child index key>:    /<parent table id>/<parent index id>/<val 1>/<val 2>/#/<child table id>/<child index id>/<val 3>/<val 4>

correspond to the equivalence signatures

<parent@primary>:     /<parent table id>/<parent index id>
<child@secondary>:    /<parent table id>/<parent index id>/#/<child table id>/<child index id>

Equivalence signatures allow us to associate an index key with its table without having to invoke DecodeIndexKey multiple times.

IndexKeyEquivSignature will return false if the signature of any of the table's interleave ancestors, or the signature of the table itself (the table to which the index key belongs), is not mapped in validEquivSignatures.

For example, suppose the given key is

/<t2 table id>/<t2 index id>/<val t2>/#/<t3 table id>/<t3 index id>/<val t3>

and validEquivSignatures contains

/<t1 table id>/<t1 index id>
/<t1 table id>/<t1 index id>/#/<t4 table id>/<t4 index id>

IndexKeyEquivSignature will short-circuit and return false once

/<t2 table id>/<t2 index id>

is processed since t2's signature is not specified in validEquivSignatures.
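
As a sketch of how these pieces fit together (hypothetical helper, not part of the package), the valid signatures for a table and its interleave ancestors can be built with TableEquivSignatures and then used to test keys during a scan:

// Sketch. Assumed import:
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func keyBelongsToTable(
    desc *sqlbase.TableDescriptor, idx *sqlbase.IndexDescriptor, key []byte,
) (bool, error) {
    sigs, err := sqlbase.TableEquivSignatures(desc, idx)
    if err != nil {
        return false, err
    }
    // Map every ancestor signature as well as the table's own signature.
    valid := make(map[string]int, len(sigs))
    for i, sig := range sigs {
        valid[string(sig)] = i
    }
    var sigBuf, restBuf []byte
    _, _, ok, err := sqlbase.IndexKeyEquivSignature(key, valid, sigBuf, restBuf)
    return ok, err
}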

func IndexKeyValDirs Uses

func IndexKeyValDirs(index *IndexDescriptor) []encoding.Direction

IndexKeyValDirs returns the corresponding encoding.Directions for all the encoded values in index's "fullest" possible index key, including directions for table/index IDs, the interleaved sentinel and the index column values. For example, given

CREATE INDEX foo ON bar (a, b DESC) INTERLEAVED IN PARENT bar (a)

a typical index key with all values specified could be

/51/1/42/#/51/2/1337

which would return the slice

{ASC, ASC, ASC, 0, ASC, ASC, DESC}

func IsCCLRequiredError Uses

func IsCCLRequiredError(err error) bool

IsCCLRequiredError returns whether the error is a CCLRequired error.

func IsOutOfMemoryError Uses

func IsOutOfMemoryError(err error) bool

IsOutOfMemoryError checks whether this is an out of memory error.

func IsReservedID Uses

func IsReservedID(id ID) bool

IsReservedID returns whether this ID is for any system object.

func IsSystemConfigID Uses

func IsSystemConfigID(id ID) bool

IsSystemConfigID returns whether this ID is for a system config object.

func IsVirtualTable Uses

func IsVirtualTable(id ID) bool

IsVirtualTable returns true if the TableDescriptor describes a virtual table (like the information_schema tables) and thus doesn't need to be physically stored.

func LimitValueWidth Uses

func LimitValueWidth(typ *types.T, inVal tree.Datum, name *string) (outVal tree.Datum, err error)

LimitValueWidth checks that the width (for strings, byte arrays, and bit strings) and scale (for decimals) of the value fits the specified column type. In case of decimals, it can truncate fractional digits in the input value in order to fit the target column. If the input value fits the target column, it is returned unchanged. If the input value can be truncated to fit, then a truncated copy is returned. Otherwise, an error is returned. This method is used by INSERT and UPDATE.
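
A sketch of typical usage, fitting an input datum to a hypothetical VARCHAR(3) column named "v"; the types.MakeVarChar constructor is assumed here to build the column type:

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
//   "github.com/cockroachdb/cockroach/pkg/sql/types"
func fitIntoVarChar3(s string) (tree.Datum, error) {
    colName := "v" // hypothetical column name, used only for error messages
    return sqlbase.LimitValueWidth(types.MakeVarChar(3), tree.NewDString(s), &colName)
}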

func MakeAllDescsMetadataKey Uses

func MakeAllDescsMetadataKey() roachpb.Key

MakeAllDescsMetadataKey returns the key for all descriptors.

func MakeColumnDefDescs Uses

func MakeColumnDefDescs(
    d *tree.ColumnTableDef, semaCtx *tree.SemaContext,
) (*ColumnDescriptor, *IndexDescriptor, tree.TypedExpr, error)

MakeColumnDefDescs creates the column descriptor for a column, as well as the index descriptor if the column is a primary key or unique.

If the column type *may* be SERIAL (or SERIAL-like), it is the caller's responsibility to call sql.processSerialInColumnDef() and sql.doCreateSequence() before MakeColumnDefDescs() to remove the SERIAL type and replace it with a suitable integer type and default expression.

semaCtx can be nil if no default expression is used for the column.

The DEFAULT expression is returned in TypedExpr form for analysis (e.g. recording sequence dependencies).

func MakeComputedExprs Uses

func MakeComputedExprs(
    cols []ColumnDescriptor,
    tableDesc *ImmutableTableDescriptor,
    tn *tree.TableName,
    txCtx *transform.ExprTransformContext,
    evalCtx *tree.EvalContext,
    addingCols bool,
) ([]tree.TypedExpr, error)

MakeComputedExprs returns a slice of the computed expressions for the slice of input column descriptors, or nil if none of the input column descriptors have computed expressions. The length of the result slice matches the length of the input column descriptors. For every column that has no computed expression, a NULL expression is reported. addingCols indicates if the input column descriptors are being added and allows type checking of the computed expressions to reference input columns earlier in the slice.

func MakeDefaultExprs Uses

func MakeDefaultExprs(
    cols []ColumnDescriptor, txCtx *transform.ExprTransformContext, evalCtx *tree.EvalContext,
) ([]tree.TypedExpr, error)

MakeDefaultExprs returns a slice of the default expressions for the slice of input column descriptors, or nil if none of the input column descriptors have default expressions. The length of the result slice matches the length of the input column descriptors. For every column that has no default expression, a NULL expression is reported as default.

func MakeDescMetadataKey Uses

func MakeDescMetadataKey(descID ID) roachpb.Key

MakeDescMetadataKey returns the key for the descriptor.

func MakeIndexKeyPrefix Uses

func MakeIndexKeyPrefix(desc *TableDescriptor, indexID IndexID) []byte

MakeIndexKeyPrefix returns the key prefix used for the index's data. If you need the corresponding Span, prefer desc.IndexSpan(indexID) or desc.PrimaryIndexSpan().

func MakeIntCols Uses

func MakeIntCols(numCols int) []types.T

MakeIntCols makes a slice of numCols IntTypes.

func MakeNameMetadataKey Uses

func MakeNameMetadataKey(parentID ID, name string) roachpb.Key

MakeNameMetadataKey returns the key for the name. Pass name == "" in order to generate the prefix key to use to scan over all of the names for the specified parentID.
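
For example (sketch, hypothetical helper), the exact key for one name and the prefix for scanning every name under a parent database ID:

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/roachpb"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func nameKeyAndPrefix(parentID sqlbase.ID, name string) (roachpb.Key, roachpb.Key) {
    exact := sqlbase.MakeNameMetadataKey(parentID, name)
    prefix := sqlbase.MakeNameMetadataKey(parentID, "" /* all names under parentID */)
    return exact, prefix
}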

func MakeSpanFromEncDatums Uses

func MakeSpanFromEncDatums(
    keyPrefix []byte,
    values EncDatumRow,
    types []types.T,
    dirs []IndexDescriptor_Direction,
    tableDesc *TableDescriptor,
    index *IndexDescriptor,
    alloc *DatumAlloc,
) (roachpb.Span, error)

MakeSpanFromEncDatums creates a minimal index key span on the input values. A minimal index key span is a span that includes the fewest possible keys after the start key generated by the input values.

The start key is generated by concatenating keyPrefix with the encodings of the given EncDatum values. The values, types, and dirs parameters should be specified in the same order as the index key columns and may be a prefix.

If a table or index is interleaved, `encoding.interleavedSentinel` is used in place of the family id (a varint) to signal the next component of the key. An example of one level of interleaving (a parent): /<parent_table_id>/<parent_index_id>/<field_1>/<field_2>/NullDesc/<table_id>/<index_id>/<field_3>/<family>

func MarshalColumnValue Uses

func MarshalColumnValue(col *ColumnDescriptor, val tree.Datum) (roachpb.Value, error)

MarshalColumnValue produces the value encoding of the given datum, constrained by the given column type, into a roachpb.Value.

This is used when the table format does not use column families, such as pre-2.0 tables and some system tables.

If val's type is incompatible with col, or if col's type is not yet implemented by this function, an error is returned.

func MustBeValueEncoded Uses

func MustBeValueEncoded(semanticType types.Family) bool

MustBeValueEncoded returns true if columns of the given kind can only be value encoded.

func NewAggInAggError Uses

func NewAggInAggError() error

NewAggInAggError creates an error for the case when an aggregate function is contained within another aggregate function.

func NewCCLRequiredError Uses

func NewCCLRequiredError(err error) error

NewCCLRequiredError creates an error for when a CCL feature is used in an OSS binary.

func NewDatabaseAlreadyExistsError Uses

func NewDatabaseAlreadyExistsError(name string) error

NewDatabaseAlreadyExistsError creates an error for a preexisting database.

func NewDependentObjectError Uses

func NewDependentObjectError(msg string) error

NewDependentObjectError creates a dependent object error.

func NewDependentObjectErrorWithHint Uses

func NewDependentObjectErrorWithHint(msg string, hint string) error

NewDependentObjectErrorWithHint creates a dependent object error with a hint.

func NewInvalidSchemaDefinitionError Uses

func NewInvalidSchemaDefinitionError(err error) error

NewInvalidSchemaDefinitionError creates an error for an invalid schema definition such as a schema definition that doesn't parse.

func NewInvalidWildcardError Uses

func NewInvalidWildcardError(name string) error

NewInvalidWildcardError creates an error that represents the result of expanding a table wildcard over an invalid database or schema prefix.

func NewNonNullViolationError Uses

func NewNonNullViolationError(columnName string) error

NewNonNullViolationError creates an error for a violation of a non-NULL constraint.

func NewRangeUnavailableError Uses

func NewRangeUnavailableError(
    rangeID roachpb.RangeID, origErr error, nodeIDs ...roachpb.NodeID,
) error

NewRangeUnavailableError creates an unavailable range error.

func NewRelationAlreadyExistsError Uses

func NewRelationAlreadyExistsError(name string) error

NewRelationAlreadyExistsError creates an error for a preexisting relation.

func NewSyntaxError Uses

func NewSyntaxError(msg string) error

NewSyntaxError creates a syntax error.

func NewTransactionAbortedError Uses

func NewTransactionAbortedError(customMsg string) error

NewTransactionAbortedError creates an error for trying to run a command in the context of a transaction that's already aborted.

func NewTransactionCommittedError Uses

func NewTransactionCommittedError() error

NewTransactionCommittedError creates an error that signals that the SQL txn is in the COMMIT_WAIT state and that only a COMMIT statement will be accepted.

func NewUndefinedColumnError Uses

func NewUndefinedColumnError(name string) error

NewUndefinedColumnError creates an error that represents a missing database column.

func NewUndefinedDatabaseError Uses

func NewUndefinedDatabaseError(name string) error

NewUndefinedDatabaseError creates an error that represents a missing database.

func NewUndefinedRelationError Uses

func NewUndefinedRelationError(name tree.NodeFormatter) error

NewUndefinedRelationError creates an error that represents a missing database table or view.

func NewUnsupportedSchemaUsageError Uses

func NewUnsupportedSchemaUsageError(name string) error

NewUnsupportedSchemaUsageError creates an error for an invalid schema use, e.g. mydb.someschema.tbl.

func NewWindowInAggError Uses

func NewWindowInAggError() error

NewWindowInAggError creates an error for the case when a window function is nested within an aggregate function.

func NewWrongObjectTypeError Uses

func NewWrongObjectTypeError(name *tree.TableName, desiredObjType string) error

NewWrongObjectTypeError creates a wrong object type error.

func PrettyKey Uses

func PrettyKey(valDirs []encoding.Direction, key roachpb.Key, skip int) string

PrettyKey pretty-prints the specified key, skipping over the first `skip` fields. The pretty printed key looks like:

/Table/<tableID>/<indexID>/...

We always strip off the /Table prefix and then `skip` more fields. Note that this assumes that the fields themselves do not contain '/', but that is currently true for the fields we care about stripping (the table and index ID).
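
A sketch combining this with IndexKeyValDirs (hypothetical helper), skipping the table and index ID fields:

// Sketch. Assumed imports:
//   "github.com/cockroachdb/cockroach/pkg/roachpb"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func prettyIndexKey(idx *sqlbase.IndexDescriptor, key roachpb.Key) string {
    // skip=2 drops the table ID and index ID fields after the /Table prefix.
    return sqlbase.PrettyKey(sqlbase.IndexKeyValDirs(idx), key, 2 /* skip */)
}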

func PrettySpan Uses

func PrettySpan(valDirs []encoding.Direction, span roachpb.Span, skip int) string

PrettySpan returns a human-readable representation of a span.

func PrettySpans Uses

func PrettySpans(index *IndexDescriptor, spans []roachpb.Span, skip int) string

PrettySpans returns a human-readable description of the spans. If index is nil, then pretty print subroutines will use their default settings.

func RandArrayContentsType Uses

func RandArrayContentsType(rng *rand.Rand) *types.T

RandArrayContentsType returns a random type that's guaranteed to be valid to use as the contents of an array.

func RandCollationLocale Uses

func RandCollationLocale(rng *rand.Rand) *string

RandCollationLocale returns a random element of collationLocales.

func RandColumnType Uses

func RandColumnType(rng *rand.Rand) *types.T

RandColumnType returns a random type that is a legal column type (e.g. no nested arrays or tuples).

func RandColumnTypes Uses

func RandColumnTypes(rng *rand.Rand, numCols int) []types.T

RandColumnTypes returns a slice of numCols random types. These types must be legal table column types.

func RandCreateTable Uses

func RandCreateTable(rng *rand.Rand, prefix string, tableIdx int) *tree.CreateTable

RandCreateTable creates a random CreateTable definition.

func RandCreateTables Uses

func RandCreateTables(
    rng *rand.Rand, prefix string, num int, mutators ...mutations.MultiStatementMutation,
) []tree.Statement

RandCreateTables creates random table definitions.

func RandDatum Uses

func RandDatum(rng *rand.Rand, typ *types.T, nullOk bool) tree.Datum

RandDatum generates a random Datum of the given type. If nullOk is true, the datum can be DNull. Note that if typ.Family is UNKNOWN, the datum will always be DNull, regardless of the null flag.

func RandDatumWithNullChance Uses

func RandDatumWithNullChance(rng *rand.Rand, typ *types.T, nullChance int) tree.Datum

RandDatumWithNullChance generates a random Datum of the given type. nullChance is the chance of returning null, expressed as a fraction denominator. For example, a nullChance of 5 means that there's a 1/5 chance that DNull will be returned. A nullChance of 0 means that DNull will not be returned. Note that if typ.Family is UNKNOWN, the datum will always be DNull, regardless of nullChance.
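
A sketch of generating test data (hypothetical helper) with a fixed seed and a 1-in-5 chance of NULL per value:

// Sketch. Assumed imports:
//   "math/rand"
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func randomSortableColumn(n int) []tree.Datum {
    rng := rand.New(rand.NewSource(42))
    typ := sqlbase.RandSortingType(rng)
    out := make([]tree.Datum, n)
    for i := range out {
        out[i] = sqlbase.RandDatumWithNullChance(rng, typ, 5 /* 1-in-5 NULLs */)
    }
    return out
}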

func RandEncodableColumnTypes Uses

func RandEncodableColumnTypes(rng *rand.Rand, numCols int) []types.T

RandEncodableColumnTypes works around #36736, which fails when name[] (or other type using DTypeWrapper) is encoded.

TODO(andyk): Remove this workaround once #36736 is resolved. Replace calls to it with calls to RandColumnTypes.

func RandEncodableType Uses

func RandEncodableType(rng *rand.Rand) *types.T

RandEncodableType wraps RandType in order to workaround #36736, which fails when name[] (or other type using DTypeWrapper) is encoded.

TODO(andyk): Remove this workaround once #36736 is resolved. Also, RandDatum really should be extended to create DTypeWrapper datums with alternate OIDs like oid.T_varchar for better testing.

func RandScalarType Uses

func RandScalarType(rng *rand.Rand) *types.T

RandScalarType returns a random type value that is not an array or tuple.

func RandSortingEncDatumSlices Uses

func RandSortingEncDatumSlices(
    rng *rand.Rand, numSets, numValsPerSet int,
) ([][]EncDatum, []types.T)

RandSortingEncDatumSlices generates EncDatum slices, each slice with values of the same random type which is key-encodable.

func RandSortingType Uses

func RandSortingType(rng *rand.Rand) *types.T

RandSortingType returns a column type which can be key-encoded.

func RandSortingTypes Uses

func RandSortingTypes(rng *rand.Rand, numCols int) []types.T

RandSortingTypes returns a slice of numCols random ColumnType values which are key-encodable.

func RandType Uses

func RandType(rng *rand.Rand) *types.T

RandType returns a random type value.

func RemapIVarsInTypedExpr Uses

func RemapIVarsInTypedExpr(expr tree.TypedExpr, indexVarMap []int) tree.TypedExpr

RemapIVarsInTypedExpr remaps tree.IndexedVars in expr using indexVarMap. Note that a new expression is returned.

func ResolveNames Uses

func ResolveNames(
    expr tree.Expr,
    sources MultiSourceInfo,
    ivarHelper tree.IndexedVarHelper,
    searchPath sessiondata.SearchPath,
) (tree.Expr, bool, bool, error)

ResolveNames is a wrapper around ResolveNamesUsingVisitor.

func ResolveNamesUsingVisitor Uses

func ResolveNamesUsingVisitor(
    v *NameResolutionVisitor,
    expr tree.Expr,
    sources MultiSourceInfo,
    ivarHelper tree.IndexedVarHelper,
    searchPath sessiondata.SearchPath,
) (tree.Expr, bool, bool, error)

ResolveNamesUsingVisitor resolves the names in the given expression. It returns the resolved expression, whether it found dependent vars, and whether it found stars.

func RunFilter Uses

func RunFilter(filter tree.TypedExpr, evalCtx *tree.EvalContext) (bool, error)

RunFilter runs a filter expression and returns whether the filter passes.

func SanitizeVarFreeExpr Uses

func SanitizeVarFreeExpr(
    expr tree.Expr,
    expectedType *types.T,
    context string,
    semaCtx *tree.SemaContext,
    allowImpure bool,
) (tree.TypedExpr, error)

SanitizeVarFreeExpr verifies that an expression is valid, has the correct type and contains no variable expressions. It returns the type-checked and constant-folded expression.

func Sort Uses

func Sort(data sort.Interface, cancelChecker *CancelChecker)

Sort sorts data. It makes one call to data.Len to determine n, and O(n*log(n)) calls to data.Less and data.Swap. The sort is not guaranteed to be stable.
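
A sketch of sorting a plain slice while wiring in query cancellation (hypothetical helper):

// Sketch. Assumed imports:
//   "context"
//   "sort"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func sortInts(ctx context.Context, xs []int) {
    checker := sqlbase.NewCancelChecker(ctx)
    sqlbase.Sort(sort.IntSlice(xs), checker)
}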

func SplitAtIDHook Uses

func SplitAtIDHook(id uint32, cfg *config.SystemConfig) bool

SplitAtIDHook determines whether a specific descriptor ID should be considered for a split at all. If it is a database or a view table descriptor, it should not be considered.

func SplitSpanIntoSeparateFamilies Uses

func SplitSpanIntoSeparateFamilies(span roachpb.Span, neededFamilies []FamilyID) roachpb.Spans

SplitSpanIntoSeparateFamilies can only be used to split a span representing a single-row point lookup into separate spans that request the particular families in neededFamilies instead of requesting all the families. It is up to the caller to verify that the requested span represents a single-row lookup and that splitting the span is appropriate.

func TableEquivSignatures Uses

func TableEquivSignatures(
    desc *TableDescriptor, index *IndexDescriptor,
) (signatures [][]byte, err error)

TableEquivSignatures returns the equivalence signatures for each interleave ancestor and itself. See IndexKeyEquivSignature for more info.

func TestingMakePrimaryIndexKey Uses

func TestingMakePrimaryIndexKey(desc *TableDescriptor, vals ...interface{}) (roachpb.Key, error)

TestingMakePrimaryIndexKey creates a key prefix that corresponds to a table row (in the primary index); it is intended for tests.

It is exported because it is used by tests outside of this package.

The value types must match the primary key columns (or a prefix of them); supported types are:

- Datum
- bool (converts to DBool)
- int (converts to DInt)
- string (converts to DString)

func UnmarshalColumnValue Uses

func UnmarshalColumnValue(a *DatumAlloc, typ *types.T, value roachpb.Value) (tree.Datum, error)

UnmarshalColumnValue is the counterpart to MarshalColumnValue.

It decodes the value from a roachpb.Value using the type expected by the column. An error is returned if the value's type does not match the column's type.

func ValidateColumnDefType Uses

func ValidateColumnDefType(t *types.T) error

ValidateColumnDefType returns an error if the type of a column definition is not valid. It is checked when a column is created or altered.

type AnalyzeExprFunction Uses

type AnalyzeExprFunction func(
    ctx context.Context,
    raw tree.Expr,
    sources MultiSourceInfo,
    iVarHelper tree.IndexedVarHelper,
    expectedType *types.T,
    requireType bool,
    typingContext string,
) (tree.TypedExpr, error)

AnalyzeExprFunction is the function type used by the CheckHelper during initialization to analyze an expression. See sql/analyze_expr.go for details about the function.

type CancelChecker Uses

type CancelChecker struct {
    // contains filtered or unexported fields
}

CancelChecker is a helper object for repeatedly checking whether the associated context has been canceled or not. Encapsulates all logic for waiting for cancelCheckInterval rows before actually checking for cancellation. The cancellation check has a significant time overhead, so it's not checked in every iteration.
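
A typical usage pattern might look like the following sketch (hypothetical helper), checking once per processed row:

// Sketch. Assumed imports:
//   "context"
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func processRows(ctx context.Context, rows []tree.Datums) error {
    checker := sqlbase.NewCancelChecker(ctx)
    for range rows {
        if err := checker.Check(); err != nil {
            return err // the query's context was canceled
        }
        // ... process the row ...
    }
    return nil
}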

func NewCancelChecker Uses

func NewCancelChecker(ctx context.Context) *CancelChecker

NewCancelChecker returns a new CancelChecker.

func (*CancelChecker) Check Uses

func (c *CancelChecker) Check() error

Check returns an error if the associated query has been canceled.

func (*CancelChecker) Reset Uses

func (c *CancelChecker) Reset(ctx context.Context)

Reset resets this cancel checker with a fresh context.

type CheckHelper Uses

type CheckHelper struct {
    Exprs []tree.TypedExpr
    // contains filtered or unexported fields
}

CheckHelper validates check constraints on rows, on INSERT and UPDATE. CheckHelper has two different modes for executing check constraints:

1. Eval: in this mode, CheckHelper analyzes and evaluates each check
         constraint as a standalone expression. This is used by the
         heuristic planner, and is the backwards-compatible code path.

2. Input: in this mode, each check constraint expression is integrated with
          the input expression as a boolean column. CheckHelper only
          inspects the value of the column; if false, it reports a
          constraint violation error. This mode is used by the cost-based
          optimizer.

In the Eval mode, callers should call NewEvalCheckHelper to initialize a new instance of CheckHelper. For each row, they call LoadEvalRow one or more times to set row values for evaluation, and then call CheckEval to trigger evaluation.

In the Input mode, callers should call NewInputCheckHelper to initialize a new instance of CheckHelper. For each row, they call CheckInput with the boolean check columns.
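
A sketch of the Eval-mode flow (the AnalyzeExprFunction, descriptor, and column map are assumed to be supplied by the planner; the helper itself is hypothetical):

// Sketch. Assumed imports:
//   "context"
//   "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func evalChecks(
    ctx context.Context,
    evalCtx *tree.EvalContext,
    analyzeExpr sqlbase.AnalyzeExprFunction,
    tableDesc *sqlbase.ImmutableTableDescriptor,
    colIdx map[sqlbase.ColumnID]int,
    rows []tree.Datums,
) error {
    helper, err := sqlbase.NewEvalCheckHelper(ctx, analyzeExpr, tableDesc)
    if err != nil {
        return err
    }
    for _, row := range rows {
        if err := helper.LoadEvalRow(colIdx, row, false /* merge */); err != nil {
            return err
        }
        if err := helper.CheckEval(evalCtx); err != nil {
            return err
        }
    }
    return nil
}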

func NewEvalCheckHelper Uses

func NewEvalCheckHelper(
    ctx context.Context, analyzeExpr AnalyzeExprFunction, tableDesc *ImmutableTableDescriptor,
) (*CheckHelper, error)

NewEvalCheckHelper constructs a new instance of the CheckHelper, to be used in the "Eval" mode (see comment for the CheckHelper struct).

func NewInputCheckHelper Uses

func NewInputCheckHelper(checks util.FastIntSet, tableDesc *ImmutableTableDescriptor) *CheckHelper

NewInputCheckHelper constructs a new instance of the CheckHelper, to be used in the "Input" mode (see comment for the CheckHelper struct).

func (*CheckHelper) CheckEval Uses

func (c *CheckHelper) CheckEval(ctx *tree.EvalContext) error

CheckEval evaluates each check constraint expression using values from the current row that was previously set via a call to LoadEvalRow.

func (*CheckHelper) CheckInput Uses

func (c *CheckHelper) CheckInput(checkVals tree.Datums) error

CheckInput expects checkVals to already contain the boolean result of evaluating each check constraint. If any of the boolean values is false, then CheckInput reports a constraint violation error.

func (*CheckHelper) Count Uses

func (c *CheckHelper) Count() int

Count returns the number of check constraints that need to be checked. The count can be less than the number of check constraints defined on the table descriptor if the planner was able to statically prove that some have already been fulfilled.

func (*CheckHelper) IndexedVarEval Uses

func (c *CheckHelper) IndexedVarEval(idx int, ctx *tree.EvalContext) (tree.Datum, error)

IndexedVarEval implements the tree.IndexedVarContainer interface.

func (*CheckHelper) IndexedVarNodeFormatter Uses

func (c *CheckHelper) IndexedVarNodeFormatter(idx int) tree.NodeFormatter

IndexedVarNodeFormatter implements the parser.IndexedVarContainer interface.

func (*CheckHelper) IndexedVarResolvedType Uses

func (c *CheckHelper) IndexedVarResolvedType(idx int) *types.T

IndexedVarResolvedType implements the tree.IndexedVarContainer interface.

func (*CheckHelper) LoadEvalRow Uses

func (c *CheckHelper) LoadEvalRow(colIdx map[ColumnID]int, row tree.Datums, merge bool) error

LoadEvalRow sets values in the IndexedVars used by the CHECK exprs. Any value not passed is set to NULL, unless `merge` is true, in which case it is left unchanged (allowing updating a subset of a row's values).

func (*CheckHelper) NeedsEval Uses

func (c *CheckHelper) NeedsEval() bool

NeedsEval returns true if CheckHelper is operating in the "Eval" mode. See the comment for the CheckHelper struct for more details.

type ColTypeInfo Uses

type ColTypeInfo struct {
    // contains filtered or unexported fields
}

ColTypeInfo is a type that allows multiple representations of column type information (to avoid conversions and allocations).

func ColTypeInfoFromColDescs Uses

func ColTypeInfoFromColDescs(colDescs []ColumnDescriptor) ColTypeInfo

ColTypeInfoFromColDescs creates a ColTypeInfo from []ColumnDescriptor.

func ColTypeInfoFromColTypes Uses

func ColTypeInfoFromColTypes(colTypes []types.T) ColTypeInfo

ColTypeInfoFromColTypes creates a ColTypeInfo from a slice of column types ([]types.T).

func ColTypeInfoFromResCols Uses

func ColTypeInfoFromResCols(resCols ResultColumns) ColTypeInfo

ColTypeInfoFromResCols creates a ColTypeInfo from ResultColumns.

func MakeColTypeInfo Uses

func MakeColTypeInfo(
    tableDesc *ImmutableTableDescriptor, colIDToRowIndex map[ColumnID]int,
) (ColTypeInfo, error)

MakeColTypeInfo returns a ColTypeInfo initialized from the given TableDescriptor and map from column ID to row index.

func (ColTypeInfo) NumColumns Uses

func (ti ColTypeInfo) NumColumns() int

NumColumns returns the number of columns in the type.

func (ColTypeInfo) Type Uses

func (ti ColTypeInfo) Type(idx int) *types.T

Type returns the datum type of the i-th column.

type ColumnDescriptor Uses

type ColumnDescriptor struct {
    Name     string                                           `protobuf:"bytes,1,opt,name=name" json:"name"`
    ID       ColumnID                                         `protobuf:"varint,2,opt,name=id,casttype=ColumnID" json:"id"`
    Type     github_com_cockroachdb_cockroach_pkg_sql_types.T `protobuf:"bytes,3,opt,name=type,customtype=github.com/cockroachdb/cockroach/pkg/sql/types.T" json:"type"`
    Nullable bool                                             `protobuf:"varint,4,opt,name=nullable" json:"nullable"`
    // Default expression to use to populate the column on insert if no
    // value is provided.
    DefaultExpr *string `protobuf:"bytes,5,opt,name=default_expr,json=defaultExpr" json:"default_expr,omitempty"`
    Hidden      bool    `protobuf:"varint,6,opt,name=hidden" json:"hidden"`
    // Ids of sequences used in this column's DEFAULT expression, in calls to nextval().
    UsesSequenceIds []ID `protobuf:"varint,10,rep,name=uses_sequence_ids,json=usesSequenceIds,casttype=ID" json:"uses_sequence_ids,omitempty"`
    // Expression to use to compute the value of this column if this is a
    // computed column.
    ComputeExpr *string `protobuf:"bytes,11,opt,name=compute_expr,json=computeExpr" json:"compute_expr,omitempty"`
}

func ProcessDefaultColumns Uses

func ProcessDefaultColumns(
    cols []ColumnDescriptor,
    tableDesc *ImmutableTableDescriptor,
    txCtx *transform.ExprTransformContext,
    evalCtx *tree.EvalContext,
) ([]ColumnDescriptor, []tree.TypedExpr, error)

ProcessDefaultColumns adds columns with DEFAULT to cols if not present and returns the defaultExprs for cols.

func ProcessTargetColumns Uses

func ProcessTargetColumns(
    tableDesc *ImmutableTableDescriptor, nameList tree.NameList, ensureColumns, allowMutations bool,
) ([]ColumnDescriptor, error)

ProcessTargetColumns returns the column descriptors identified by the given name list. It also checks that a given column name is only listed once. If no column names are given (special case for INSERT) and ensureColumns is set, the descriptors for all visible columns are returned. If allowMutations is set, even columns undergoing mutations are added.

func (*ColumnDescriptor) CheckCanBeFKRef Uses

func (desc *ColumnDescriptor) CheckCanBeFKRef() error

CheckCanBeFKRef returns an error if the given column is a computed column, since computed columns cannot be used in foreign key references.

func (*ColumnDescriptor) ColID Uses

func (desc *ColumnDescriptor) ColID() cat.StableID

ColID is part of the cat.Column interface.

func (*ColumnDescriptor) ColName Uses

func (desc *ColumnDescriptor) ColName() tree.Name

ColName is part of the cat.Column interface.

func (*ColumnDescriptor) ColTypePrecision Uses

func (desc *ColumnDescriptor) ColTypePrecision() int

ColTypePrecision is part of the cat.Column interface.

func (*ColumnDescriptor) ColTypeStr Uses

func (desc *ColumnDescriptor) ColTypeStr() string

ColTypeStr is part of the cat.Column interface.

func (*ColumnDescriptor) ColTypeWidth Uses

func (desc *ColumnDescriptor) ColTypeWidth() int

ColTypeWidth is part of the cat.Column interface.

func (*ColumnDescriptor) ComputedExprStr Uses

func (desc *ColumnDescriptor) ComputedExprStr() string

ComputedExprStr is part of the cat.Column interface.

func (*ColumnDescriptor) DatumType Uses

func (desc *ColumnDescriptor) DatumType() *types.T

DatumType is part of the cat.Column interface.

func (*ColumnDescriptor) DefaultExprStr Uses

func (desc *ColumnDescriptor) DefaultExprStr() string

DefaultExprStr is part of the cat.Column interface.

func (*ColumnDescriptor) Descriptor Uses

func (*ColumnDescriptor) Descriptor() ([]byte, []int)

func (*ColumnDescriptor) Equal Uses

func (this *ColumnDescriptor) Equal(that interface{}) bool

func (*ColumnDescriptor) HasDefault Uses

func (desc *ColumnDescriptor) HasDefault() bool

HasDefault is part of the cat.Column interface.

func (*ColumnDescriptor) HasNullDefault Uses

func (desc *ColumnDescriptor) HasNullDefault() bool

HasNullDefault checks that the column descriptor has a default of NULL.

func (*ColumnDescriptor) IsComputed Uses

func (desc *ColumnDescriptor) IsComputed() bool

IsComputed is part of the cat.Column interface.

func (*ColumnDescriptor) IsHidden Uses

func (desc *ColumnDescriptor) IsHidden() bool

IsHidden is part of the cat.Column interface.

func (*ColumnDescriptor) IsNullable Uses

func (desc *ColumnDescriptor) IsNullable() bool

IsNullable is part of the cat.Column interface.

func (*ColumnDescriptor) Marshal Uses

func (m *ColumnDescriptor) Marshal() (dAtA []byte, err error)

func (*ColumnDescriptor) MarshalTo Uses

func (m *ColumnDescriptor) MarshalTo(dAtA []byte) (int, error)

func (*ColumnDescriptor) ProtoMessage Uses

func (*ColumnDescriptor) ProtoMessage()

func (*ColumnDescriptor) Reset Uses

func (m *ColumnDescriptor) Reset()

func (*ColumnDescriptor) SQLString Uses

func (desc *ColumnDescriptor) SQLString() string

SQLString returns the SQL statement describing the column.

func (*ColumnDescriptor) Size Uses

func (m *ColumnDescriptor) Size() (n int)

func (*ColumnDescriptor) String Uses

func (m *ColumnDescriptor) String() string

func (*ColumnDescriptor) Unmarshal Uses

func (m *ColumnDescriptor) Unmarshal(dAtA []byte) error

func (*ColumnDescriptor) XXX_DiscardUnknown Uses

func (m *ColumnDescriptor) XXX_DiscardUnknown()

func (*ColumnDescriptor) XXX_Marshal Uses

func (m *ColumnDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ColumnDescriptor) XXX_Merge Uses

func (dst *ColumnDescriptor) XXX_Merge(src proto.Message)

func (*ColumnDescriptor) XXX_Size Uses

func (m *ColumnDescriptor) XXX_Size() int

func (*ColumnDescriptor) XXX_Unmarshal Uses

func (m *ColumnDescriptor) XXX_Unmarshal(b []byte) error

type ColumnFamilyDescriptor Uses

type ColumnFamilyDescriptor struct {
    Name string   `protobuf:"bytes,1,opt,name=name" json:"name"`
    ID   FamilyID `protobuf:"varint,2,opt,name=id,casttype=FamilyID" json:"id"`
    // A list of column names of which the family is comprised. This list
    // parallels the column_ids list. If duplicating the storage of the column
    // names here proves to be prohibitive, we could clear this field before
    // saving and reconstruct it after loading.
    ColumnNames []string `protobuf:"bytes,3,rep,name=column_names,json=columnNames" json:"column_names,omitempty"`
    // A list of column ids of which the family is comprised. This list parallels
    // the column_names list.
    ColumnIDs []ColumnID `protobuf:"varint,4,rep,name=column_ids,json=columnIds,casttype=ColumnID" json:"column_ids,omitempty"`
    // If nonzero, the column involved in the single column optimization.
    //
    // Families store columns in a ValueType_TUPLE as repeated <columnID><data>
    // entries. As a space optimization and for backward compatibility, a single
    // column is written without the column id prefix. Because more columns could
    // be added, it would be ambiguous which column was stored when read back in,
    // so this field supplies it.
    DefaultColumnID ColumnID `protobuf:"varint,5,opt,name=default_column_id,json=defaultColumnId,casttype=ColumnID" json:"default_column_id"`
}

ColumnFamilyDescriptor is a set of columns stored together in one kv entry.

func (*ColumnFamilyDescriptor) Descriptor Uses

func (*ColumnFamilyDescriptor) Descriptor() ([]byte, []int)

func (*ColumnFamilyDescriptor) Equal Uses

func (this *ColumnFamilyDescriptor) Equal(that interface{}) bool

func (*ColumnFamilyDescriptor) Marshal Uses

func (m *ColumnFamilyDescriptor) Marshal() (dAtA []byte, err error)

func (*ColumnFamilyDescriptor) MarshalTo Uses

func (m *ColumnFamilyDescriptor) MarshalTo(dAtA []byte) (int, error)

func (*ColumnFamilyDescriptor) ProtoMessage Uses

func (*ColumnFamilyDescriptor) ProtoMessage()

func (*ColumnFamilyDescriptor) Reset Uses

func (m *ColumnFamilyDescriptor) Reset()

func (*ColumnFamilyDescriptor) Size Uses

func (m *ColumnFamilyDescriptor) Size() (n int)

func (*ColumnFamilyDescriptor) String Uses

func (m *ColumnFamilyDescriptor) String() string

func (*ColumnFamilyDescriptor) Unmarshal Uses

func (m *ColumnFamilyDescriptor) Unmarshal(dAtA []byte) error

func (*ColumnFamilyDescriptor) XXX_DiscardUnknown Uses

func (m *ColumnFamilyDescriptor) XXX_DiscardUnknown()

func (*ColumnFamilyDescriptor) XXX_Marshal Uses

func (m *ColumnFamilyDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ColumnFamilyDescriptor) XXX_Merge Uses

func (dst *ColumnFamilyDescriptor) XXX_Merge(src proto.Message)

func (*ColumnFamilyDescriptor) XXX_Size Uses

func (m *ColumnFamilyDescriptor) XXX_Size() int

func (*ColumnFamilyDescriptor) XXX_Unmarshal Uses

func (m *ColumnFamilyDescriptor) XXX_Unmarshal(b []byte) error

type ColumnID Uses

type ColumnID tree.ColumnID

ColumnID is a custom type for ColumnDescriptor IDs.

type ColumnIDs Uses

type ColumnIDs []ColumnID

ColumnIDs is a slice of ColumnDescriptor IDs.

func (ColumnIDs) HasPrefix Uses

func (c ColumnIDs) HasPrefix(input ColumnIDs) bool

HasPrefix returns true if the input list is a prefix of this list.
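
For example (sketch, hypothetical helper), the list (1, 2) is a prefix of (1, 2, 3):

// Sketch. Assumed import:
//   "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
func hasPrefixExample() bool {
    full := sqlbase.ColumnIDs{1, 2, 3}
    return full.HasPrefix(sqlbase.ColumnIDs{1, 2}) // true
}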

func (ColumnIDs) Len Uses

func (c ColumnIDs) Len() int

func (ColumnIDs) Less Uses

func (c ColumnIDs) Less(i, j int) bool

func (ColumnIDs) Swap Uses

func (c ColumnIDs) Swap(i, j int)

type ColumnOrderInfo Uses

type ColumnOrderInfo struct {
    ColIdx    int
    Direction encoding.Direction
}

ColumnOrderInfo describes a column (as an index) and a desired order direction.

type ColumnOrdering Uses

type ColumnOrdering []ColumnOrderInfo

ColumnOrdering is used to describe a desired column ordering. For example,

[]ColumnOrderInfo{ {3, encoding.Descending}, {1, encoding.Ascending} }

represents an ordering first by column 3 (descending), then by column 1 (ascending).

var NoOrdering ColumnOrdering

NoOrdering is used to indicate an empty ColumnOrdering.

type ColumnResolver Uses

type ColumnResolver struct {
    Sources MultiSourceInfo

    // resolverState is modified in-place by the implementation of the
    // tree.ColumnItemResolver interface in resolver.go.
    ResolverState struct {
        ForUpdateOrDelete bool
        SrcIdx            int
        ColIdx            int
        ColSetIdx         int
    }
}

ColumnResolver is a utility struct to be used when resolving column names to point to one of the data sources and one of the column IDs in that data source.

func (*ColumnResolver) FindSourceMatchingName Uses

func (r *ColumnResolver) FindSourceMatchingName(
    ctx context.Context, tn tree.TableName,
) (
    res tree.NumResolutionResults,
    prefix *tree.TableName,
    srcMeta tree.ColumnSourceMeta,
    err error,
)

FindSourceMatchingName is part of the tree.ColumnItemResolver interface.

func (*ColumnResolver) FindSourceProvidingColumn Uses

func (r *ColumnResolver) FindSourceProvidingColumn(
    ctx context.Context, col tree.Name,
) (prefix *tree.TableName, srcMeta tree.ColumnSourceMeta, colHint int, err error)

FindSourceProvidingColumn is part of the tree.ColumnItemResolver interface.

func (*ColumnResolver) Resolve Uses

func (r *ColumnResolver) Resolve(
    ctx context.Context,
    prefix *tree.TableName,
    srcMeta tree.ColumnSourceMeta,
    colHint int,
    col tree.Name,
) (tree.ColumnResolutionResult, error)

Resolve is part of the tree.ColumnItemResolver interface.

type ConstraintDetail Uses

type ConstraintDetail struct {
    Kind        ConstraintType
    Columns     []string
    Details     string
    Unvalidated bool

    // Only populated for PK and Unique Constraints.
    Index *IndexDescriptor

    // Only populated for FK Constraints.
    FK              *ForeignKeyConstraint
    ReferencedTable *TableDescriptor

    // Only populated for Check Constraints.
    CheckConstraint *TableDescriptor_CheckConstraint
}

ConstraintDetail describes a constraint.

type ConstraintToUpdate Uses

type ConstraintToUpdate struct {
    ConstraintType ConstraintToUpdate_ConstraintType `protobuf:"varint,1,req,name=constraint_type,json=constraintType,enum=cockroach.sql.sqlbase.ConstraintToUpdate_ConstraintType" json:"constraint_type"`
    Name           string                            `protobuf:"bytes,2,req,name=name" json:"name"`
    Check          TableDescriptor_CheckConstraint   `protobuf:"bytes,3,opt,name=check" json:"check"`
    // All fields past 3 haven't been persisted before 19.2.
    ForeignKey    ForeignKeyConstraint `protobuf:"bytes,4,opt,name=foreign_key,json=foreignKey" json:"foreign_key"`
    NotNullColumn ColumnID             `protobuf:"varint,6,opt,name=not_null_column,json=notNullColumn,casttype=ColumnID" json:"not_null_column"`
}

ConstraintToUpdate represents a constraint to be added to the table and validated for existing rows. More generally, in the future, when we support adding constraints that are unvalidated for existing rows and can be validated later using VALIDATE CONSTRAINT, this mutation will also represent either adding an unvalidated constraint or validating an existing constraint.

This mutation effects changes only in the backfill step of the schema changer: First, a new version of the table descriptor with the constraint added is published, after all columns being added have been backfilled. After waiting for the constraint to be enforced for writes on all nodes, the constraint is then validated for all existing rows. This ensures that constraints added to columns that are being added are correctly enforced before the column becomes public.

func (*ConstraintToUpdate) Descriptor Uses

func (*ConstraintToUpdate) Descriptor() ([]byte, []int)

func (*ConstraintToUpdate) Equal Uses

func (this *ConstraintToUpdate) Equal(that interface{}) bool

func (*ConstraintToUpdate) Marshal Uses

func (m *ConstraintToUpdate) Marshal() (dAtA []byte, err error)

func (*ConstraintToUpdate) MarshalTo Uses

func (m *ConstraintToUpdate) MarshalTo(dAtA []byte) (int, error)

func (*ConstraintToUpdate) ProtoMessage Uses

func (*ConstraintToUpdate) ProtoMessage()

func (*ConstraintToUpdate) Reset Uses

func (m *ConstraintToUpdate) Reset()

func (*ConstraintToUpdate) Size Uses

func (m *ConstraintToUpdate) Size() (n int)

func (*ConstraintToUpdate) String Uses

func (m *ConstraintToUpdate) String() string

func (*ConstraintToUpdate) Unmarshal Uses

func (m *ConstraintToUpdate) Unmarshal(dAtA []byte) error

func (*ConstraintToUpdate) XXX_DiscardUnknown Uses

func (m *ConstraintToUpdate) XXX_DiscardUnknown()

func (*ConstraintToUpdate) XXX_Marshal Uses

func (m *ConstraintToUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ConstraintToUpdate) XXX_Merge Uses

func (dst *ConstraintToUpdate) XXX_Merge(src proto.Message)

func (*ConstraintToUpdate) XXX_Size Uses

func (m *ConstraintToUpdate) XXX_Size() int

func (*ConstraintToUpdate) XXX_Unmarshal Uses

func (m *ConstraintToUpdate) XXX_Unmarshal(b []byte) error

type ConstraintToUpdate_ConstraintType Uses

type ConstraintToUpdate_ConstraintType int32
const (
    ConstraintToUpdate_CHECK       ConstraintToUpdate_ConstraintType = 0
    ConstraintToUpdate_FOREIGN_KEY ConstraintToUpdate_ConstraintType = 1
    // NOT NULL constraints being added are represented by a dummy check
    // constraint so that a multi-state schema change, including a bulk
    // validation step, can occur. The check field contains the dummy
    // constraint.
    ConstraintToUpdate_NOT_NULL ConstraintToUpdate_ConstraintType = 2
)

func (ConstraintToUpdate_ConstraintType) Enum Uses

func (x ConstraintToUpdate_ConstraintType) Enum() *ConstraintToUpdate_ConstraintType

func (ConstraintToUpdate_ConstraintType) EnumDescriptor Uses

func (ConstraintToUpdate_ConstraintType) EnumDescriptor() ([]byte, []int)

func (ConstraintToUpdate_ConstraintType) String Uses

func (x ConstraintToUpdate_ConstraintType) String() string

func (*ConstraintToUpdate_ConstraintType) UnmarshalJSON Uses

func (x *ConstraintToUpdate_ConstraintType) UnmarshalJSON(data []byte) error

type ConstraintType Uses

type ConstraintType string

ConstraintType is used to identify the type of a constraint.

const (
    // ConstraintTypePK identifies a PRIMARY KEY constraint.
    ConstraintTypePK ConstraintType = "PRIMARY KEY"
    // ConstraintTypeFK identifies a FOREIGN KEY constraint.
    ConstraintTypeFK ConstraintType = "FOREIGN KEY"
    // ConstraintTypeUnique identifies a UNIQUE constraint.
    ConstraintTypeUnique ConstraintType = "UNIQUE"
    // ConstraintTypeCheck identifies a CHECK constraint.
    ConstraintTypeCheck ConstraintType = "CHECK"
)

type ConstraintValidity Uses

type ConstraintValidity int32
const (
    // The constraint is valid for all rows.
    ConstraintValidity_Validated ConstraintValidity = 0
    // The constraint has not yet been validated for all rows (and will not be
    // validated until VALIDATE CONSTRAINT is used).
    ConstraintValidity_Unvalidated ConstraintValidity = 1
    // The constraint was just added, but the validation for existing rows is not
    // yet complete. If validation fails, the constraint will be dropped.
    ConstraintValidity_Validating ConstraintValidity = 2
    // The constraint is being dropped in the schema changer.
    ConstraintValidity_Dropping ConstraintValidity = 3
)

func (ConstraintValidity) Enum Uses

func (x ConstraintValidity) Enum() *ConstraintValidity

func (ConstraintValidity) EnumDescriptor Uses

func (ConstraintValidity) EnumDescriptor() ([]byte, []int)

func (ConstraintValidity) String Uses

func (x ConstraintValidity) String() string

func (*ConstraintValidity) UnmarshalJSON Uses

func (x *ConstraintValidity) UnmarshalJSON(data []byte) error

type DataSourceInfo Uses

type DataSourceInfo struct {
    // SourceColumns match the plan.Columns() 1-to-1. However the column
    // names might be different if the statement renames them using AS.
    SourceColumns ResultColumns

    // SourceAliases indicates to which table alias column ranges
    // belong.
    // These often correspond to the original table names for each
    // column but might be different if the statement renames
    // them using AS.
    SourceAliases SourceAliases

    // ColOffset is the offset of the first column in this DataSourceInfo in the
    // MultiSourceInfo array it is part of.
    // The value is populated and used during name resolution, and shouldn't get
    // touched by anything but the nameResolutionVisitor without care.
    ColOffset int

    // The number of backfill source columns. The backfill columns are
    // always the last columns from SourceColumns.
    NumBackfillColumns int
}

DataSourceInfo provides column metadata for exactly one data source.

func NewSourceInfoForSingleTable Uses

func NewSourceInfoForSingleTable(tn tree.TableName, columns ResultColumns) *DataSourceInfo

NewSourceInfoForSingleTable creates a simple DataSourceInfo which maps the same tableAlias to all columns.

func (*DataSourceInfo) NodeFormatter Uses

func (src *DataSourceInfo) NodeFormatter(colIdx int) tree.NodeFormatter

NodeFormatter returns a tree.NodeFormatter that, when formatted, represents the object at the input column index.

func (*DataSourceInfo) String Uses

func (src *DataSourceInfo) String() string

type DatabaseDescriptor Uses

type DatabaseDescriptor struct {
    Name       string               `protobuf:"bytes,1,opt,name=name" json:"name"`
    ID         ID                   `protobuf:"varint,2,opt,name=id,casttype=ID" json:"id"`
    Privileges *PrivilegeDescriptor `protobuf:"bytes,3,opt,name=privileges" json:"privileges,omitempty"`
}

DatabaseDescriptor represents a namespace (aka database) and is stored in a structured metadata key. The DatabaseDescriptor has a globally-unique ID shared with the TableDescriptor ID. Permissions are applied to all tables in the namespace.

func GetDatabaseDescFromID Uses

func GetDatabaseDescFromID(
    ctx context.Context, protoGetter protoGetter, id ID,
) (*DatabaseDescriptor, error)

GetDatabaseDescFromID retrieves the database descriptor for the database ID passed in using an existing proto getter. Returns an error if the descriptor doesn't exist or if it exists and is not a database.

func MakeSystemDatabaseDesc Uses

func MakeSystemDatabaseDesc() DatabaseDescriptor

MakeSystemDatabaseDesc constructs a copy of the system database descriptor.

func (*DatabaseDescriptor) Descriptor Uses

func (*DatabaseDescriptor) Descriptor() ([]byte, []int)

func (*DatabaseDescriptor) Equal Uses

func (this *DatabaseDescriptor) Equal(that interface{}) bool

func (*DatabaseDescriptor) GetAuditMode Uses

func (desc *DatabaseDescriptor) GetAuditMode() TableDescriptor_AuditMode

GetAuditMode is part of the DescriptorProto interface. This is a stub until per-database auditing is enabled.

func (*DatabaseDescriptor) GetID Uses

func (m *DatabaseDescriptor) GetID() ID

func (*DatabaseDescriptor) GetName Uses

func (m *DatabaseDescriptor) GetName() string

func (*DatabaseDescriptor) GetPrivileges Uses

func (m *DatabaseDescriptor) GetPrivileges() *PrivilegeDescriptor

func (*DatabaseDescriptor) Marshal Uses

func (m *DatabaseDescriptor) Marshal() (dAtA []byte, err error)

func (*DatabaseDescriptor) MarshalTo Uses

func (m *DatabaseDescriptor) MarshalTo(dAtA []byte) (int, error)

func (*DatabaseDescriptor) ProtoMessage Uses

func (*DatabaseDescriptor) ProtoMessage()

func (*DatabaseDescriptor) Reset Uses

func (m *DatabaseDescriptor) Reset()

func (*DatabaseDescriptor) SchemaMeta Uses

func (*DatabaseDescriptor) SchemaMeta()

SchemaMeta implements the tree.SchemaMeta interface.

func (*DatabaseDescriptor) SetID Uses

func (desc *DatabaseDescriptor) SetID(id ID)

SetID implements the DescriptorProto interface.

func (*DatabaseDescriptor) SetName Uses

func (desc *DatabaseDescriptor) SetName(name string)

SetName implements the DescriptorProto interface.

func (*DatabaseDescriptor) Size Uses

func (m *DatabaseDescriptor) Size() (n int)

func (*DatabaseDescriptor) String Uses

func (m *DatabaseDescriptor) String() string

func (*DatabaseDescriptor) TypeName Uses

func (desc *DatabaseDescriptor) TypeName() string

TypeName returns the plain type of this descriptor.

func (*DatabaseDescriptor) Unmarshal Uses

func (m *DatabaseDescriptor) Unmarshal(dAtA []byte) error

func (*DatabaseDescriptor) Validate Uses

func (desc *DatabaseDescriptor) Validate() error

Validate validates that the database descriptor is well formed. Checks include validating the database name and verifying that there is at least one read and write user.

func (*DatabaseDescriptor) XXX_DiscardUnknown Uses

func (m *DatabaseDescriptor) XXX_DiscardUnknown()

func (*DatabaseDescriptor) XXX_Marshal Uses

func (m *DatabaseDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DatabaseDescriptor) XXX_Merge Uses

func (dst *DatabaseDescriptor) XXX_Merge(src proto.Message)

func (*DatabaseDescriptor) XXX_Size Uses

func (m *DatabaseDescriptor) XXX_Size() int

func (*DatabaseDescriptor) XXX_Unmarshal Uses

func (m *DatabaseDescriptor) XXX_Unmarshal(b []byte) error

type DatabaseKey Uses

type DatabaseKey struct {
    // contains filtered or unexported fields
}

DatabaseKey implements DescriptorKey.

func NewDatabaseKey Uses

func NewDatabaseKey(name string) DatabaseKey

NewDatabaseKey returns a new DatabaseKey.

func (DatabaseKey) Key Uses

func (dk DatabaseKey) Key() roachpb.Key

Key implements DescriptorKey interface.

func (DatabaseKey) Name Uses

func (dk DatabaseKey) Name() string

Name implements DescriptorKey interface.
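
A minimal sketch of using a DatabaseKey; the exact key layout is an implementation detail, but the DescriptorKey contract (a lookup key plus the plain name) is assumed:

dk := sqlbase.NewDatabaseKey("mydb")
fmt.Println(dk.Name()) // "mydb"
key := dk.Key()        // roachpb.Key used to look up the database's descriptor ID
_ = key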

type DatumAlloc Uses

type DatumAlloc struct {
    // contains filtered or unexported fields
}

DatumAlloc provides batch allocation of datum pointers, amortizing the cost of the allocations.
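
A short sketch of batched allocation; the loop bound is arbitrary and only illustrates the amortization:

var a sqlbase.DatumAlloc
datums := make(tree.Datums, 0, 1024)
for i := 0; i < 1024; i++ {
    // Each call hands out a pointer carved from a larger pre-allocated block,
    // avoiding one heap allocation per datum.
    datums = append(datums, a.NewDInt(tree.DInt(i)))
}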

func (*DatumAlloc) NewDBitArray Uses

func (a *DatumAlloc) NewDBitArray(v tree.DBitArray) *tree.DBitArray

NewDBitArray allocates a DBitArray.

func (*DatumAlloc) NewDBytes Uses

func (a *DatumAlloc) NewDBytes(v tree.DBytes) *tree.DBytes

NewDBytes allocates a DBytes.

func (*DatumAlloc) NewDDate Uses

func (a *DatumAlloc) NewDDate(v tree.DDate) *tree.DDate

NewDDate allocates a DDate.

func (*DatumAlloc) NewDDecimal Uses

func (a *DatumAlloc) NewDDecimal(v tree.DDecimal) *tree.DDecimal

NewDDecimal allocates a DDecimal.

func (*DatumAlloc) NewDFloat Uses

func (a *DatumAlloc) NewDFloat(v tree.DFloat) *tree.DFloat

NewDFloat allocates a DFloat.

func (*DatumAlloc) NewDIPAddr Uses

func (a *DatumAlloc) NewDIPAddr(v tree.DIPAddr) *tree.DIPAddr

NewDIPAddr allocates a DIPAddr.

func (*DatumAlloc) NewDInt Uses

func (a *DatumAlloc) NewDInt(v tree.DInt) *tree.DInt

NewDInt allocates a DInt.

func (*DatumAlloc) NewDInterval Uses

func (a *DatumAlloc) NewDInterval(v tree.DInterval) *tree.DInterval

NewDInterval allocates a DInterval.

func (*DatumAlloc) NewDJSON Uses

func (a *DatumAlloc) NewDJSON(v tree.DJSON) *tree.DJSON

NewDJSON allocates a DJSON.

func (*DatumAlloc) NewDName Uses

func (a *DatumAlloc) NewDName(v tree.DString) tree.Datum

NewDName allocates a DName.

func (*DatumAlloc) NewDOid Uses

func (a *DatumAlloc) NewDOid(v tree.DOid) tree.Datum

NewDOid allocates a DOid.

func (*DatumAlloc) NewDString Uses

func (a *DatumAlloc) NewDString(v tree.DString) *tree.DString

NewDString allocates a DString.

func (*DatumAlloc) NewDTime Uses

func (a *DatumAlloc) NewDTime(v tree.DTime) *tree.DTime

NewDTime allocates a DTime.

func (*DatumAlloc) NewDTimestamp Uses

func (a *DatumAlloc) NewDTimestamp(v tree.DTimestamp) *tree.DTimestamp

NewDTimestamp allocates a DTimestamp.

func (*DatumAlloc) NewDTimestampTZ Uses

func (a *DatumAlloc) NewDTimestampTZ(v tree.DTimestampTZ) *tree.DTimestampTZ

NewDTimestampTZ allocates a DTimestampTZ.

func (*DatumAlloc) NewDTuple Uses

func (a *DatumAlloc) NewDTuple(v tree.DTuple) *tree.DTuple

NewDTuple allocates a DTuple.

func (*DatumAlloc) NewDUuid Uses

func (a *DatumAlloc) NewDUuid(v tree.DUuid) *tree.DUuid

NewDUuid allocates a DUuid.

func (*DatumAlloc) NewDatums Uses

func (a *DatumAlloc) NewDatums(num int) tree.Datums

NewDatums allocates Datums of the specified size.

type DatumEncoding Uses

type DatumEncoding int32

DatumEncoding identifies the encoding used for an EncDatum.

const (
    // Indicates that the datum is encoded using the order-preserving encoding
    // used for keys (ascending order).
    DatumEncoding_ASCENDING_KEY DatumEncoding = 0
    // Indicates that the datum is encoded using the order-preserving encoding
    // used for keys (descending order).
    DatumEncoding_DESCENDING_KEY DatumEncoding = 1
    // Indicates that the datum is encoded using the encoding used for values.
    DatumEncoding_VALUE DatumEncoding = 2
)

func EncodingDirToDatumEncoding Uses

func EncodingDirToDatumEncoding(dir encoding.Direction) DatumEncoding

EncodingDirToDatumEncoding returns an equivalent DatumEncoding for the given encoding direction.

func RandDatumEncoding Uses

func RandDatumEncoding(rng *rand.Rand) DatumEncoding

RandDatumEncoding returns a random DatumEncoding value.

func (DatumEncoding) Enum Uses

func (x DatumEncoding) Enum() *DatumEncoding

func (DatumEncoding) EnumDescriptor Uses

func (DatumEncoding) EnumDescriptor() ([]byte, []int)

func (DatumEncoding) String Uses

func (x DatumEncoding) String() string

func (*DatumEncoding) UnmarshalJSON Uses

func (x *DatumEncoding) UnmarshalJSON(data []byte) error

type Descriptor Uses

type Descriptor struct {
    // Types that are valid to be assigned to Union:
    //	*Descriptor_Table
    //	*Descriptor_Database
    Union isDescriptor_Union `protobuf_oneof:"union"`
}

Descriptor is a union type holding either a table or database descriptor.

func WrapDescriptor Uses

func WrapDescriptor(descriptor DescriptorProto) *Descriptor

WrapDescriptor fills in a Descriptor.

func (*Descriptor) Descriptor Uses

func (*Descriptor) Descriptor() ([]byte, []int)

func (*Descriptor) Equal Uses

func (this *Descriptor) Equal(that interface{}) bool

func (*Descriptor) GetDatabase Uses

func (m *Descriptor) GetDatabase() *DatabaseDescriptor

func (*Descriptor) GetID Uses

func (desc *Descriptor) GetID() ID

GetID returns the ID of the descriptor.

func (*Descriptor) GetName Uses

func (desc *Descriptor) GetName() string

GetName returns the Name of the descriptor.

func (*Descriptor) GetTable Uses

func (m *Descriptor) GetTable() *TableDescriptor

func (*Descriptor) GetUnion Uses

func (m *Descriptor) GetUnion() isDescriptor_Union

func (*Descriptor) Marshal Uses

func (m *Descriptor) Marshal() (dAtA []byte, err error)

func (*Descriptor) MarshalTo Uses

func (m *Descriptor) MarshalTo(dAtA []byte) (int, error)

func (Descriptor) NameResolutionResult Uses

func (Descriptor) NameResolutionResult()

NameResolutionResult implements the tree.NameResolutionResult interface.

func (*Descriptor) ProtoMessage Uses

func (*Descriptor) ProtoMessage()

func (*Descriptor) Reset Uses

func (m *Descriptor) Reset()

func (Descriptor) SchemaMeta Uses

func (Descriptor) SchemaMeta()

SchemaMeta implements the tree.SchemaMeta interface.

func (*Descriptor) Size Uses

func (m *Descriptor) Size() (n int)

func (*Descriptor) String Uses

func (m *Descriptor) String() string

func (*Descriptor) Table Uses

func (desc *Descriptor) Table(ts hlc.Timestamp) *TableDescriptor

Table is a replacement for GetTable() which seeks to ensure that clients which unmarshal Descriptor structs properly set the ModificationTime on tables based on the MVCC timestamp at which the descriptor was read.

A linter should ensure that GetTable() is not called.
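
A hedged sketch of the intended usage; rawValue and readTimestamp are placeholders for the descriptor bytes and the hlc.Timestamp at which they were read from KV:

var desc sqlbase.Descriptor
if err := protoutil.Unmarshal(rawValue, &desc); err != nil {
    // handle the error
}
if tbl := desc.Table(readTimestamp); tbl != nil {
    // tbl.ModificationTime reflects the MVCC timestamp the descriptor was read at.
} else if db := desc.GetDatabase(); db != nil {
    // The union holds a database descriptor instead.
}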

func (*Descriptor) Unmarshal Uses

func (m *Descriptor) Unmarshal(dAtA []byte) error

func (*Descriptor) XXX_DiscardUnknown Uses

func (m *Descriptor) XXX_DiscardUnknown()

func (*Descriptor) XXX_Marshal Uses

func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Descriptor) XXX_Merge Uses

func (dst *Descriptor) XXX_Merge(src proto.Message)

func (*Descriptor) XXX_OneofFuncs Uses

func (*Descriptor) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*Descriptor) XXX_Size Uses

func (m *Descriptor) XXX_Size() int

func (*Descriptor) XXX_Unmarshal Uses

func (m *Descriptor) XXX_Unmarshal(b []byte) error

type DescriptorKey Uses

type DescriptorKey interface {
    Key() roachpb.Key
    Name() string
}

DescriptorKey is the interface implemented by both databaseKey and tableKey. It is used to easily get the descriptor key and plain name.

type DescriptorMutation Uses

type DescriptorMutation struct {
    // Types that are valid to be assigned to Descriptor_:
    //	*DescriptorMutation_Column
    //	*DescriptorMutation_Index
    //	*DescriptorMutation_Constraint
    Descriptor_ isDescriptorMutation_Descriptor_ `protobuf_oneof:"descriptor"`
    State       DescriptorMutation_State         `protobuf:"varint,3,opt,name=state,enum=cockroach.sql.sqlbase.DescriptorMutation_State" json:"state"`
    Direction   DescriptorMutation_Direction     `protobuf:"varint,4,opt,name=direction,enum=cockroach.sql.sqlbase.DescriptorMutation_Direction" json:"direction"`
    // The mutation id used to group mutations that should be applied together.
    // This is used for situations like creating a unique column, which
    // involve adding two mutations: one for the column, and another for the
    // unique constraint index.
    MutationID MutationID `protobuf:"varint,5,opt,name=mutation_id,json=mutationId,casttype=MutationID" json:"mutation_id"`
    // Indicates that this mutation is a rollback.
    Rollback bool `protobuf:"varint,7,opt,name=rollback" json:"rollback"`
}

A DescriptorMutation represents a column or an index that has either been added or dropped and hasn't yet transitioned into a stable state: completely backfilled and visible, or completely deleted. A table descriptor in the middle of a schema change will have a DescriptorMutation FIFO queue containing each column/index descriptor being added or dropped. Mutations for constraints work differently from columns and indexes; see the documentation for ConstraintToUpdate.
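
A sketch of walking a table's mutation queue; tableDesc is assumed to be a TableDescriptor exposing its Mutations slice, as stored in the descriptor proto:

for _, m := range tableDesc.Mutations {
    switch {
    case m.GetColumn() != nil:
        // A column is being added or dropped.
    case m.GetIndex() != nil:
        // An index is being added or dropped.
    case m.GetConstraint() != nil:
        // A constraint is being added/validated or dropped.
    }
    if m.Direction == sqlbase.DescriptorMutation_ADD &&
        m.State == sqlbase.DescriptorMutation_DELETE_AND_WRITE_ONLY {
        // The descriptor is writable but not yet publicly readable.
    }
}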

func (*DescriptorMutation) Descriptor Uses

func (*DescriptorMutation) Descriptor() ([]byte, []int)

func (*DescriptorMutation) Equal Uses

func (this *DescriptorMutation) Equal(that interface{}) bool

func (*DescriptorMutation) GetColumn Uses

func (m *DescriptorMutation) GetColumn() *ColumnDescriptor

func (*DescriptorMutation) GetConstraint Uses

func (m *DescriptorMutation) GetConstraint() *ConstraintToUpdate

func (*DescriptorMutation) GetDescriptor_ Uses

func (m *DescriptorMutation) GetDescriptor_() isDescriptorMutation_Descriptor_

func (*DescriptorMutation) GetIndex Uses

func (m *DescriptorMutation) GetIndex() *IndexDescriptor

func (*DescriptorMutation) Marshal Uses

func (m *DescriptorMutation) Marshal() (dAtA []byte, err error)

func (*DescriptorMutation) MarshalTo Uses

func (m *DescriptorMutation) MarshalTo(dAtA []byte) (int, error)

func (*DescriptorMutation) ProtoMessage Uses

func (*DescriptorMutation) ProtoMessage()

func (*DescriptorMutation) Reset Uses

func (m *DescriptorMutation) Reset()

func (*DescriptorMutation) Size Uses

func (m *DescriptorMutation) Size() (n int)

func (*DescriptorMutation) String Uses

func (m *DescriptorMutation) String() string

func (*DescriptorMutation) Unmarshal Uses

func (m *DescriptorMutation) Unmarshal(dAtA []byte) error

func (*DescriptorMutation) XXX_DiscardUnknown Uses

func (m *DescriptorMutation) XXX_DiscardUnknown()

func (*DescriptorMutation) XXX_Marshal Uses

func (m *DescriptorMutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DescriptorMutation) XXX_Merge Uses

func (dst *DescriptorMutation) XXX_Merge(src proto.Message)

func (*DescriptorMutation) XXX_OneofFuncs Uses

func (*DescriptorMutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*DescriptorMutation) XXX_Size Uses

func (m *DescriptorMutation) XXX_Size() int

func (*DescriptorMutation) XXX_Unmarshal Uses

func (m *DescriptorMutation) XXX_Unmarshal(b []byte) error

type DescriptorMutation_Column Uses

type DescriptorMutation_Column struct {
    Column *ColumnDescriptor `protobuf:"bytes,1,opt,name=column,oneof"`
}

func (*DescriptorMutation_Column) Equal Uses

func (this *DescriptorMutation_Column) Equal(that interface{}) bool

func (*DescriptorMutation_Column) MarshalTo Uses

func (m *DescriptorMutation_Column) MarshalTo(dAtA []byte) (int, error)

func (*DescriptorMutation_Column) Size Uses

func (m *DescriptorMutation_Column) Size() (n int)

type DescriptorMutation_Constraint Uses

type DescriptorMutation_Constraint struct {
    Constraint *ConstraintToUpdate `protobuf:"bytes,8,opt,name=constraint,oneof"`
}

func (*DescriptorMutation_Constraint) Equal Uses

func (this *DescriptorMutation_Constraint) Equal(that interface{}) bool

func (*DescriptorMutation_Constraint) MarshalTo Uses

func (m *DescriptorMutation_Constraint) MarshalTo(dAtA []byte) (int, error)

func (*DescriptorMutation_Constraint) Size Uses

func (m *DescriptorMutation_Constraint) Size() (n int)

type DescriptorMutation_Direction Uses

type DescriptorMutation_Direction int32

Direction of mutation.

const (
    // Not used.
    DescriptorMutation_NONE DescriptorMutation_Direction = 0
    // Descriptor is being added.
    DescriptorMutation_ADD DescriptorMutation_Direction = 1
    // Descriptor is being dropped.
    DescriptorMutation_DROP DescriptorMutation_Direction = 2
)

func (DescriptorMutation_Direction) Enum Uses

func (x DescriptorMutation_Direction) Enum() *DescriptorMutation_Direction

func (DescriptorMutation_Direction) EnumDescriptor Uses

func (DescriptorMutation_Direction) EnumDescriptor() ([]byte, []int)

func (DescriptorMutation_Direction) String Uses

func (x DescriptorMutation_Direction) String() string

func (*DescriptorMutation_Direction) UnmarshalJSON Uses

func (x *DescriptorMutation_Direction) UnmarshalJSON(data []byte) error

type DescriptorMutation_Index Uses

type DescriptorMutation_Index struct {
    Index *IndexDescriptor `protobuf:"bytes,2,opt,name=index,oneof"`
}

func (*DescriptorMutation_Index) Equal Uses

func (this *DescriptorMutation_Index) Equal(that interface{}) bool

func (*DescriptorMutation_Index) MarshalTo Uses

func (m *DescriptorMutation_Index) MarshalTo(dAtA []byte) (int, error)

func (*DescriptorMutation_Index) Size Uses

func (m *DescriptorMutation_Index) Size() (n int)

type DescriptorMutation_State Uses

type DescriptorMutation_State int32

A descriptor within a mutation is unavailable for reads, writes and deletes. It is only available for implicit (internal to the database) writes and deletes depending on the state of the mutation.

const (
    // Not used.
    DescriptorMutation_UNKNOWN DescriptorMutation_State = 0
    // Operations can use this invisible descriptor to implicitly
    // delete entries.
    // Column: A descriptor in this state is invisible to
    // INSERT and UPDATE. DELETE must delete a column in this state.
    // Index: A descriptor in this state is invisible to an INSERT.
    // UPDATE must delete the old value of the index but doesn't write
    // the new value. DELETE must delete the index.
    //
    // When deleting a descriptor, all descriptor related data
    // (column or index data) can only be mass deleted once
    // all the nodes have transitioned to the DELETE_ONLY state.
    DescriptorMutation_DELETE_ONLY DescriptorMutation_State = 1
    // Operations can use this invisible descriptor to implicitly
    // write and delete entries.
    // Column: INSERT will populate this column with the default
    // value. UPDATE ignores this descriptor. DELETE must delete
    // the column.
    // Index: INSERT, UPDATE and DELETE treat this index like any
    // other index.
    //
    // When adding a descriptor, all descriptor related data
    // (column default or index data) can only be backfilled once
    // all nodes have transitioned into the DELETE_AND_WRITE_ONLY state.
    DescriptorMutation_DELETE_AND_WRITE_ONLY DescriptorMutation_State = 2
)

func (DescriptorMutation_State) Enum Uses

func (x DescriptorMutation_State) Enum() *DescriptorMutation_State

func (DescriptorMutation_State) EnumDescriptor Uses

func (DescriptorMutation_State) EnumDescriptor() ([]byte, []int)

func (DescriptorMutation_State) String Uses

func (x DescriptorMutation_State) String() string

func (*DescriptorMutation_State) UnmarshalJSON Uses

func (x *DescriptorMutation_State) UnmarshalJSON(data []byte) error

type DescriptorProto Uses

type DescriptorProto interface {
    protoutil.Message
    GetPrivileges() *PrivilegeDescriptor
    GetID() ID
    SetID(ID)
    TypeName() string
    GetName() string
    SetName(string)
    GetAuditMode() TableDescriptor_AuditMode
}

DescriptorProto is the interface implemented by both DatabaseDescriptor and TableDescriptor. TODO(marc): this is getting rather large.
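
As a sketch, a helper can treat databases and tables uniformly through this interface:

func describe(d sqlbase.DescriptorProto) string {
    // Works for both DatabaseDescriptor and TableDescriptor.
    return fmt.Sprintf("%s %q (id %d)", d.TypeName(), d.GetName(), d.GetID())
}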

type DescriptorVersion Uses

type DescriptorVersion uint32

DescriptorVersion is a custom type for TableDescriptor Versions.

type Descriptor_Database Uses

type Descriptor_Database struct {
    Database *DatabaseDescriptor `protobuf:"bytes,2,opt,name=database,oneof"`
}

func (*Descriptor_Database) Equal Uses

func (this *Descriptor_Database) Equal(that interface{}) bool

func (*Descriptor_Database) MarshalTo Uses

func (m *Descriptor_Database) MarshalTo(dAtA []byte) (int, error)

func (*Descriptor_Database) Size Uses

func (m *Descriptor_Database) Size() (n int)

type Descriptor_Table Uses

type Descriptor_Table struct {
    Table *TableDescriptor `protobuf:"bytes,1,opt,name=table,oneof"`
}

func (*Descriptor_Table) Equal Uses

func (this *Descriptor_Table) Equal(that interface{}) bool

func (*Descriptor_Table) MarshalTo Uses

func (m *Descriptor_Table) MarshalTo(dAtA []byte) (int, error)

func (*Descriptor_Table) Size Uses

func (m *Descriptor_Table) Size() (n int)

type DummyEvalPlanner Uses

type DummyEvalPlanner struct{}

DummyEvalPlanner implements the tree.EvalPlanner interface by returning errors.

func (*DummyEvalPlanner) EvalSubquery Uses

func (ep *DummyEvalPlanner) EvalSubquery(expr *tree.Subquery) (tree.Datum, error)

EvalSubquery is part of the tree.EvalPlanner interface.

func (*DummyEvalPlanner) LookupSchema Uses

func (ep *DummyEvalPlanner) LookupSchema(
    ctx context.Context, dbName, scName string,
) (bool, tree.SchemaMeta, error)

LookupSchema is part of the tree.EvalDatabase interface.

func (*DummyEvalPlanner) ParseQualifiedTableName Uses

func (ep *DummyEvalPlanner) ParseQualifiedTableName(
    ctx context.Context, sql string,
) (*tree.TableName, error)

ParseQualifiedTableName is part of the tree.EvalDatabase interface.

func (*DummyEvalPlanner) ParseType Uses

func (ep *DummyEvalPlanner) ParseType(sql string) (*types.T, error)

ParseType is part of the tree.EvalPlanner interface.

func (*DummyEvalPlanner) ResolveTableName Uses

func (ep *DummyEvalPlanner) ResolveTableName(
    ctx context.Context, tn *tree.TableName,
) (tree.ID, error)

ResolveTableName is part of the tree.EvalDatabase interface.

type DummySequenceOperators Uses

type DummySequenceOperators struct{}

DummySequenceOperators implements the tree.SequenceOperators interface by returning errors.

func (*DummySequenceOperators) GetLatestValueInSessionForSequence Uses

func (so *DummySequenceOperators) GetLatestValueInSessionForSequence(
    ctx context.Context, seqName *tree.TableName,
) (int64, error)

GetLatestValueInSessionForSequence implements the tree.SequenceOperators interface.

func (*DummySequenceOperators) IncrementSequence Uses

func (so *DummySequenceOperators) IncrementSequence(
    ctx context.Context, seqName *tree.TableName,
) (int64, error)

IncrementSequence is part of the tree.SequenceOperators interface.

func (*DummySequenceOperators) LookupSchema Uses

func (so *DummySequenceOperators) LookupSchema(
    ctx context.Context, dbName, scName string,
) (bool, tree.SchemaMeta, error)

LookupSchema is part of the tree.EvalDatabase interface.

func (*DummySequenceOperators) ParseQualifiedTableName Uses

func (so *DummySequenceOperators) ParseQualifiedTableName(
    ctx context.Context, sql string,
) (*tree.TableName, error)

ParseQualifiedTableName is part of the tree.EvalDatabase interface.

func (*DummySequenceOperators) ResolveTableName Uses

func (so *DummySequenceOperators) ResolveTableName(
    ctx context.Context, tn *tree.TableName,
) (tree.ID, error)

ResolveTableName is part of the tree.EvalDatabase interface.

func (*DummySequenceOperators) SetSequenceValue Uses

func (so *DummySequenceOperators) SetSequenceValue(
    ctx context.Context, seqName *tree.TableName, newVal int64, isCalled bool,
) error

SetSequenceValue implements the tree.SequenceOperators interface.

type DummySessionAccessor Uses

type DummySessionAccessor struct{}

DummySessionAccessor implements the tree.EvalSessionAccessor interface by returning errors.

func (*DummySessionAccessor) GetSessionVar Uses

func (ep *DummySessionAccessor) GetSessionVar(
    _ context.Context, _ string, _ bool,
) (bool, string, error)

GetSessionVar is part of the tree.EvalSessionAccessor interface.

func (*DummySessionAccessor) SetSessionVar Uses

func (ep *DummySessionAccessor) SetSessionVar(_ context.Context, _, _ string) error

SetSessionVar is part of the tree.EvalSessionAccessor interface.

type EncDatum Uses

type EncDatum struct {

    // Decoded datum.
    Datum tree.Datum
    // contains filtered or unexported fields
}

EncDatum represents a datum that is "backed" by an encoding and/or by a tree.Datum. It allows "passing through" a Datum without decoding and reencoding.

func DatumToEncDatum Uses

func DatumToEncDatum(ctyp *types.T, d tree.Datum) EncDatum

DatumToEncDatum initializes an EncDatum with the given Datum.

func EncDatumFromBuffer Uses

func EncDatumFromBuffer(typ *types.T, enc DatumEncoding, buf []byte) (EncDatum, []byte, error)

EncDatumFromBuffer initializes an EncDatum with an encoding that is possibly followed by other data. Similar to EncDatumFromEncoded, except that this function figures out where the encoding stops and returns a slice for the rest of the buffer.

func EncDatumFromEncoded Uses

func EncDatumFromEncoded(enc DatumEncoding, encoded []byte) EncDatum

EncDatumFromEncoded initializes an EncDatum with the given encoded value. The encoded value is stored as a shallow copy, so the caller must make sure the slice is not modified for the lifetime of the EncDatum. The underlying Datum is nil.

func EncDatumValueFromBufferWithOffsetsAndType Uses

func EncDatumValueFromBufferWithOffsetsAndType(
    buf []byte, typeOffset int, dataOffset int, typ encoding.Type,
) (EncDatum, []byte, error)

EncDatumValueFromBufferWithOffsetsAndType is just like calling EncDatumFromBuffer with DatumEncoding_VALUE, except it expects that you pass in the result of calling DecodeValueTag on the input buf. Use this if you have already called DecodeValueTag on buf, to avoid calling it more than necessary.

func IntEncDatum Uses

func IntEncDatum(i int) EncDatum

IntEncDatum returns an EncDatum representation of DInt(i).

func NullEncDatum Uses

func NullEncDatum() EncDatum

NullEncDatum returns an EncDatum representation of tree.DNull.

func RandEncDatum Uses

func RandEncDatum(rng *rand.Rand) (EncDatum, *types.T)

RandEncDatum generates a random EncDatum (of a random type).

func RandSortingEncDatumSlice Uses

func RandSortingEncDatumSlice(rng *rand.Rand, numVals int) ([]EncDatum, *types.T)

RandSortingEncDatumSlice generates a slice of random EncDatum values of the same random type which is key-encodable.

func StrEncDatum Uses

func StrEncDatum(s string) EncDatum

StrEncDatum returns an EncDatum representation of DString(s).

func (*EncDatum) BytesEqual Uses

func (ed *EncDatum) BytesEqual(b []byte) bool

BytesEqual is true if the EncDatum's encoded field is equal to the input.

func (*EncDatum) Compare Uses

func (ed *EncDatum) Compare(
    typ *types.T, a *DatumAlloc, evalCtx *tree.EvalContext, rhs *EncDatum,
) (int, error)

Compare returns:

-1 if the receiver is less than rhs,
0  if the receiver is equal to rhs,
+1 if the receiver is greater than rhs.

func (*EncDatum) Encode Uses

func (ed *EncDatum) Encode(
    typ *types.T, a *DatumAlloc, enc DatumEncoding, appendTo []byte,
) ([]byte, error)

Encode appends the encoded datum to the given slice using the requested encoding. Note: DatumEncoding_VALUE encodings are not unique because they can contain a column ID so they should not be used to test for equality.

func (*EncDatum) EncodedString Uses

func (ed *EncDatum) EncodedString() string

EncodedString returns an immutable copy of this EncDatum's encoded field.

func (*EncDatum) Encoding Uses

func (ed *EncDatum) Encoding() (DatumEncoding, bool)

Encoding returns the encoding that is already available, if any; the bool return value indicates whether one is set.

func (*EncDatum) EnsureDecoded Uses

func (ed *EncDatum) EnsureDecoded(typ *types.T, a *DatumAlloc) error

EnsureDecoded ensures that the Datum field is set (decoding if it is not).
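
Putting the pieces above together, a hedged sketch of the key-encoding round trip; types.Int from the sql/types package and the error handling are assumptions made for illustration:

func encDatumRoundTrip() error {
    var alloc sqlbase.DatumAlloc
    src := sqlbase.DatumToEncDatum(types.Int, tree.NewDInt(7))

    // Encode with the ascending key encoding.
    buf, err := src.Encode(types.Int, &alloc, sqlbase.DatumEncoding_ASCENDING_KEY, nil)
    if err != nil {
        return err
    }

    // Rebuild an EncDatum from the raw bytes and decode it lazily.
    dst := sqlbase.EncDatumFromEncoded(sqlbase.DatumEncoding_ASCENDING_KEY, buf)
    if err := dst.EnsureDecoded(types.Int, &alloc); err != nil {
        return err
    }
    // dst.Datum now holds the decoded value, tree.DInt(7).
    return nil
}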

func (*EncDatum) GetInt Uses

func (ed *EncDatum) GetInt() (int64, error)

GetInt decodes an EncDatum that is known to be of integer type and returns the integer value. It is a more convenient and more efficient alternative to calling EnsureDecoded and casting the Datum.

func (*EncDatum) IsNull Uses

func (ed *EncDatum) IsNull() bool

IsNull returns true if the EncDatum value is NULL. Equivalent to checking if ed.Datum is DNull after calling EnsureDecoded.

func (*EncDatum) IsUnset Uses

func (ed *EncDatum) IsUnset() bool

IsUnset returns true if neither EncDatumFromEncoded nor DatumToEncDatum has been called.

func (EncDatum) Size Uses

func (ed EncDatum) Size() uintptr

Size returns a lower bound on the total size of the receiver in bytes, including memory referenced by the receiver.

func (*EncDatum) String Uses

func (ed *EncDatum) String(typ *types.T) string

func (*EncDatum) UnsetDatum Uses

func (ed *EncDatum) UnsetDatum()

UnsetDatum ensures subsequent IsUnset() calls return false.

type EncDatumRow Uses

type EncDatumRow []EncDatum

EncDatumRow is a row of EncDatums.

func RandEncDatumRowOfTypes Uses

func RandEncDatumRowOfTypes(rng *rand.Rand, types []types.T) EncDatumRow

RandEncDatumRowOfTypes generates a slice of random EncDatum values for the corresponding type in types.

func (EncDatumRow) Compare Uses

func (r EncDatumRow) Compare(
    types []types.T,
    a *DatumAlloc,
    ordering ColumnOrdering,
    evalCtx *tree.EvalContext,
    rhs EncDatumRow,
) (int, error)

Compare returns the relative ordering of two EncDatumRows according to a ColumnOrdering:

-1 if the receiver comes before the rhs in the ordering,
+1 if the receiver comes after the rhs in the ordering,
0 if the relative order does not matter (i.e. the two rows have the same
  values for the columns in the ordering).

Note that a return value of 0 does not (in general) imply that the rows are equal; for example, rows [1 1 5] and [1 1 6] when compared against ordering {{0, asc}, {1, asc}} (i.e. ordered by first column and then by second column).
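
The example above, expressed as a hedged code sketch; the ColumnOrderInfo composite literals and the zero-value tree.EvalContext are assumptions made for illustration:

ordering := sqlbase.ColumnOrdering{
    {ColIdx: 0, Direction: encoding.Ascending},
    {ColIdx: 1, Direction: encoding.Ascending},
}
typs := []types.T{*types.Int, *types.Int, *types.Int}
lhs := sqlbase.EncDatumRow{sqlbase.IntEncDatum(1), sqlbase.IntEncDatum(1), sqlbase.IntEncDatum(5)}
rhs := sqlbase.EncDatumRow{sqlbase.IntEncDatum(1), sqlbase.IntEncDatum(1), sqlbase.IntEncDatum(6)}

var alloc sqlbase.DatumAlloc
evalCtx := &tree.EvalContext{} // assumed sufficient for integer comparisons
cmp, err := lhs.Compare(typs, &alloc, ordering, evalCtx, rhs)
// cmp == 0 (assuming err == nil): only the first two columns participate in
// the ordering, even though the rows differ in the third.
fmt.Println(cmp, err)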

func (EncDatumRow) CompareToDatums Uses

func (r EncDatumRow) CompareToDatums(
    types []types.T,
    a *DatumAlloc,
    ordering ColumnOrdering,
    evalCtx *tree.EvalContext,
    rhs tree.Datums,
) (int, error)

CompareToDatums is a version of Compare which compares against decoded Datums.

func (EncDatumRow) Copy Uses

func (r EncDatumRow) Copy() EncDatumRow

Copy makes a copy of this EncDatumRow. Convenient for tests. Use an EncDatumRowAlloc in non-test code.

func (EncDatumRow) Size Uses

func (r EncDatumRow) Size() uintptr

Size returns a lower bound on the total size of all EncDatums in the receiver, including any memory they reference.

func (EncDatumRow) String Uses

func (r EncDatumRow) String(types []types.T) string

type EncDatumRowAlloc Uses

type EncDatumRowAlloc struct {
    // contains filtered or unexported fields
}

EncDatumRowAlloc is a helper that speeds up allocation of EncDatumRows (preferably of the same length).

func (*EncDatumRowAlloc) AllocRow Uses

func (a *EncDatumRowAlloc) AllocRow(cols int) EncDatumRow

AllocRow allocates an EncDatumRow with the given number of columns.

func (*EncDatumRowAlloc) CopyRow Uses

func (a *EncDatumRowAlloc) CopyRow(row EncDatumRow) EncDatumRow

CopyRow allocates an EncDatumRow and copies the given row to it.

type EncDatumRowContainer Uses

type EncDatumRowContainer struct {
    // contains filtered or unexported fields
}

EncDatumRowContainer holds rows and can cycle through them. Must be Reset upon initialization.

func (*EncDatumRowContainer) IsEmpty Uses

func (c *EncDatumRowContainer) IsEmpty() bool

IsEmpty returns whether the container is "empty", which means that it's about to cycle through its rows again on the next Pop.

func (*EncDatumRowContainer) Peek Uses

func (c *EncDatumRowContainer) Peek() EncDatumRow

Peek returns the current element at the top of the container.

func (*EncDatumRowContainer) Pop Uses

func (c *EncDatumRowContainer) Pop() EncDatumRow

Pop returns the next row from the container. Will cycle through the rows again if we reach the end.

func (*EncDatumRowContainer) Push Uses

func (c *EncDatumRowContainer) Push(row EncDatumRow)

Push adds a row to the container.

func (*EncDatumRowContainer) Reset Uses

func (c *EncDatumRowContainer) Reset()

Reset clears the container and resets the indexes. Must be called upon creating a container.

type EncDatumRows Uses

type EncDatumRows []EncDatumRow

EncDatumRows is a slice of EncDatumRow values, all having the same schema.

func GenEncDatumRowsInt Uses

func GenEncDatumRowsInt(inputRows [][]int) EncDatumRows

GenEncDatumRowsInt converts rows of ints to rows of EncDatum DInts. If an int is negative, the corresponding value is NULL.

func MakeIntRows Uses

func MakeIntRows(numRows, numCols int) EncDatumRows

MakeIntRows constructs a numRows x numCols table where rows[i][j] = i + j.
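
For instance, as a quick sketch:

rows := sqlbase.MakeIntRows(3, 2) // rows[i][j] = i + j, i.e. [[0 1] [1 2] [2 3]]
_ = rows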

func MakeRandIntRows Uses

func MakeRandIntRows(rng *rand.Rand, numRows int, numCols int) EncDatumRows

MakeRandIntRows constructs a numRows x numCols table where the values are random.

func MakeRandIntRowsInRange Uses

func MakeRandIntRowsInRange(
    rng *rand.Rand, numRows int, numCols int, maxNum int, nullProbability float64,
) EncDatumRows

MakeRandIntRowsInRange constructs a numRows x numCols table where the values are random integers in the range [0, maxNum).

func MakeRepeatedIntRows Uses

func MakeRepeatedIntRows(n int, numRows int, numCols int) EncDatumRows

MakeRepeatedIntRows constructs a numRows x numCols table where blocks of n consecutive rows have the same value.

func RandEncDatumRows Uses

func RandEncDatumRows(rng *rand.Rand, numRows, numCols int) (EncDatumRows, []types.T)

RandEncDatumRows generates EncDatumRows where all rows follow the same random schema ([]types.T).

func RandEncDatumRowsOfTypes Uses

func RandEncDatumRowsOfTypes(rng *rand.Rand, numRows int, types []types.T) EncDatumRows

RandEncDatumRowsOfTypes generates EncDatumRows, each row with values of the corresponding type in types.

func (EncDatumRows) String Uses

func (r EncDatumRows) String(types []types.T) string

type FamilyID Uses

type FamilyID uint32

FamilyID is a custom type for ColumnFamilyDescriptor IDs.

func NeededColumnFamilyIDs Uses

func NeededColumnFamilyIDs(
    colIdxMap map[ColumnID]int, families []ColumnFamilyDescriptor, neededCols util.FastIntSet,
) []FamilyID

NeededColumnFamilyIDs returns the IDs of the column families that must be scanned to load the set of needed columns (neededCols).
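
A hedged sketch, assuming neededCols holds the column indices from colIdxMap (as the row-fetching code does) and that tableDesc exposes its Families slice:

colIdxMap := map[sqlbase.ColumnID]int{1: 0, 2: 1, 3: 2}
var needed util.FastIntSet
needed.Add(0) // index of column ID 1
needed.Add(2) // index of column ID 3
famIDs := sqlbase.NeededColumnFamilyIDs(colIdxMap, tableDesc.Families, needed)
_ = famIDs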

type ForeignKeyConstraint Uses

type ForeignKeyConstraint struct {
    OriginTableID       ID                         `protobuf:"varint,1,opt,name=origin_table_id,json=originTableId,casttype=ID" json:"origin_table_id"`
    OriginColumnIDs     []ColumnID                 `protobuf:"varint,2,rep,name=origin_column_ids,json=originColumnIds,casttype=ColumnID" json:"origin_column_ids,omitempty"`
    ReferencedColumnIDs []ColumnID                 `protobuf:"varint,3,rep,name=referenced_column_ids,json=referencedColumnIds,casttype=ColumnID" json:"referenced_column_ids,omitempty"`
    ReferencedTableID   ID                         `protobuf:"varint,4,opt,name=referenced_table_id,json=referencedTableId,casttype=ID" json:"referenced_table_id"`
    Name                string                     `protobuf:"bytes,5,opt,name=name" json:"name"`
    Validity            ConstraintValidity         `protobuf:"varint,6,opt,name=validity,enum=cockroach.sql.sqlbase.ConstraintValidity" json:"validity"`
    OnDelete            ForeignKeyReference_Action `protobuf:"varint,7,opt,name=on_delete,json=onDelete,enum=cockroach.sql.sqlbase.ForeignKeyReference_Action" json:"on_delete"`
    OnUpdate            ForeignKeyReference_Action `protobuf:"varint,8,opt,name=on_update,json=onUpdate,enum=cockroach.sql.sqlbase.ForeignKeyReference_Action" json:"on_update"`
    // This is only important for composite keys. For all prior matches before
    // the addition of this value, MATCH SIMPLE will be used.
    Match ForeignKeyReference_Match `protobuf:"varint,9,opt,name=match,enum=cockroach.sql.sqlbase.ForeignKeyReference_Match" json:"match"`
    // LegacyOriginIndex is the ID of the index used for the FK on the origin
    // table. In versions 19.1 and earlier, foreign keys were represented by
    // fields on the index that they use. In versions 19.2 and later, we preserve
    // the semantics of the older FKs which were tied to indexes by specifying
    // the index as a field on this proto, since the migration process to have
    // top-level FK fields on the table descriptor requires two releases. In 20.1,
    // when all 19.2 nodes will be correctly handling the new FK representation,
    // we will perform a migration to upgrade all table descriptors.
    LegacyOriginIndex IndexID `protobuf:"varint,10,opt,name=legacy_origin_index,json=legacyOriginIndex,casttype=IndexID" json:"legacy_origin_index"`
    // LegacyReferencedIndex is the ID of the index used for the FK on the
    // referenced side. See the comment for LegacyOriginIndex.
    LegacyReferencedIndex IndexID `protobuf:"varint,11,opt,name=legacy_referenced_index,json=legacyReferencedIndex,casttype=IndexID" json:"legacy_referenced_index"`
    // These fields are set when upgrading an old-style FK (stored on the index)
    // into this kind. The purpose is to permit validation that downgrading this
    // representation to the old representation (which happens when the cluster
    // is in a mixed-version state containing VersionTopLevelForeignKeys) was done
    // without creating any accidental changes to the foreign key references.
    // They are only read and written in a mixed 19.1/19.2 cluster.
    LegacyUpgradedFromOriginReference     ForeignKeyReference `protobuf:"bytes,12,opt,name=legacy_upgraded_from_origin_reference,json=legacyUpgradedFromOriginReference" json:"legacy_upgraded_from_origin_reference"`             // Deprecated: Do not use.
    LegacyUpgradedFromReferencedReference ForeignKeyReference `protobuf:"bytes,13,opt,name=legacy_upgraded_from_referenced_reference,json=legacyUpgradedFromReferencedReference" json:"legacy_upgraded_from_referenced_reference"` // Deprecated: Do not use.
}

ForeignKeyConstraint is the new (as of 19.2 and VersionTopLevelForeignKeys) representation for foreign keys. It's stored on the TableDescriptor and is designed to be agnostic to which indexes are available on both the origin and referenced tables, so that the optimizer can have full freedom to choose the best possible index to satisfy constraint checks at runtime.
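
A sketch of constructing the new-style representation directly; the IDs and the constraint name are placeholders:

fk := sqlbase.ForeignKeyConstraint{
    Name:                "fk_orders_customer",
    OriginTableID:       sqlbase.ID(53),
    OriginColumnIDs:     []sqlbase.ColumnID{2},
    ReferencedTableID:   sqlbase.ID(52),
    ReferencedColumnIDs: []sqlbase.ColumnID{1},
    Validity:            sqlbase.ConstraintValidity_Validated,
    OnDelete:            sqlbase.ForeignKeyReference_CASCADE,
    OnUpdate:            sqlbase.ForeignKeyReference_NO_ACTION,
    Match:               sqlbase.ForeignKeyReference_SIMPLE,
}
_ = fk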

func (*ForeignKeyConstraint) Descriptor Uses

func (*ForeignKeyConstraint) Descriptor() ([]byte, []int)

func (*ForeignKeyConstraint) Equal Uses

func (this *ForeignKeyConstraint) Equal(that interface{}) bool

func (*ForeignKeyConstraint) Marshal Uses

func (m *ForeignKeyConstraint) Marshal() (dAtA []byte, err error)

func (*ForeignKeyConstraint) MarshalTo Uses

func (m *ForeignKeyConstraint) MarshalTo(dAtA []byte) (int, error)

func (*ForeignKeyConstraint) ProtoMessage Uses

func (*ForeignKeyConstraint) ProtoMessage()

func (*ForeignKeyConstraint) Reset Uses

func (m *ForeignKeyConstraint) Reset()

func (*ForeignKeyConstraint) Size Uses

func (m *ForeignKeyConstraint) Size() (n int)

func (*ForeignKeyConstraint) String Uses

func (m *ForeignKeyConstraint) String() string

func (*ForeignKeyConstraint) Unmarshal Uses

func (m *ForeignKeyConstraint) Unmarshal(dAtA []byte) error

func (*ForeignKeyConstraint) XXX_DiscardUnknown Uses

func (m *ForeignKeyConstraint) XXX_DiscardUnknown()

func (*ForeignKeyConstraint) XXX_Marshal Uses

func (m *ForeignKeyConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ForeignKeyConstraint) XXX_Merge Uses

func (dst *ForeignKeyConstraint) XXX_Merge(src proto.Message)

func (*ForeignKeyConstraint) XXX_Size Uses

func (m *ForeignKeyConstraint) XXX_Size() int

func (*ForeignKeyConstraint) XXX_Unmarshal Uses

func (m *ForeignKeyConstraint) XXX_Unmarshal(b []byte) error

type ForeignKeyReference Uses

type ForeignKeyReference struct {
    Table    ID                 `protobuf:"varint,1,opt,name=table,casttype=ID" json:"table"`
    Index    IndexID            `protobuf:"varint,2,opt,name=index,casttype=IndexID" json:"index"`
    Name     string             `protobuf:"bytes,3,opt,name=name" json:"name"`
    Validity ConstraintValidity `protobuf:"varint,4,opt,name=validity,enum=cockroach.sql.sqlbase.ConstraintValidity" json:"validity"`
    // If this FK only uses a prefix of the columns in its index, we record how
    // many to avoid spuriously counting the additional cols as used by this FK.
    SharedPrefixLen int32                      `protobuf:"varint,5,opt,name=shared_prefix_len,json=sharedPrefixLen" json:"shared_prefix_len"`
    OnDelete        ForeignKeyReference_Action `protobuf:"varint,6,opt,name=on_delete,json=onDelete,enum=cockroach.sql.sqlbase.ForeignKeyReference_Action" json:"on_delete"`
    OnUpdate        ForeignKeyReference_Action `protobuf:"varint,7,opt,name=on_update,json=onUpdate,enum=cockroach.sql.sqlbase.ForeignKeyReference_Action" json:"on_update"`
    // This is only important for composite keys. For all prior matches before
    // the addition of this value, MATCH SIMPLE will be used.
    Match ForeignKeyReference_Match `protobuf:"varint,8,opt,name=match,enum=cockroach.sql.sqlbase.ForeignKeyReference_Match" json:"match"`
}

func (*ForeignKeyReference) Descriptor Uses

func (*ForeignKeyReference) Descriptor() ([]byte, []int)

func (*ForeignKeyReference) Equal Uses

func (this *ForeignKeyReference) Equal(that interface{}) bool

func (ForeignKeyReference) IsSet Uses

func (f ForeignKeyReference) IsSet() bool

IsSet returns whether or not the foreign key actually references a table.

func (*ForeignKeyReference) Marshal Uses

func (m *ForeignKeyReference) Marshal() (dAtA []byte, err error)

func (*ForeignKeyReference) MarshalTo Uses

func (m *ForeignKeyReference) MarshalTo(dAtA []byte) (int, error)

func (*ForeignKeyReference) ProtoMessage Uses

func (*ForeignKeyReference) ProtoMessage()

func (*ForeignKeyReference) Reset Uses

func (m *ForeignKeyReference) Reset()

func (*ForeignKeyReference) Size Uses

func (m *ForeignKeyReference) Size() (n int)

func (*ForeignKeyReference) String Uses

func (m *ForeignKeyReference) String() string

func (*ForeignKeyReference) Unmarshal Uses

func (m *ForeignKeyReference) Unmarshal(dAtA []byte) error

func (*ForeignKeyReference) XXX_DiscardUnknown Uses

func (m *ForeignKeyReference) XXX_DiscardUnknown()

func (*ForeignKeyReference) XXX_Marshal Uses

func (m *ForeignKeyReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ForeignKeyReference) XXX_Merge Uses

func (dst *ForeignKeyReference) XXX_Merge(src proto.Message)

func (*ForeignKeyReference) XXX_Size Uses

func (m *ForeignKeyReference) XXX_Size() int

func (*ForeignKeyReference) XXX_Unmarshal Uses

func (m *ForeignKeyReference) XXX_Unmarshal(b []byte) error

type ForeignKeyReference_Action Uses

type ForeignKeyReference_Action int32

const (
    ForeignKeyReference_NO_ACTION   ForeignKeyReference_Action = 0
    ForeignKeyReference_RESTRICT    ForeignKeyReference_Action = 1
    ForeignKeyReference_SET_NULL    ForeignKeyReference_Action = 2
    ForeignKeyReference_SET_DEFAULT ForeignKeyReference_Action = 3
    ForeignKeyReference_CASCADE     ForeignKeyReference_Action = 4
)

func (ForeignKeyReference_Action) Enum Uses

func (x ForeignKeyReference_Action) Enum() *ForeignKeyReference_Action

func (ForeignKeyReference_Action) EnumDescriptor Uses

func (ForeignKeyReference_Action) EnumDescriptor() ([]byte, []int)

func (ForeignKeyReference_Action) MarshalJSON Uses

func (x ForeignKeyReference_Action) MarshalJSON() ([]byte, error)

func (ForeignKeyReference_Action) String Uses

func (x ForeignKeyReference_Action) String() string

String implements the fmt.Stringer interface.

func (*ForeignKeyReference_Action) UnmarshalJSON Uses

func (x *ForeignKeyReference_Action) UnmarshalJSON(data []byte) error

type ForeignKeyReference_Match Uses

type ForeignKeyReference_Match int32

Match is the algorithm used to compare composite keys.

const (
    ForeignKeyReference_SIMPLE  ForeignKeyReference_Match = 0
    ForeignKeyReference_FULL    ForeignKeyReference_Match = 1
    ForeignKeyReference_PARTIAL ForeignKeyReference_Match = 2
)

func (ForeignKeyReference_Match) Enum Uses

func (x ForeignKeyReference_Match) Enum() *ForeignKeyReference_Match

func (ForeignKeyReference_Match) EnumDescriptor Uses

func (ForeignKeyReference_Match) EnumDescriptor() ([]byte, []int)

func (ForeignKeyReference_Match) MarshalJSON Uses

func (x ForeignKeyReference_Match) MarshalJSON() ([]byte, error)

func (ForeignKeyReference_Match) String Uses

func (x ForeignKeyReference_Match) String() string

String implements the fmt.Stringer interface.

func (*ForeignKeyReference_Match) UnmarshalJSON Uses

func (x *ForeignKeyReference_Match) UnmarshalJSON(data []byte) error

type FormatVersion Uses

type FormatVersion uint32

FormatVersion is a custom type for TableDescriptor versions of the sql to key:value mapping.

go:generate stringer -type=FormatVersion

const (

    // BaseFormatVersion corresponds to the encoding described in
    // https://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-value-storage/.
    BaseFormatVersion FormatVersion
    // FamilyFormatVersion corresponds to the encoding described in
    // https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20151214_sql_column_families.md
    FamilyFormatVersion
    // InterleavedFormatVersion corresponds to the encoding described in
    // https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160624_sql_interleaved_tables.md
    InterleavedFormatVersion
)

func (FormatVersion) String Uses

func (i FormatVersion) String() string

type ID Uses

type ID tree.ID

ID is a custom type for {Database,Table}Descriptor IDs.

const InvalidID ID = 0

InvalidID is the uninitialised descriptor id.

func LookupSystemTableDescriptorID Uses

func LookupSystemTableDescriptorID(dbID ID, tableName string) ID

LookupSystemTableDescriptorID uses the lookup cache above to bypass a KV lookup when resolving the name of system tables.

type IDs Uses

type IDs []ID

IDs is a sortable list of IDs.

func (IDs) Len Uses

func (ids IDs) Len() int

func (IDs) Less Uses

func (ids IDs) Less(i, j int) bool

func (IDs) Swap Uses

func (ids IDs) Swap(i, j int)

type ImmutableTableDescriptor Uses

type ImmutableTableDescriptor struct {
    TableDescriptor

    // ReadableColumns is a list of columns (including those undergoing a schema change)
    // which can be scanned. Columns in the process of a schema change
    // are all set to nullable while column backfilling is still in
    // progress, as mutation columns may have NULL values.
    ReadableColumns []ColumnDescriptor
    // contains filtered or unexported fields
}

ImmutableTableDescriptor is a custom type for TableDescriptors. It holds precomputed values, and the underlying TableDescriptor should be treated as immutable.

func GetImmutableTableDescriptor Uses

func GetImmutableTableDescriptor(
    kvDB *client.DB, database string, table string,
) *ImmutableTableDescriptor

GetImmutableTableDescriptor retrieves an immutable table descriptor directly from the KV layer.

func NewImmutableTableDescriptor Uses

func NewImmutableTableDescriptor(tbl TableDescriptor) *ImmutableTableDescriptor

NewImmutableTableDescriptor returns an ImmutableTableDescriptor from the given TableDescriptor.
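
A minimal sketch, assuming tblDesc is a TableDescriptor already loaded elsewhere:

imm := sqlbase.NewImmutableTableDescriptor(tblDesc)
for _, col := range imm.ReadableColumns {
    // Columns under an in-progress schema change appear here marked nullable.
    _ = col.Name
}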

func (*ImmutableTableDescriptor) ActiveChecks Uses

func (desc *ImmutableTableDescriptor) ActiveChecks() []TableDescriptor_CheckConstraint

ActiveChecks returns a list of all check constraints that should be enforced on writes (including constraints being added/validated). The columns referenced by the returned checks are writable, but not necessarily public.

func (*ImmutableTableDescriptor) DeletableColumns Uses

func (desc *ImmutableTableDescriptor) DeletableColumns() []ColumnDescriptor

DeletableColumns returns a list of public and non-public columns.

func (*ImmutableTableDescriptor) DeletableIndexes Uses

func (desc *ImmutableTableDescriptor) DeletableIndexes() []IndexDescriptor

DeletableIndexes returns a list of public and non-public indexes.

func (*ImmutableTableDescriptor) DeleteOnlyIndexes Uses

func (desc *ImmutableTableDescriptor) DeleteOnlyIndexes() []IndexDescriptor

DeleteOnlyIndexes returns a list of delete-only mutation indexes.

func (*ImmutableTableDescriptor) FindReadableColumnByID Uses

func (desc *ImmutableTableDescriptor) FindReadableColumnByID(
    id ColumnID,
) (*ColumnDescriptor, bool, error)

FindReadableColumnByID finds the readable column with specified ID. The column may be undergoing a schema change and is marked nullable regardless of its configuration. It returns true if the column is undergoing a schema change.

func (*ImmutableTableDescriptor) MakeFirstMutationPublic Uses

func (desc *ImmutableTableDescriptor) MakeFirstMutationPublic(
    includeConstraints bool,
) (*MutableTableDescriptor, error)

MakeFirstMutationPublic creates a MutableTableDescriptor from the ImmutableTableDescriptor by making the first mutation public. This is useful when running SQL over data associated with a schema mutation that is not yet public, for example during data validation and error reporting.

func (*ImmutableTableDescriptor) MutationColumns Uses

func (desc *ImmutableTableDescriptor) MutationColumns() []ColumnDescriptor

MutationColumns returns a list of mutation columns.

func (*ImmutableTableDescriptor) TableDesc Uses

func (desc *ImmutableTableDescriptor) TableDesc() *TableDescriptor

TableDesc implements the ObjectDescriptor interface.

func (*ImmutableTableDescriptor) WritableColumns Uses

func (desc *ImmutableTableDescriptor) WritableColumns() []ColumnDescriptor

WritableColumns returns a list of public and write-only mutation columns.

func (*ImmutableTableDescriptor) WritableIndexes Uses

func (desc *ImmutableTableDescriptor) WritableIndexes() []IndexDescriptor

WritableIndexes returns a list of public and write-only mutation indexes.

type IndexDescriptor Uses

type IndexDescriptor struct {
    Name   string  `protobuf:"bytes,1,opt,name=name" json:"name"`
    ID     IndexID `protobuf:"varint,2,opt,name=id,casttype=IndexID" json:"id"`
    Unique bool    `protobuf:"varint,3,opt,name=unique" json:"unique"`
    // An ordered list of column names of which the index is comprised; these
    // columns do not include any additional stored columns (which are in
    // stored_column_names). This list parallels the column_ids list.
    //
    // Note: if duplicating the storage of the column names here proves to be
    // prohibitive, we could clear this field before saving and reconstruct it
    // after loading.
    ColumnNames []string `protobuf:"bytes,4,rep,name=column_names,json=columnNames" json:"column_names,omitempty"`
    // The sort direction of each column in column_names.
    ColumnDirections []IndexDescriptor_Direction `protobuf:"varint,8,rep,name=column_directions,json=columnDirections,enum=cockroach.sql.sqlbase.IndexDescriptor_Direction" json:"column_directions,omitempty"`
    // An ordered list of column names which the index stores in addition to the
    // columns which are explicitly part of the index (STORING clause). Only used
    // for secondary indexes.
    StoreColumnNames []string `protobuf:"bytes,5,rep,name=store_column_names,json=storeColumnNames" json:"store_column_names,omitempty"`
    // An ordered list of column IDs of which the index is comprised. This list
    // parallels the column_names list and does not include any additional stored
    // columns.
    ColumnIDs []ColumnID `protobuf:"varint,6,rep,name=column_ids,json=columnIds,casttype=ColumnID" json:"column_ids,omitempty"`
    // An ordered list of IDs for the additional columns associated with the
    // index:
    //  - implicit columns, which are all the primary key columns that are not
    //    already part of the index (i.e. PrimaryIndex.column_ids - column_ids).
    //  - stored columns (the columns in store_column_names) if this index uses the
    //    old STORING encoding (key-encoded data).
    //
    // Only used for secondary indexes.
    // For non-unique indexes, these columns are appended to the key.
    // For unique indexes, these columns are stored in the value (unless the key
    // contains a NULL value: then the extra columns are appended to the key to
    // unique-ify it).
    // This distinction exists because we want to be able to insert an entry using
    // a single conditional put on the key.
    ExtraColumnIDs []ColumnID `protobuf:"varint,7,rep,name=extra_column_ids,json=extraColumnIds,casttype=ColumnID" json:"extra_column_ids,omitempty"`
    // An ordered list of column IDs that parallels store_column_names if this
    // index uses the new STORING encoding (value-encoded data, always in the KV
    // value).
    StoreColumnIDs []ColumnID `protobuf:"varint,14,rep,name=store_column_ids,json=storeColumnIds,casttype=ColumnID" json:"store_column_ids,omitempty"`
    // CompositeColumnIDs contains an ordered list of IDs of columns that appear
    // in the index and have a composite encoding. Includes IDs from both
    // column_ids and extra_column_ids.
    CompositeColumnIDs []ColumnID `protobuf:"varint,13,rep,name=composite_column_ids,json=compositeColumnIds,casttype=ColumnID" json:"composite_column_ids,omitempty"`
    // ForeignKey and ReferencedBy are deprecated and not stored from 19.2 onward.
    ForeignKey   ForeignKeyReference   `protobuf:"bytes,9,opt,name=foreign_key,json=foreignKey" json:"foreign_key"`        // Deprecated: Do not use.
    ReferencedBy []ForeignKeyReference `protobuf:"bytes,10,rep,name=referenced_by,json=referencedBy" json:"referenced_by"` // Deprecated: Do not use.
    // Interleave, if it's not the zero value, describes how this index's data is
    // interleaved into another index's data.
    Interleave InterleaveDescriptor `protobuf:"bytes,11,opt,name=interleave" json:"interleave"`
    // InterleavedBy contains a reference to every table/index that is interleaved
    // into this one.
    InterleavedBy []ForeignKeyReference `protobuf:"bytes,12,rep,name=interleaved_by,json=interleavedBy" json:"interleaved_by"`
    // Partitioning, if it's not the zero value, describes how this index's data
    // is partitioned into spans of keys each addressable by zone configs.
    Partitioning PartitioningDescriptor `protobuf:"bytes,15,opt,name=partitioning" json:"partitioning"`
    // Type is the type of index, inverted or forward.
    Type IndexDescriptor_Type `protobuf:"varint,16,opt,name=type,enum=cockroach.sql.sqlbase.IndexDescriptor_Type" json:"type"`
    // CreatedExplicitly specifies whether this index was created explicitly
    // (i.e. via 'CREATE INDEX' statement).
    CreatedExplicitly bool `protobuf:"varint,17,opt,name=created_explicitly,json=createdExplicitly" json:"created_explicitly"`
}

IndexDescriptor describes an index (primary or secondary).

Sample field values on the following table:

CREATE TABLE t (
  k1 INT NOT NULL,   // column ID: 1
  k2 INT NOT NULL,   // column ID: 2
  u INT NULL,        // column ID: 3
  v INT NULL,        // column ID: 4
  w INT NULL,        // column ID: 5
  CONSTRAINT "primary" PRIMARY KEY (k1, k2),
  INDEX k1v (k1, v) STORING (w),
  FAMILY "primary" (k1, k2, u, v, w)
)

Primary index:

name:                primary
id:                  1
unique:              true
column_names:        k1, k2
column_directions:   ASC, ASC
column_ids:          1, 2   // k1, k2

[old STORING encoding] Index k1v (k1, v) STORING (w):

name:                k1v
id:                  2
unique:              false
column_names:        k1, v
column_directions:   ASC, ASC
store_column_names:  w
column_ids:          1, 4   // k1, v
extra_column_ids:    2, 5   // k2, w

[new STORING encoding] Index k1v (k1, v) STORING (w):

name:                k1v
id:                  2
unique:              false
column_names:        k1, v
column_directions:   ASC, ASC
store_column_names:  w
column_ids:          1, 4   // k1, v
extra_column_ids:    2      // k2
store_column_ids:    5      // w

func FindFKOriginIndex Uses

func FindFKOriginIndex(
    originTable *TableDescriptor, originColIDs ColumnIDs,
) (*IndexDescriptor, error)

FindFKOriginIndex finds the first index in the supplied originTable that can satisfy an outgoing foreign key of the supplied column ids.

func FindFKReferencedIndex Uses

func FindFKReferencedIndex(
    referencedTable *TableDescriptor, referencedColIDs ColumnIDs,
) (*IndexDescriptor, error)

FindFKReferencedIndex finds the first index in the supplied referencedTable that can satisfy a foreign key of the supplied column ids.

func (*IndexDescriptor) ColNamesFormat Uses

func (desc *IndexDescriptor) ColNamesFormat(ctx *tree.FmtCtx)

ColNamesFormat writes a string describing the column names and directions in this index to the given buffer.

func (*IndexDescriptor) ColNamesString Uses

func (desc *IndexDescriptor) ColNamesString() string

ColNamesString returns a string describing the column names and directions in this index.

func (*IndexDescriptor) ContainsColumnID Uses

func (desc *IndexDescriptor) ContainsColumnID(colID ColumnID) bool

ContainsColumnID returns true if the index descriptor contains the specified column ID either in its explicit column IDs, the extra column IDs, or the stored column IDs.

func (*IndexDescriptor) Descriptor Uses

func (*IndexDescriptor) Descriptor() ([]byte, []int)

func (*IndexDescriptor) Equal Uses

func (this *IndexDescriptor) Equal(that interface{}) bool

func (*IndexDescriptor) FillColumns Uses

func (desc *IndexDescriptor) FillColumns(elems tree.IndexElemList) error

FillColumns sets the column names and directions in desc.

func (*IndexDescriptor) FindPartitionByName Uses

func (desc *IndexDescriptor) FindPartitionByName(name string) *PartitioningDescriptor

FindPartitionByName searches this index descriptor for a partition whose name is the input and returns it, or nil if no match is found.

func (*IndexDescriptor) FullColumnIDs Uses

func (desc *IndexDescriptor) FullColumnIDs() ([]ColumnID, []IndexDescriptor_Direction)

FullColumnIDs returns the index column IDs including any extra (implicit or stored (old STORING encoding)) column IDs for non-unique indexes. It also returns the direction with which each column was encoded.

func (*IndexDescriptor) HasOldStoredColumns Uses

func (desc *IndexDescriptor) HasOldStoredColumns() bool

HasOldStoredColumns returns whether the index has stored columns in the old format (data encoded the same way as if they were in an implicit column).

func (*IndexDescriptor) IsInterleaved Uses

func (desc *IndexDescriptor) IsInterleaved() bool

IsInterleaved returns whether the index is interleaved or not.

func (*IndexDescriptor) Marshal Uses

func (m *IndexDescriptor) Marshal() (dAtA []byte, err error)

func (*IndexDescriptor) MarshalTo Uses

func (m *IndexDescriptor) MarshalTo(dAtA []byte) (int, error)

func (*IndexDescriptor) ProtoMessage Uses

func (*IndexDescriptor) ProtoMessage()

func (*IndexDescriptor) Reset Uses

func (m *IndexDescriptor) Reset()

func (*IndexDescriptor) RunOverAllColumns Uses

func (desc *IndexDescriptor) RunOverAllColumns(fn func(id ColumnID) error) error

RunOverAllColumns applies its argument fn to each of the column IDs in desc. If there is an error, that error is returned immediately.
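
For illustration, a small sketch (the helper name is made up) that uses RunOverAllColumns to collect every column ID an index references:

package example

import "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"

// allIndexColumnIDs gathers the key, extra, and stored column IDs of an index.
func allIndexColumnIDs(idx *sqlbase.IndexDescriptor) []sqlbase.ColumnID {
    var ids []sqlbase.ColumnID
    _ = idx.RunOverAllColumns(func(id sqlbase.ColumnID) error {
        ids = append(ids, id)
        return nil // returning a non-nil error here would stop the iteration
    })
    return ids
}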

func (*IndexDescriptor) SQLString Uses

func (desc *IndexDescriptor) SQLString(tableName *tree.TableName) string

SQLString returns the SQL string describing this index. If non-empty, "ON tableName" is included in the output in the correct place.

func (*IndexDescriptor) Size Uses

func (m *IndexDescriptor) Size() (n int)

func (*IndexDescriptor) String Uses

func (m *IndexDescriptor) String() string

func (*IndexDescriptor) Unmarshal Uses

func (m *IndexDescriptor) Unmarshal(dAtA []byte) error

func (*IndexDescriptor) XXX_DiscardUnknown Uses

func (m *IndexDescriptor) XXX_DiscardUnknown()

func (*IndexDescriptor) XXX_Marshal Uses

func (m *IndexDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*IndexDescriptor) XXX_Merge Uses

func (dst *IndexDescriptor) XXX_Merge(src proto.Message)

func (*IndexDescriptor) XXX_Size Uses

func (m *IndexDescriptor) XXX_Size() int

func (*IndexDescriptor) XXX_Unmarshal Uses

func (m *IndexDescriptor) XXX_Unmarshal(b []byte) error

type IndexDescriptor_Direction Uses

type IndexDescriptor_Direction int32

The direction of a column in the index.

const (
    IndexDescriptor_ASC  IndexDescriptor_Direction = 0
    IndexDescriptor_DESC IndexDescriptor_Direction = 1
)

func (IndexDescriptor_Direction) Enum Uses

func (x IndexDescriptor_Direction) Enum() *IndexDescriptor_Direction

func (IndexDescriptor_Direction) EnumDescriptor Uses

func (IndexDescriptor_Direction) EnumDescriptor() ([]byte, []int)

func (IndexDescriptor_Direction) String Uses

func (x IndexDescriptor_Direction) String() string

func (IndexDescriptor_Direction) ToEncodingDirection Uses

func (dir IndexDescriptor_Direction) ToEncodingDirection() (encoding.Direction, error)

ToEncodingDirection converts a direction from the proto to an encoding.Direction.

func (*IndexDescriptor_Direction) UnmarshalJSON Uses

func (x *IndexDescriptor_Direction) UnmarshalJSON(data []byte) error

type IndexDescriptor_Type Uses

type IndexDescriptor_Type int32

The type of the index.

const (
    IndexDescriptor_FORWARD  IndexDescriptor_Type = 0
    IndexDescriptor_INVERTED IndexDescriptor_Type = 1
)

func (IndexDescriptor_Type) Enum Uses

func (x IndexDescriptor_Type) Enum() *IndexDescriptor_Type

func (IndexDescriptor_Type) EnumDescriptor Uses

func (IndexDescriptor_Type) EnumDescriptor() ([]byte, []int)

func (IndexDescriptor_Type) String Uses

func (x IndexDescriptor_Type) String() string

func (*IndexDescriptor_Type) UnmarshalJSON Uses

func (x *IndexDescriptor_Type) UnmarshalJSON(data []byte) error

type IndexEntry Uses

type IndexEntry struct {
    Key   roachpb.Key
    Value roachpb.Value
}

IndexEntry represents an encoded key/value for an index entry.

func EncodeSecondaryIndex Uses

func EncodeSecondaryIndex(
    tableDesc *TableDescriptor,
    secondaryIndex *IndexDescriptor,
    colMap map[ColumnID]int,
    values []tree.Datum,
) ([]IndexEntry, error)

EncodeSecondaryIndex encodes key/values for a secondary index. colMap maps ColumnIDs to indices in `values`. This returns a slice of IndexEntry. Forward indexes will return one value, while inverted indices can return multiple values.
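
A sketch of typical call-site plumbing, assuming the row datums are laid out in the same order as tableDesc.Columns; the helper name and that layout assumption are illustrative only:

package example

import (
    "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// encodeOneSecondaryIndex builds the ColumnID-to-ordinal map for a row and
// encodes the entries of a single secondary index for it.
func encodeOneSecondaryIndex(
    tableDesc *sqlbase.TableDescriptor, idx *sqlbase.IndexDescriptor, row []tree.Datum,
) ([]sqlbase.IndexEntry, error) {
    colMap := make(map[sqlbase.ColumnID]int, len(tableDesc.Columns))
    for i := range tableDesc.Columns {
        colMap[tableDesc.Columns[i].ID] = i
    }
    // Forward indexes yield a single entry; inverted indexes may yield several.
    return sqlbase.EncodeSecondaryIndex(tableDesc, idx, colMap, row)
}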

func EncodeSecondaryIndexes Uses

func EncodeSecondaryIndexes(
    tableDesc *TableDescriptor,
    indexes []IndexDescriptor,
    colMap map[ColumnID]int,
    values []tree.Datum,
    secondaryIndexEntries []IndexEntry,
) ([]IndexEntry, error)

EncodeSecondaryIndexes encodes key/values for the secondary indexes. colMap maps ColumnIDs to indices in `values`. secondaryIndexEntries is the return value (passed as a parameter so the caller can reuse between rows) and is expected to be the same length as indexes.

type IndexID Uses

type IndexID tree.IndexID

IndexID is a custom type for IndexDescriptor IDs.

func DecodeIndexKeyPrefix Uses

func DecodeIndexKeyPrefix(
    desc *TableDescriptor, key []byte,
) (indexID IndexID, remaining []byte, err error)

DecodeIndexKeyPrefix decodes the prefix of an index key and returns the index id and a slice for the rest of the key.

Don't use this function in the scan "hot path".
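
A brief sketch (helper name hypothetical) of using it outside the hot path to report which index a raw key belongs to:

package example

import (
    "fmt"

    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// whichIndex decodes the index ID out of a raw key for the given table.
func whichIndex(desc *sqlbase.TableDescriptor, key []byte) (sqlbase.IndexID, error) {
    indexID, remaining, err := sqlbase.DecodeIndexKeyPrefix(desc, key)
    if err != nil {
        return 0, err
    }
    fmt.Printf("key belongs to index %d; %d bytes of key remain\n", indexID, len(remaining))
    return indexID, nil
}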

type InterleaveDescriptor Uses

type InterleaveDescriptor struct {
    // Ancestors contains the nesting of interleaves in the order they appear in
    // an encoded key. This means they are always in the far-to-near ancestor
    // order (e.g. grand-grand-parent, grand-parent, parent).
    Ancestors []InterleaveDescriptor_Ancestor `protobuf:"bytes,1,rep,name=ancestors" json:"ancestors"`
}

InterleaveDescriptor represents an index (either primary or secondary) that is interleaved into another table's data.

Example:

Table 1 -> /a/b
Table 2 -> /a/b/c
Table 3 -> /a/b/c/d

For table 3's interleave there are two ancestor components (table 2 is the parent and table 1 is the grandparent), with shared prefix lengths 2 and 1.
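
To make the example concrete, a sketch of what table 3's InterleaveDescriptor could contain; the table and index IDs are made up:

package example

import "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"

// table3Interleave lists the ancestors in far-to-near order.
var table3Interleave = sqlbase.InterleaveDescriptor{
    Ancestors: []sqlbase.InterleaveDescriptor_Ancestor{
        // The grandparent (table 1) shares the two fields /a/b.
        {TableID: 51, IndexID: 1, SharedPrefixLen: 2},
        // The parent (table 2) adds one more shared field, /c.
        {TableID: 52, IndexID: 1, SharedPrefixLen: 1},
    },
}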

func (*InterleaveDescriptor) Descriptor Uses

func (*InterleaveDescriptor) Descriptor() ([]byte, []int)

func (*InterleaveDescriptor) Equal Uses

func (this *InterleaveDescriptor) Equal(that interface{}) bool

func (*InterleaveDescriptor) Marshal Uses

func (m *InterleaveDescriptor) Marshal() (dAtA []byte, err error)

func (*InterleaveDescriptor) MarshalTo Uses

func (m *InterleaveDescriptor) MarshalTo(dAtA []byte) (int, error)

func (*InterleaveDescriptor) ProtoMessage Uses

func (*InterleaveDescriptor) ProtoMessage()

func (*InterleaveDescriptor) Reset Uses

func (m *InterleaveDescriptor) Reset()

func (*InterleaveDescriptor) Size Uses

func (m *InterleaveDescriptor) Size() (n int)

func (*InterleaveDescriptor) String Uses

func (m *InterleaveDescriptor) String() string

func (*InterleaveDescriptor) Unmarshal Uses

func (m *InterleaveDescriptor) Unmarshal(dAtA []byte) error

func (*InterleaveDescriptor) XXX_DiscardUnknown Uses

func (m *InterleaveDescriptor) XXX_DiscardUnknown()

func (*InterleaveDescriptor) XXX_Marshal Uses

func (m *InterleaveDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InterleaveDescriptor) XXX_Merge Uses

func (dst *InterleaveDescriptor) XXX_Merge(src proto.Message)

func (*InterleaveDescriptor) XXX_Size Uses

func (m *InterleaveDescriptor) XXX_Size() int

func (*InterleaveDescriptor) XXX_Unmarshal Uses

func (m *InterleaveDescriptor) XXX_Unmarshal(b []byte) error

type InterleaveDescriptor_Ancestor Uses

type InterleaveDescriptor_Ancestor struct {
    // TableID is the ID of the table being interleaved into.
    TableID ID  `protobuf:"varint,1,opt,name=table_id,json=tableId,casttype=ID" json:"table_id"`
    // IndexID is the ID of the parent index being interleaved into.
    IndexID IndexID `protobuf:"varint,2,opt,name=index_id,json=indexId,casttype=IndexID" json:"index_id"`
    // SharedPrefixLen is how many fields are shared between a parent and child
    // being interleaved, excluding any fields shared between parent and
    // grandparent. Thus, the sum of SharedPrefixLens in the components of an
    // InterleaveDescriptor is never more than the number of fields in the index
    // being interleaved.
    // In cockroach 1.0, this value did not exist and thus a check for > 0
    // must be performed prior to its use.
    SharedPrefixLen uint32 `protobuf:"varint,3,opt,name=shared_prefix_len,json=sharedPrefixLen" json:"shared_prefix_len"`
}

func (*InterleaveDescriptor_Ancestor) Descriptor Uses

func (*InterleaveDescriptor_Ancestor) Descriptor() ([]byte, []int)

func (*InterleaveDescriptor_Ancestor) Equal Uses

func (this *InterleaveDescriptor_Ancestor) Equal(that interface{}) bool

func (*InterleaveDescriptor_Ancestor) Marshal Uses

func (m *InterleaveDescriptor_Ancestor) Marshal() (dAtA []byte, err error)

func (*InterleaveDescriptor_Ancestor) MarshalTo Uses

func (m *InterleaveDescriptor_Ancestor) MarshalTo(dAtA []byte) (int, error)

func (*InterleaveDescriptor_Ancestor) ProtoMessage Uses

func (*InterleaveDescriptor_Ancestor) ProtoMessage()

func (*InterleaveDescriptor_Ancestor) Reset Uses

func (m *InterleaveDescriptor_Ancestor) Reset()

func (*InterleaveDescriptor_Ancestor) Size Uses

func (m *InterleaveDescriptor_Ancestor) Size() (n int)

func (*InterleaveDescriptor_Ancestor) String Uses

func (m *InterleaveDescriptor_Ancestor) String() string

func (*InterleaveDescriptor_Ancestor) Unmarshal Uses

func (m *InterleaveDescriptor_Ancestor) Unmarshal(dAtA []byte) error

func (*InterleaveDescriptor_Ancestor) XXX_DiscardUnknown Uses

func (m *InterleaveDescriptor_Ancestor) XXX_DiscardUnknown()

func (*InterleaveDescriptor_Ancestor) XXX_Marshal Uses

func (m *InterleaveDescriptor_Ancestor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InterleaveDescriptor_Ancestor) XXX_Merge Uses

func (dst *InterleaveDescriptor_Ancestor) XXX_Merge(src proto.Message)

func (*InterleaveDescriptor_Ancestor) XXX_Size Uses

func (m *InterleaveDescriptor_Ancestor) XXX_Size() int

func (*InterleaveDescriptor_Ancestor) XXX_Unmarshal Uses

func (m *InterleaveDescriptor_Ancestor) XXX_Unmarshal(b []byte) error

type JoinType Uses

type JoinType int32

JoinType is the particular type of a join (or join-like) operation. Not all values are used in all contexts.

const (
    JoinType_INNER       JoinType = 0
    JoinType_LEFT_OUTER  JoinType = 1
    JoinType_RIGHT_OUTER JoinType = 2
    JoinType_FULL_OUTER  JoinType = 3
    // A left semi join returns the rows from the left side that match at least
    // one row from the right side (as per equality columns and ON condition).
    JoinType_LEFT_SEMI JoinType = 4
    // A left anti join is an "inverted" semi join: it returns the rows from the
    // left side that don't match any rows from the right side (as per equality
    // columns and ON condition).
    JoinType_LEFT_ANTI JoinType = 5
    // INTERSECT_ALL is a special join-like operation that is only used for
    // INTERSECT ALL and INTERSECT operations.
    //
    // It is similar to a left semi join, except that if there are multiple left
    // rows that have the same values on the equality columns, only as many of
    // those are returned as there are matches on the right side.
    //
    // In practice, there is a one-to-one mapping between the left and right
    // columns (they are all equality columns).
    //
    // For example:
    //
    //       Left    Right    Result
    //       1       1        1
    //       1       2        2
    //       2       2        2
    //       2       3        3
    //       3       3
    //               3
    JoinType_INTERSECT_ALL JoinType = 6
    // EXCEPT_ALL is a special join-like operation that is only used for EXCEPT
    // ALL and EXCEPT operations.
    //
    // It is similar to a left anti join, except that if there are multiple left
    // rows that have the same values on the equality columns, only as many of
    // those are removed as there are matches on the right side.
    //
    // In practice, there is a one-to-one mapping between the left and right
    // columns (they are all equality columns).
    //
    // For example:
    //
    //       Left    Right    Result
    //       1       1        1
    //       1       2        2
    //       2       3        2
    //       2       3
    //       2       3
    //       3
    //       3
    JoinType_EXCEPT_ALL JoinType = 7
)

func JoinTypeFromAstString Uses

func JoinTypeFromAstString(joinStr string) JoinType

JoinTypeFromAstString takes a join string as found in a SQL statement (e.g. "INNER JOIN") and returns the JoinType.

func (JoinType) Enum Uses

func (x JoinType) Enum() *JoinType

func (JoinType) EnumDescriptor Uses

func (JoinType) EnumDescriptor() ([]byte, []int)

func (JoinType) IsSetOpJoin Uses

func (j JoinType) IsSetOpJoin() bool

IsSetOpJoin returns true if this join is a set operation.

func (JoinType) String Uses

func (x JoinType) String() string

func (*JoinType) UnmarshalJSON Uses

func (x *JoinType) UnmarshalJSON(data []byte) error

type MapProtoGetter Uses

type MapProtoGetter struct {
    Protos map[interface{}]protoutil.Message
}

MapProtoGetter is a protoGetter that has a hard-coded map of keys to proto messages.

func (MapProtoGetter) GetProtoTs Uses

func (m MapProtoGetter) GetProtoTs(
    ctx context.Context, key interface{}, msg protoutil.Message,
) (hlc.Timestamp, error)

GetProtoTs implements the protoGetter interface.

type MetadataSchema Uses

type MetadataSchema struct {
    // contains filtered or unexported fields
}

MetadataSchema is used to construct the initial sql schema for a new CockroachDB cluster being bootstrapped. Tables and databases must be installed on the underlying persistent storage before a cockroach store can start running correctly, thus requiring this special initialization.

func MakeMetadataSchema Uses

func MakeMetadataSchema(
    defaultZoneConfig *config.ZoneConfig, defaultSystemZoneConfig *config.ZoneConfig,
) MetadataSchema

MakeMetadataSchema constructs a new MetadataSchema value which constructs the "system" database.

func (*MetadataSchema) AddDescriptor Uses

func (ms *MetadataSchema) AddDescriptor(parentID ID, desc DescriptorProto)

AddDescriptor adds a new non-config descriptor to the system schema.

func (*MetadataSchema) AddSplitIDs Uses

func (ms *MetadataSchema) AddSplitIDs(id ...uint32)

AddSplitIDs adds some "table ids" to the MetadataSchema such that corresponding keys are returned as split points by GetInitialValues(). AddDescriptor() has the same effect for the table descriptors that are passed to it, but we also have a couple of "fake tables" that don't have descriptors but need splits just the same.

func (MetadataSchema) DescriptorIDs Uses

func (ms MetadataSchema) DescriptorIDs() IDs

DescriptorIDs returns the descriptor IDs present in the metadata schema in sorted order.

func (MetadataSchema) GetInitialValues Uses

func (ms MetadataSchema) GetInitialValues() ([]roachpb.KeyValue, []roachpb.RKey)

GetInitialValues returns the set of initial K/V values which should be added to a bootstrapping cluster in order to create the tables contained in the schema. Also returns a list of split points (a split for each SQL table descriptor part of the initial values). Both returned sets are sorted.
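
A sketch of the bootstrap flow described above; the function name is hypothetical and the zone configs are assumed to come from the server's defaults:

package example

import (
    "github.com/cockroachdb/cockroach/pkg/config"
    "github.com/cockroachdb/cockroach/pkg/roachpb"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// bootstrapKVs returns the initial system K/V pairs and split points that a
// new cluster would write during bootstrap.
func bootstrapKVs(
    defaultZone, defaultSystemZone *config.ZoneConfig,
) ([]roachpb.KeyValue, []roachpb.RKey) {
    ms := sqlbase.MakeMetadataSchema(defaultZone, defaultSystemZone)
    return ms.GetInitialValues()
}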

func (MetadataSchema) SystemDescriptorCount Uses

func (ms MetadataSchema) SystemDescriptorCount() int

SystemDescriptorCount returns the number of descriptors that will be created by this schema. This value is needed to automate certain tests.

type MultiSourceInfo Uses

type MultiSourceInfo []*DataSourceInfo

MultiSourceInfo is a list of *DataSourceInfo.

func MakeMultiSourceInfo Uses

func MakeMultiSourceInfo(args ...*DataSourceInfo) MultiSourceInfo

MakeMultiSourceInfo constructs a MultiSourceInfo for the given DataSourceInfos.

func (MultiSourceInfo) String Uses

func (m MultiSourceInfo) String() string

type MutableTableDescriptor Uses

type MutableTableDescriptor struct {
    TableDescriptor

    // ClusterVersion represents the version of the table descriptor read from the store.
    ClusterVersion TableDescriptor
}

MutableTableDescriptor is a custom type for TableDescriptors going through schema mutations.

func GetMutableTableDescFromID Uses

func GetMutableTableDescFromID(
    ctx context.Context, protoGetter protoGetter, id ID,
) (*MutableTableDescriptor, error)

GetMutableTableDescFromID retrieves the table descriptor for the table ID passed in using an existing proto getter. Returns an error if the descriptor doesn't exist or if it exists and is not a table. Otherwise a mutable copy of the table is returned.

func NewMutableCreatedTableDescriptor Uses

func NewMutableCreatedTableDescriptor(tbl TableDescriptor) *MutableTableDescriptor

NewMutableCreatedTableDescriptor returns a MutableTableDescriptor from the given TableDescriptor with the cluster version being the zero table. This is for a table that is created in the transaction.

func NewMutableExistingTableDescriptor Uses

func NewMutableExistingTableDescriptor(tbl TableDescriptor) *MutableTableDescriptor

NewMutableExistingTableDescriptor returns a MutableTableDescriptor from the given TableDescriptor with the cluster version also set to the descriptor. This is for an existing table.

func (*MutableTableDescriptor) AddCheckMutation Uses

func (desc *MutableTableDescriptor) AddCheckMutation(
    ck *TableDescriptor_CheckConstraint, direction DescriptorMutation_Direction,
)

AddCheckMutation adds a check constraint mutation to desc.Mutations.

func (*MutableTableDescriptor) AddColumn Uses

func (desc *MutableTableDescriptor) AddColumn(col *ColumnDescriptor)

AddColumn adds a column to the table.

func (*MutableTableDescriptor) AddColumnMutation Uses

func (desc *MutableTableDescriptor) AddColumnMutation(
    c *ColumnDescriptor, direction DescriptorMutation_Direction,
)

AddColumnMutation adds a column mutation to desc.Mutations. Callers must take care not to further mutate the column descriptor, since this method retains a pointer to it.

func (*MutableTableDescriptor) AddColumnToFamilyMaybeCreate Uses

func (desc *MutableTableDescriptor) AddColumnToFamilyMaybeCreate(
    col string, family string, create bool, ifNotExists bool,
) error

AddColumnToFamilyMaybeCreate adds the specified column to the specified family. If the family doesn't exist and create is true, it is created. If the family does exist, the column is added unless a "strict" create (`true` for create but `false` for ifNotExists) was requested.

AllocateIDs must be called before the TableDescriptor will be valid.

func (*MutableTableDescriptor) AddFamily Uses

func (desc *MutableTableDescriptor) AddFamily(fam ColumnFamilyDescriptor)

AddFamily adds a family to the table.

func (*MutableTableDescriptor) AddForeignKeyMutation Uses

func (desc *MutableTableDescriptor) AddForeignKeyMutation(
    fk *ForeignKeyConstraint, direction DescriptorMutation_Direction,
)

AddForeignKeyMutation adds a foreign key constraint mutation to desc.Mutations.

func (*MutableTableDescriptor) AddIndex Uses

func (desc *MutableTableDescriptor) AddIndex(idx IndexDescriptor, primary bool) error

AddIndex adds an index to the table.

func (*MutableTableDescriptor) AddIndexMutation Uses

func (desc *MutableTableDescriptor) AddIndexMutation(
    idx *IndexDescriptor, direction DescriptorMutation_Direction,
) error

AddIndexMutation adds an index mutation to desc.Mutations.

func (*MutableTableDescriptor) AddNotNullMutation Uses

func (desc *MutableTableDescriptor) AddNotNullMutation(
    ck *TableDescriptor_CheckConstraint, direction DescriptorMutation_Direction,
)

AddNotNullMutation adds a not-null constraint mutation to desc.Mutations. As with other schema elements, adding or dropping a not-null constraint requires a multi-state schema change, including a bulk validation step, before the Nullable flag can be set to false on the descriptor. This is done by adding a dummy check constraint of the form "x IS NOT NULL" that is treated like other check constraints being added, until the completion of the schema change, at which point the check constraint is deleted. This function mutates inuseNames to add the new constraint name.

func (*MutableTableDescriptor) AllocateIDs Uses

func (desc *MutableTableDescriptor) AllocateIDs() error

AllocateIDs allocates column, family, and index ids for any column, family, or index which has an ID of 0.
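
A sketch of the common flow: build a mutable descriptor from an existing table, add a column with ID 0, and call AllocateIDs so it receives a real ID. The helper name and column are hypothetical, and a real column would also need its type filled in:

package example

import "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"

// addColumnSketch adds a nullable column named "v2" to a copy of tbl.
func addColumnSketch(tbl sqlbase.TableDescriptor) (*sqlbase.MutableTableDescriptor, error) {
    mut := sqlbase.NewMutableExistingTableDescriptor(tbl)
    // The new column's ID is left as 0; AllocateIDs assigns the next free ID.
    mut.AddColumn(&sqlbase.ColumnDescriptor{Name: "v2", Nullable: true})
    if err := mut.AllocateIDs(); err != nil {
        return nil, err
    }
    return mut, nil
}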

func (*MutableTableDescriptor) DropConstraint Uses

func (desc *MutableTableDescriptor) DropConstraint(
    ctx context.Context,
    name string,
    detail ConstraintDetail,
    removeFK func(*MutableTableDescriptor, *ForeignKeyConstraint) error,
    settings *cluster.Settings,
) error

DropConstraint drops a constraint, either by removing it from the table descriptor or by queuing a mutation for a schema change.

func (*MutableTableDescriptor) IsNewTable Uses

func (desc *MutableTableDescriptor) IsNewTable() bool

IsNewTable returns true if the table was created in the current transaction.

func (*MutableTableDescriptor) MakeMutationComplete Uses

func (desc *MutableTableDescriptor) MakeMutationComplete(m DescriptorMutation) error

MakeMutationComplete updates the descriptor upon completion of a mutation. There are three Validity types for the mutations:

Validated   - The constraint has already been added and validated; it should never be the case that a validated constraint enters this method.

Validating  - The constraint has already been added, and just needs to be marked as validated.

Unvalidated - The constraint has not yet been added, and needs to be added for the first time.

func (*MutableTableDescriptor) MaybeIncrementVersion Uses

func (desc *MutableTableDescriptor) MaybeIncrementVersion(
    ctx context.Context, txn *client.Txn, settings *cluster.Settings,
) error

MaybeIncrementVersion increments the version of a descriptor if necessary.

func (*MutableTableDescriptor) RemoveColumnFromFamily Uses

func (desc *MutableTableDescriptor) RemoveColumnFromFamily(colID ColumnID)

RemoveColumnFromFamily removes a colID from the family it's assigned to.

func (*MutableTableDescriptor) RenameColumnDescriptor Uses

func (desc *MutableTableDescriptor) RenameColumnDescriptor(
    column *ColumnDescriptor, newColName string,
)

RenameColumnDescriptor updates all references to a column name in a table descriptor including indexes and families.

func (*MutableTableDescriptor) RenameConstraint Uses

func (desc *MutableTableDescriptor) RenameConstraint(
    detail ConstraintDetail,
    oldName, newName string,
    dependentViewRenameError func(string, ID) error,
    renameFK func(*MutableTableDescriptor, *ForeignKeyConstraint, string) error,
) error

RenameConstraint renames a constraint.

func (*MutableTableDescriptor) RenameIndexDescriptor Uses

func (desc *MutableTableDescriptor) RenameIndexDescriptor(
    index *IndexDescriptor, name string,
) error

RenameIndexDescriptor renames an index descriptor.

func (*MutableTableDescriptor) TableDesc Uses

func (desc *MutableTableDescriptor) TableDesc() *TableDescriptor

TableDesc implements the ObjectDescriptor interface.

type MutationID Uses

type MutationID uint32

MutationID is a custom type for TableDescriptor mutations.

const InvalidMutationID MutationID = 0

InvalidMutationID is the uninitialised mutation id.

type NameResolutionVisitor Uses

type NameResolutionVisitor struct {
    // contains filtered or unexported fields
}

NameResolutionVisitor is a tree.Visitor implementation used to resolve the column names in an expression.

func (*NameResolutionVisitor) VisitPost Uses

func (*NameResolutionVisitor) VisitPost(expr tree.Expr) tree.Expr

VisitPost implements tree.Visitor.

func (*NameResolutionVisitor) VisitPre Uses

func (v *NameResolutionVisitor) VisitPre(expr tree.Expr) (recurse bool, newNode tree.Expr)

VisitPre implements tree.Visitor.

type PartitionSpecialValCode Uses

type PartitionSpecialValCode uint64

PartitionSpecialValCode identifies a special value.

const (
    // PartitionDefaultVal represents the special DEFAULT value.
    PartitionDefaultVal PartitionSpecialValCode = 0
    // PartitionMaxVal represents the special MAXVALUE value.
    PartitionMaxVal PartitionSpecialValCode = 1
    // PartitionMinVal represents the special MINVALUE value.
    PartitionMinVal PartitionSpecialValCode = 2
)

func (PartitionSpecialValCode) String Uses

func (c PartitionSpecialValCode) String() string

type PartitionTuple Uses

type PartitionTuple struct {
    Datums       tree.Datums
    Special      PartitionSpecialValCode
    SpecialCount int
}

PartitionTuple represents a tuple in a partitioning specification.

It contains any number of true datums, stored in the Datums field, followed by any number of special partitioning values, represented by the Special and SpecialCount fields.

func DecodePartitionTuple Uses

func DecodePartitionTuple(
    a *DatumAlloc,
    tableDesc *TableDescriptor,
    idxDesc *IndexDescriptor,
    partDesc *PartitioningDescriptor,
    valueEncBuf []byte,
    prefixDatums tree.Datums,
) (*PartitionTuple, []byte, error)

DecodePartitionTuple parses columns (which are a prefix of the columns of `idxDesc`) encoded with the "value" encoding and returns the parsed datums. It also reencodes them into a key as they would be for `idxDesc` (accounting for index dirs, interleaves, subpartitioning, etc).

For a list partitioning, this returned key can be used as a prefix scan to select all rows that have the given columns as a prefix (this is true even if the list partitioning contains DEFAULT).

Examples of the key returned for a list partitioning:

- (1, 2) -> /table/index/1/2
- (1, DEFAULT) -> /table/index/1
- (DEFAULT, DEFAULT) -> /table/index

For a range partitioning, this returned key can be used as an exclusive end key to select all rows strictly less than ones with the given columns as a prefix (this is true even if the range partitioning contains MINVALUE or MAXVALUE).

Examples of the key returned for a range partitioning:

- (1, 2) -> /table/index/1/3
- (1, MAXVALUE) -> /table/index/2
- (MAXVALUE, MAXVALUE) -> (/table/index).PrefixEnd()

NB: It is checked here that if an entry for a list partitioning contains DEFAULT, everything in that entry "after" also has to be DEFAULT. So, (1, 2, DEFAULT) is valid but (1, DEFAULT, 2) is not. Similarly for range partitioning and MINVALUE/MAXVALUE.
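
A minimal sketch of decoding one value-encoded tuple of a top-level list partition; the helper name is hypothetical, and with no parent partition there are no prefix datums:

package example

import "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"

// decodeListTuple decodes one entry of PartitioningDescriptor_List.Values into
// datums plus the key prefix it addresses.
func decodeListTuple(
    tableDesc *sqlbase.TableDescriptor,
    idxDesc *sqlbase.IndexDescriptor,
    partDesc *sqlbase.PartitioningDescriptor,
    encodedTuple []byte,
) (*sqlbase.PartitionTuple, []byte, error) {
    var a sqlbase.DatumAlloc
    return sqlbase.DecodePartitionTuple(&a, tableDesc, idxDesc, partDesc, encodedTuple, nil /* prefixDatums */)
}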

func (*PartitionTuple) String Uses

func (t *PartitionTuple) String() string

type PartitioningDescriptor Uses

type PartitioningDescriptor struct {
    // NumColumns is how large of a prefix of the columns in an index are used in
    // the function mapping column values to partitions. If this is a
    // subpartition, this is offset to start from the end of the parent
    // partition's columns. If NumColumns is 0, then there is no partitioning.
    NumColumns uint32 `protobuf:"varint,1,opt,name=num_columns,json=numColumns" json:"num_columns"`
    // Exactly one of List or Range is required to be non-empty if NumColumns is
    // non-zero.
    List  []PartitioningDescriptor_List  `protobuf:"bytes,2,rep,name=list" json:"list"`
    Range []PartitioningDescriptor_Range `protobuf:"bytes,3,rep,name=range" json:"range"`
}

PartitioningDescriptor represents the partitioning of an index into spans of keys addressable by a zone config. The key encoding is unchanged. Each partition may optionally be itself divided into further partitions, called subpartitions.
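
As an illustrative sketch only, the shape of a single-column list partitioning with two partitions; real descriptors carry value-encoded tuples in Values, which are elided here:

package example

import "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"

var byRegion = sqlbase.PartitioningDescriptor{
    NumColumns: 1,
    List: []sqlbase.PartitioningDescriptor_List{
        // Values would hold the EncDatum value-encoded tuples that belong to
        // each partition; they are omitted in this sketch.
        {Name: "us"},
        {Name: "eu"},
    },
}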

func (*PartitioningDescriptor) Descriptor Uses

func (*PartitioningDescriptor) Descriptor() ([]byte, []int)

func (*PartitioningDescriptor) Equal Uses

func (this *PartitioningDescriptor) Equal(that interface{}) bool

func (*PartitioningDescriptor) FindPartitionByName Uses

func (desc *PartitioningDescriptor) FindPartitionByName(name string) *PartitioningDescriptor

FindPartitionByName searches this partitioning descriptor for a partition whose name is the input and returns it, or nil if no match is found.

func (*PartitioningDescriptor) Marshal Uses

func (m *PartitioningDescriptor) Marshal() (dAtA []byte, err error)

func (*PartitioningDescriptor) MarshalTo Uses

func (m *PartitioningDescriptor) MarshalTo(dAtA []byte) (int, error)

func (*PartitioningDescriptor) PartitionNames Uses

func (desc *PartitioningDescriptor) PartitionNames() []string

PartitionNames returns a slice containing the name of every partition and subpartition in an arbitrary order.

func (*PartitioningDescriptor) ProtoMessage Uses

func (*PartitioningDescriptor) ProtoMessage()

func (*PartitioningDescriptor) Reset Uses

func (m *PartitioningDescriptor) Reset()

func (*PartitioningDescriptor) Size Uses

func (m *PartitioningDescriptor) Size() (n int)

func (*PartitioningDescriptor) String Uses

func (m *PartitioningDescriptor) String() string

func (*PartitioningDescriptor) Unmarshal Uses

func (m *PartitioningDescriptor) Unmarshal(dAtA []byte) error

func (*PartitioningDescriptor) XXX_DiscardUnknown Uses

func (m *PartitioningDescriptor) XXX_DiscardUnknown()

func (*PartitioningDescriptor) XXX_Marshal Uses

func (m *PartitioningDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PartitioningDescriptor) XXX_Merge Uses

func (dst *PartitioningDescriptor) XXX_Merge(src proto.Message)

func (*PartitioningDescriptor) XXX_Size Uses

func (m *PartitioningDescriptor) XXX_Size() int

func (*PartitioningDescriptor) XXX_Unmarshal Uses

func (m *PartitioningDescriptor) XXX_Unmarshal(b []byte) error

type PartitioningDescriptor_List Uses

type PartitioningDescriptor_List struct {
    // Name is the partition name.
    Name string `protobuf:"bytes,1,opt,name=name" json:"name"`
    // Values is an unordered set of the tuples included in this partition. Each
    // tuple is encoded with the EncDatum value encoding. DEFAULT is encoded as
    // NOT NULL followed by PartitionDefaultVal encoded as a non-sorting
    // uvarint.
    Values [][]byte `protobuf:"bytes,2,rep,name=values" json:"values,omitempty"`
    // Subpartitioning represents a further partitioning of this list partition.
    Subpartitioning PartitioningDescriptor `protobuf:"bytes,3,opt,name=subpartitioning" json:"subpartitioning"`
}

List represents a list partitioning, which maps individual tuples to partitions.

func (*PartitioningDescriptor_List) Descriptor Uses

func (*PartitioningDescriptor_List) Descriptor() ([]byte, []int)

func (*PartitioningDescriptor_List) Equal Uses

func (this *PartitioningDescriptor_List) Equal(that interface{}) bool

func (*PartitioningDescriptor_List) Marshal Uses

func (m *PartitioningDescriptor_List) Marshal() (dAtA []byte, err error)

func (*PartitioningDescriptor_List) MarshalTo Uses

func (m *PartitioningDescriptor_List) MarshalTo(dAtA []byte) (int, error)

func (*PartitioningDescriptor_List) ProtoMessage Uses

func (*PartitioningDescriptor_List) ProtoMessage()

func (*PartitioningDescriptor_List) Reset Uses

func (m *PartitioningDescriptor_List) Reset()

func (*PartitioningDescriptor_List) Size Uses

func (m *PartitioningDescriptor_List) Size() (n int)

func (*PartitioningDescriptor_List) String Uses

func (m *PartitioningDescriptor_List) String() string

func (*PartitioningDescriptor_List) Unmarshal Uses

func (m *PartitioningDescriptor_List) Unmarshal(dAtA []byte) error

func (*PartitioningDescriptor_List) XXX_DiscardUnknown Uses

func (m *PartitioningDescriptor_List) XXX_DiscardUnknown()

func (*PartitioningDescriptor_List) XXX_Marshal Uses

func (m *PartitioningDescriptor_List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PartitioningDescriptor_List) XXX_Merge Uses

func (dst *PartitioningDescriptor_List) XXX_Merge(src proto.Message)

func (*PartitioningDescriptor_List) XXX_Size Uses

func (m *PartitioningDescriptor_List) XXX_Size() int

func (*PartitioningDescriptor_List) XXX_Unmarshal Uses

func (m *PartitioningDescriptor_List) XXX_Unmarshal(b []byte) error

type PartitioningDescriptor_Range Uses

type PartitioningDescriptor_Range struct {
    // Name is the partition name.
    Name string `protobuf:"bytes,1,opt,name=name" json:"name"`
    // FromInclusive is the inclusive lower bound of this range partition. It is
    // encoded with the EncDatum value encoding. MINVALUE and MAXVALUE are
    // encoded as NOT NULL followed by a PartitionSpecialValCode encoded as a
    // non-sorting uvarint.
    FromInclusive []byte `protobuf:"bytes,3,opt,name=from_inclusive,json=fromInclusive" json:"from_inclusive,omitempty"`
    // ToExclusive is the exclusive upper bound of this range partition. It is
    // encoded in the same way as From.
    ToExclusive []byte `protobuf:"bytes,2,opt,name=to_exclusive,json=toExclusive" json:"to_exclusive,omitempty"`
}

Range represents a range partitioning, which maps ranges of tuples to partitions by specifying exclusive upper bounds. The range partitions in a PartitioningDescriptor are required to be sorted by UpperBound.

func (*PartitioningDescriptor_Range) Descriptor Uses

func (*PartitioningDescriptor_Range) Descriptor() ([]byte, []int)

func (*PartitioningDescriptor_Range) Equal Uses

func (this *PartitioningDescriptor_Range) Equal(that interface{}) bool

func (*PartitioningDescriptor_Range) Marshal Uses

func (m *PartitioningDescriptor_Range) Marshal() (dAtA []byte, err error)

func (*PartitioningDescriptor_Range) MarshalTo Uses

func (m *PartitioningDescriptor_Range) MarshalTo(dAtA []byte) (int, error)

func (*PartitioningDescriptor_Range) ProtoMessage Uses

func (*PartitioningDescriptor_Range) ProtoMessage()

func (*PartitioningDescriptor_Range) Reset Uses

func (m *PartitioningDescriptor_Range) Reset()

func (*PartitioningDescriptor_Range) Size Uses

func (m *PartitioningDescriptor_Range) Size() (n int)

func (*PartitioningDescriptor_Range) String Uses

func (m *PartitioningDescriptor_Range) String() string

func (*PartitioningDescriptor_Range) Unmarshal Uses

func (m *PartitioningDescriptor_Range) Unmarshal(dAtA []byte) error

func (*PartitioningDescriptor_Range) XXX_DiscardUnknown Uses

func (m *PartitioningDescriptor_Range) XXX_DiscardUnknown()

func (*PartitioningDescriptor_Range) XXX_Marshal Uses

func (m *PartitioningDescriptor_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PartitioningDescriptor_Range) XXX_Merge Uses

func (dst *PartitioningDescriptor_Range) XXX_Merge(src proto.Message)

func (*PartitioningDescriptor_Range) XXX_Size Uses

func (m *PartitioningDescriptor_Range) XXX_Size() int

func (*PartitioningDescriptor_Range) XXX_Unmarshal Uses

func (m *PartitioningDescriptor_Range) XXX_Unmarshal(b []byte) error

type PrepareMetadata Uses

type PrepareMetadata struct {
    // Note that AST may be nil if the prepared statement is empty.
    parser.Statement

    // AnonymizedStr is the anonymized statement string suitable for recording
    // in statement statistics.
    AnonymizedStr string

    // Provides TypeHints and Types fields which contain placeholder typing
    // information.
    tree.PlaceholderTypesInfo

    // Columns are the types and names of the query output columns.
    Columns ResultColumns

    // InferredTypes represents the inferred types for placeholder, using protocol
    // identifiers. Used for reporting on Describe.
    InferredTypes []oid.Oid
}

PrepareMetadata encapsulates information about a statement that is gathered during Prepare and is later used during Describe or Execute.

func (*PrepareMetadata) MemoryEstimate Uses

func (pm *PrepareMetadata) MemoryEstimate() int64

MemoryEstimate returns an estimation (in bytes) of how much memory is used by the prepare metadata.

type PrivilegeDescriptor Uses

type PrivilegeDescriptor struct {
    Users []UserPrivileges `protobuf:"bytes,1,rep,name=users" json:"users"`
}

PrivilegeDescriptor describes a list of users and attached privileges. The list should be sorted by user for fast access.

func NewCustomSuperuserPrivilegeDescriptor Uses

func NewCustomSuperuserPrivilegeDescriptor(priv privilege.List) *PrivilegeDescriptor

NewCustomSuperuserPrivilegeDescriptor returns a privilege descriptor for the root user and the admin role with specified privileges.

func NewDefaultPrivilegeDescriptor Uses

func NewDefaultPrivilegeDescriptor() *PrivilegeDescriptor

NewDefaultPrivilegeDescriptor returns a privilege descriptor with ALL privileges for the root user and admin role.

func NewPrivilegeDescriptor Uses

func NewPrivilegeDescriptor(user string, priv privilege.List) *PrivilegeDescriptor

NewPrivilegeDescriptor returns a privilege descriptor for the given user with the specified list of privileges.

func (PrivilegeDescriptor) AnyPrivilege Uses

func (p PrivilegeDescriptor) AnyPrivilege(user string) bool

AnyPrivilege returns true if 'user' has any privilege on this descriptor.

func (PrivilegeDescriptor) CheckPrivilege Uses

func (p PrivilegeDescriptor) CheckPrivilege(user string, priv privilege.Kind) bool

CheckPrivilege returns true if 'user' has 'privilege' on this descriptor.

func (*PrivilegeDescriptor) Descriptor Uses

func (*PrivilegeDescriptor) Descriptor() ([]byte, []int)

func (*PrivilegeDescriptor) Equal Uses

func (this *PrivilegeDescriptor) Equal(that interface{}) bool

func (*PrivilegeDescriptor) Grant Uses

func (p *PrivilegeDescriptor) Grant(user string, privList privilege.List)

Grant adds new privileges to this descriptor for a given list of users. TODO(marc): if all privileges other than ALL are set, should we collapse them into ALL?
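
A small sketch (helper name hypothetical) that combines Grant with CheckPrivilege:

package example

import (
    "github.com/cockroachdb/cockroach/pkg/sql/privilege"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// grantSelect grants SELECT to user on the descriptor's privileges and then
// verifies the grant took effect.
func grantSelect(p *sqlbase.PrivilegeDescriptor, user string) bool {
    p.Grant(user, privilege.List{privilege.SELECT})
    return p.CheckPrivilege(user, privilege.SELECT)
}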

func (*PrivilegeDescriptor) Marshal Uses

func (m *PrivilegeDescriptor) Marshal() (dAtA []byte, err error)

func (*PrivilegeDescriptor) MarshalTo Uses

func (m *PrivilegeDescriptor) MarshalTo(dAtA []byte) (int, error)

func (*PrivilegeDescriptor) MaybeFixPrivileges Uses

func (p *PrivilegeDescriptor) MaybeFixPrivileges(id ID) bool

MaybeFixPrivileges fixes the privilege descriptor if needed, including:

- adding default privileges for the "admin" role
- fixing default privileges for the "root" user
- fixing maximum privileges for users

Returns true if the privilege descriptor was modified.

func (*PrivilegeDescriptor) ProtoMessage Uses

func (*PrivilegeDescriptor) ProtoMessage()

func (*PrivilegeDescriptor) Reset Uses

func (m *PrivilegeDescriptor) Reset()

func (*PrivilegeDescriptor) Revoke Uses

func (p *PrivilegeDescriptor) Revoke(user string, privList privilege.List)

Revoke removes privileges from this descriptor for a given list of users.

func (PrivilegeDescriptor) Show Uses

func (p PrivilegeDescriptor) Show() []UserPrivilegeString

Show returns the list of {username, privileges} sorted by username. 'privileges' is a string of comma-separated sorted privilege names.

func (*PrivilegeDescriptor) Size Uses

func (m *PrivilegeDescriptor) Size() (n int)

func (*PrivilegeDescriptor) String Uses

func (m *PrivilegeDescriptor) String() string

func (*PrivilegeDescriptor) Unmarshal Uses

func (m *PrivilegeDescriptor) Unmarshal(dAtA []byte) error

func (PrivilegeDescriptor) Validate Uses

func (p PrivilegeDescriptor) Validate(id ID) error

Validate is called when writing a database or table descriptor. It takes the descriptor ID which is used to determine if it belongs to a system descriptor, in which case the maximum set of allowed privileges is looked up and applied.

func (*PrivilegeDescriptor) XXX_DiscardUnknown Uses

func (m *PrivilegeDescriptor) XXX_DiscardUnknown()

func (*PrivilegeDescriptor) XXX_Marshal Uses

func (m *PrivilegeDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PrivilegeDescriptor) XXX_Merge Uses

func (dst *PrivilegeDescriptor) XXX_Merge(src proto.Message)

func (*PrivilegeDescriptor) XXX_Size Uses

func (m *PrivilegeDescriptor) XXX_Size() int

func (*PrivilegeDescriptor) XXX_Unmarshal Uses

func (m *PrivilegeDescriptor) XXX_Unmarshal(b []byte) error

type ResultColumn Uses

type ResultColumn struct {
    Name string
    Typ  *types.T

    // If set, this is an implicit column; used internally.
    Hidden bool
}

ResultColumn contains the name and type of a SQL "cell".

type ResultColumns Uses

type ResultColumns []ResultColumn

ResultColumns is the type used throughout the sql module to describe the column types of a table.

func ResultColumnsFromColDescs Uses

func ResultColumnsFromColDescs(colDescs []ColumnDescriptor) ResultColumns

ResultColumnsFromColDescs converts ColumnDescriptors to ResultColumns.

func (ResultColumns) TypesEqual Uses

func (r ResultColumns) TypesEqual(other ResultColumns) bool

TypesEqual returns whether the length and types of r match those of other. If a type in other is NULL, it is considered equal.

type RowIndexedVarContainer Uses

type RowIndexedVarContainer struct {
    CurSourceRow tree.Datums

    Cols    []ColumnDescriptor
    Mapping map[ColumnID]int
}

RowIndexedVarContainer is used to evaluate expressions over various rows.

func (*RowIndexedVarContainer) IndexedVarEval Uses

func (r *RowIndexedVarContainer) IndexedVarEval(
    idx int, ctx *tree.EvalContext,
) (tree.Datum, error)

IndexedVarEval implements tree.IndexedVarContainer.

func (*RowIndexedVarContainer) IndexedVarNodeFormatter Uses

func (*RowIndexedVarContainer) IndexedVarNodeFormatter(idx int) tree.NodeFormatter

IndexedVarNodeFormatter implements tree.IndexedVarContainer.

func (*RowIndexedVarContainer) IndexedVarResolvedType Uses

func (*RowIndexedVarContainer) IndexedVarResolvedType(idx int) *types.T

IndexedVarResolvedType implements tree.IndexedVarContainer.
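
A sketch of direct use of the container (helper name hypothetical): point CurSourceRow at the current row, then ask for the datum behind an indexed variable:

package example

import (
    "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// evalColumn returns the datum for the indexed variable at ordinal idx,
// resolved against row according to the container's Cols and Mapping.
func evalColumn(
    evalCtx *tree.EvalContext, c *sqlbase.RowIndexedVarContainer, row tree.Datums, idx int,
) (tree.Datum, error) {
    c.CurSourceRow = row
    return c.IndexedVarEval(idx, evalCtx)
}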

type SourceAlias Uses

type SourceAlias struct {
    Name tree.TableName
    // ColumnSet identifies a non-empty set of columns in a
    // selection. This is used by DataSourceInfo.SourceAliases to map
    // table names to column ranges.
    ColumnSet util.FastIntSet
}

SourceAlias associates a table name (alias) to a set of columns in the result row of a data source.

type SourceAliases Uses

type SourceAliases []SourceAlias

SourceAliases is an array of one or more SourceAlias.

type TableDescriptor Uses

type TableDescriptor struct {
    // The table name. It should be normalized using NormalizeName() before
    // comparing it.
    Name string `protobuf:"bytes,1,opt,name=name" json:"name"`
    ID   ID     `protobuf:"varint,3,opt,name=id,casttype=ID" json:"id"`
    // ID of the parent database.
    ParentID ID  `protobuf:"varint,4,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"`
    // Monotonically increasing version of the table descriptor.
    //
    // The design maintains two invariants:
    // 1. Two safe versions: A transaction at a particular timestamp is
    //    allowed to use one of two versions of a table descriptor:
    //    the one that would be read from the store at that timestamp,
    //    and the one behind it in version.
    // 2. Two leased versions: There can be valid leases on at most the 2
    //    latest versions of a table in the cluster at any time. New leases
    //    are only granted on the latest version.
    //
    // The database must maintain correctness in light of there being two
    // versions of a descriptor that can be used.
    //
    // Multiple schema change mutations can be grouped together on a
    // particular version increment.
    Version DescriptorVersion `protobuf:"varint,5,opt,name=version,casttype=DescriptorVersion" json:"version"`
    // Last modification time of the table descriptor.
    // Starting in 19.2 this field's value may sometimes be zero-valued, in which
    // case the MVCC timestamp of the row containing the value should be used to
    // populate it. This dance allows us to avoid observing the commit timestamp
    // for transactions which increment the descriptor version.
    // Encoded TableDescriptor structs should not be stored directly but rather
    // should live inside of a Descriptor. The Descriptor.Table() method takes an
    // hlc timestamp to ensure that this field is set properly when extracted from
    // a Descriptor.
    ModificationTime hlc.Timestamp      `protobuf:"bytes,7,opt,name=modification_time,json=modificationTime" json:"modification_time"`
    Columns          []ColumnDescriptor `protobuf:"bytes,8,rep,name=columns" json:"columns"`
    // next_column_id is used to ensure that deleted column ids are not reused.
    NextColumnID ColumnID                 `protobuf:"varint,9,opt,name=next_column_id,json=nextColumnId,casttype=ColumnID" json:"next_column_id"`
    Families     []ColumnFamilyDescriptor `protobuf:"bytes,22,rep,name=families" json:"families"`
    // next_family_id is used to ensure that deleted family ids are not reused.
    NextFamilyID FamilyID        `protobuf:"varint,23,opt,name=next_family_id,json=nextFamilyId,casttype=FamilyID" json:"next_family_id"`
    PrimaryIndex IndexDescriptor `protobuf:"bytes,10,opt,name=primary_index,json=primaryIndex" json:"primary_index"`
    // indexes are all the secondary indexes.
    Indexes []IndexDescriptor `protobuf:"bytes,11,rep,name=indexes" json:"indexes"`
    // next_index_id is used to ensure that deleted index ids are not reused.
    NextIndexID IndexID              `protobuf:"varint,12,opt,name=next_index_id,json=nextIndexId,casttype=IndexID" json:"next_index_id"`
    Privileges  *PrivilegeDescriptor `protobuf:"bytes,13,opt,name=privileges" json:"privileges,omitempty"`
    // Columns or indexes being added or deleted in a FIFO order.
    Mutations []DescriptorMutation               `protobuf:"bytes,14,rep,name=mutations" json:"mutations"`
    Lease     *TableDescriptor_SchemaChangeLease `protobuf:"bytes,15,opt,name=lease" json:"lease,omitempty"`
    // An id for the next group of mutations to be applied together.
    NextMutationID MutationID `protobuf:"varint,16,opt,name=next_mutation_id,json=nextMutationId,casttype=MutationID" json:"next_mutation_id"`
    // format_version declares which sql to key:value mapping is being used to
    // represent the data in this table.
    FormatVersion FormatVersion                      `protobuf:"varint,17,opt,name=format_version,json=formatVersion,casttype=FormatVersion" json:"format_version"`
    State         TableDescriptor_State              `protobuf:"varint,19,opt,name=state,enum=cockroach.sql.sqlbase.TableDescriptor_State" json:"state"`
    OfflineReason string                             `protobuf:"bytes,38,opt,name=offline_reason,json=offlineReason" json:"offline_reason"`
    Checks        []*TableDescriptor_CheckConstraint `protobuf:"bytes,20,rep,name=checks" json:"checks,omitempty"`
    // A list of draining names. The draining name entries are drained from
    // the cluster wide name caches by incrementing the version for this
    // descriptor and ensuring that there are no leases on prior
    // versions of the descriptor. This field is then cleared and the version
    // of the descriptor incremented.
    DrainingNames []TableDescriptor_NameInfo `protobuf:"bytes,21,rep,name=draining_names,json=drainingNames" json:"draining_names"`
    // The TableDescriptor is used for views in addition to tables. Views
    // use mostly the same fields as tables, but need to track the actual
    // query from the view definition as well.
    //
    // For now we only track a string representation of the query. This prevents
    // us from easily supporting things like renames of the dependencies of a
    // view. Eventually we'll want to switch to a semantic encoding of the query
    // that relies on IDs rather than names so that we can support renames of
    // fields relied on by the query, as Postgres does.
    //
    // Note: The presence of this field is used to determine whether or not
    // a TableDescriptor represents a view.
    ViewQuery string `protobuf:"bytes,24,opt,name=view_query,json=viewQuery" json:"view_query"`
    // The IDs of all relations that this depends on.
    // Only ever populated if this descriptor is for a view.
    DependsOn []ID `protobuf:"varint,25,rep,name=dependsOn,casttype=ID" json:"dependsOn,omitempty"`
    // All references to this table/view from other views in the system, tracked
    // down to the column/index so that we can restrict changes to them while
    // they're still being referred to.
    DependedOnBy []TableDescriptor_Reference `protobuf:"bytes,26,rep,name=dependedOnBy" json:"dependedOnBy"`
    // Mutation jobs queued for execution in a FIFO order. Remains synchronized
    // with the mutations list.
    MutationJobs []TableDescriptor_MutationJob `protobuf:"bytes,27,rep,name=mutationJobs" json:"mutationJobs"`
    // The presence of sequence_opts indicates that this descriptor is for a sequence.
    SequenceOpts *TableDescriptor_SequenceOpts `protobuf:"bytes,28,opt,name=sequence_opts,json=sequenceOpts" json:"sequence_opts,omitempty"`
    // The drop time is set when a table is truncated or dropped,
    // based on the current time in nanoseconds since the epoch.
    // Use this timestamp + GC TTL to start deleting the table's
    // contents.
    //
    // TODO(vivek): Replace with the ModificationTime. This has been
    // added only for migration purposes.
    DropTime int64 `protobuf:"varint,29,opt,name=drop_time,json=dropTime" json:"drop_time"`
    // ReplacementOf tracks prior IDs by which this table went -- e.g. when
    // TRUNCATE creates a replacement of a table and swaps it in for the old
    // one, it should note on the new table the ID of the table it replaced. This
    // can be used when trying to track a table's history across truncations.
    ReplacementOf TableDescriptor_Replacement `protobuf:"bytes,30,opt,name=replacement_of,json=replacementOf" json:"replacement_of"`
    AuditMode     TableDescriptor_AuditMode   `protobuf:"varint,31,opt,name=audit_mode,json=auditMode,enum=cockroach.sql.sqlbase.TableDescriptor_AuditMode" json:"audit_mode"`
    // The job id for a drop job is the id in the system.jobs table of the
    // dropping of this table.
    DropJobID int64 `protobuf:"varint,32,opt,name=drop_job_id,json=dropJobId" json:"drop_job_id"`
    // The schema elements that have been dropped and whose underlying
    // data needs to be gc-ed. These schema elements have already transitioned
    // through the drop state machine when they were in the above mutations
    // list, and can be safely deleted. The names for these schema elements
    // can be reused. This list is separate because mutations can
    // lie in this list for a long time (gc deadline) and should not block
    // the execution of other schema changes on the table.
    //
    // TODO(vivekmenezes): This is currently only used by the non-interleaved drop
    // index case. Also use for dropped interleaved indexes and columns.
    GCMutations []TableDescriptor_GCDescriptorMutation `protobuf:"bytes,33,rep,name=gc_mutations,json=gcMutations" json:"gc_mutations"`
    CreateQuery string                                 `protobuf:"bytes,34,opt,name=create_query,json=createQuery" json:"create_query"`
    // Starting in 19.2 CreateAsOfTime is initialized to zero for the first
    // version of a table and is populated from the MVCC timestamp of the read
    // like ModificationTime. See Descriptor.Table().
    // CreateAsOfSystemTime is used for CREATE TABLE ... AS ... and was
    // added in 19.1.
    CreateAsOfTime hlc.Timestamp `protobuf:"bytes,35,opt,name=create_as_of_time,json=createAsOfTime" json:"create_as_of_time"`
    // outbound_fks contains all foreign key constraints that have this table as
    // the origin table.
    OutboundFKs []ForeignKeyConstraint `protobuf:"bytes,36,rep,name=outbound_fks,json=outboundFks" json:"outbound_fks"`
    // inbound_fks contains all foreign key constraints that have this table as
    // the referenced table.
    InboundFKs []ForeignKeyConstraint `protobuf:"bytes,37,rep,name=inbound_fks,json=inboundFks" json:"inbound_fks"`
    // Temporary table support will be added to CRDB starting in 20.1. The temporary
    // flag is set to true for all temporary tables. All table descriptors created
    // before 20.1 refer to persistent tables, so the absence of this flag implies
    // the table is persistent.
    Temporary bool `protobuf:"varint,39,opt,name=temporary" json:"temporary"`
}

A TableDescriptor represents a table or view and is stored in a structured metadata key. The TableDescriptor has a globally-unique ID, while its member {Column,Index}Descriptors have locally-unique IDs.

func GetTableDescFromID Uses

func GetTableDescFromID(
    ctx context.Context, protoGetter protoGetter, id ID,
) (*TableDescriptor, error)

GetTableDescFromID retrieves the table descriptor for the table ID passed in using an existing proto getter. Returns an error if the descriptor doesn't exist or if it exists and is not a table.

func GetTableDescriptor Uses

func GetTableDescriptor(kvDB *client.DB, database string, table string) *TableDescriptor

GetTableDescriptor retrieves a table descriptor directly from the KV layer.

func (*TableDescriptor) Adding Uses

func (desc *TableDescriptor) Adding() bool

Adding returns true if the table is being added.

func (*TableDescriptor) AllActiveAndInactiveChecks Uses

func (desc *TableDescriptor) AllActiveAndInactiveChecks() []*TableDescriptor_CheckConstraint

AllActiveAndInactiveChecks returns all check constraints, including both "active" ones on the table descriptor which are being enforced for all writes, and "inactive" ones queued in the mutations list.

func (*TableDescriptor) AllActiveAndInactiveForeignKeys Uses

func (desc *TableDescriptor) AllActiveAndInactiveForeignKeys() []*ForeignKeyConstraint

AllActiveAndInactiveForeignKeys returns all foreign keys, including both "active" ones on the index descriptor which are being enforced for all writes, and "inactive" ones queued in the mutations list. An error is returned if multiple foreign keys (including mutations) are found for the same index.

func (*TableDescriptor) AllIndexSpans Uses

func (desc *TableDescriptor) AllIndexSpans() roachpb.Spans

AllIndexSpans returns the Spans for each index in the table, including those being added in the mutations.

func (*TableDescriptor) AllNonDropColumns Uses

func (desc *TableDescriptor) AllNonDropColumns() []ColumnDescriptor

AllNonDropColumns returns all the columns, including those being added in the mutations.

func (*TableDescriptor) AllNonDropIndexes Uses

func (desc *TableDescriptor) AllNonDropIndexes() []*IndexDescriptor

AllNonDropIndexes returns all the indexes, including those being added in the mutations.

func (*TableDescriptor) CheckUniqueConstraints Uses

func (desc *TableDescriptor) CheckUniqueConstraints() error

CheckUniqueConstraints returns a non-nil error if a descriptor contains two constraints with the same name.

func (*TableDescriptor) ColumnIdxMap Uses

func (desc *TableDescriptor) ColumnIdxMap() map[ColumnID]int

ColumnIdxMap returns a map from Column ID to the ordinal position of that column.
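
A test-style sketch combining GetTableDescriptor with ColumnIdxMap; the helper name is hypothetical and the internal/client import path is assumed for this release:

package example

import (
    "github.com/cockroachdb/cockroach/pkg/internal/client"
    "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// columnPositions fetches a table descriptor straight from KV and builds the
// column-ID-to-ordinal map for it.
func columnPositions(kvDB *client.DB, db, tbl string) map[sqlbase.ColumnID]int {
    desc := sqlbase.GetTableDescriptor(kvDB, db, tbl)
    return desc.ColumnIdxMap()
}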

func (*TableDescriptor) ColumnIdxMapWithMutations Uses

func (desc *TableDescriptor) ColumnIdxMapWithMutations(mutations bool) map[ColumnID]int

ColumnIdxMapWithMutations returns a map from Column ID to the ordinal position of that column, optionally including mutation columns if the input bool is true.

func (*TableDescriptor) ColumnTypes Uses

func (desc *TableDescriptor) ColumnTypes() []types.T

ColumnTypes returns the types of all columns.

func (*TableDescriptor) ColumnTypesWithMutations Uses

func (desc *TableDescriptor) ColumnTypesWithMutations(mutations bool) []types.T

ColumnTypesWithMutations returns the types of all columns, optionally including mutation columns, which will be returned if the input bool is true.

func (*TableDescriptor) Descriptor Uses

func (*TableDescriptor) Descriptor() ([]byte, []int)

func (*TableDescriptor) Dropped Uses

func (desc *TableDescriptor) Dropped() bool

Dropped returns true if the table is being dropped.

func (*TableDescriptor) Equal