cockroach: github.com/cockroachdb/cockroach/pkg/sql/execinfrapb Index | Files

package execinfrapb

import "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"

Index

Package Files

api.go api.pb.go data.go data.pb.go expr.go flow_diagram.go processors.go processors.pb.go processors_base.pb.go processors_bulk_io.pb.go processors_changefeeds.pb.go processors_sql.pb.go processors_table_stats.pb.go stats.go testutils.go version.go

Constants

const FlowIDTagKey = tracing.TagPrefix + "flowid"

FlowIDTagKey is the key used for flow id tags in tracing spans.

const ProcessorIDTagKey = tracing.TagPrefix + "processorid"

ProcessorIDTagKey is the key used for processor id tags in tracing spans.

const StreamIDTagKey = tracing.TagPrefix + "streamid"

StreamIDTagKey is the key used for stream id tags in tracing spans.

Variables

var (
    ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowApi   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthData = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowData   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessors = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessors   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessorsBase = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessorsBase   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessorsBulkIo = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessorsBulkIo   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessorsChangefeeds = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessorsChangefeeds   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessorsSql = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessorsSql   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessorsTableStats = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessorsTableStats   = fmt.Errorf("proto: integer overflow")
)
var AggregatorSpec_Func_name = map[int32]string{
    0:  "ANY_NOT_NULL",
    1:  "AVG",
    2:  "BOOL_AND",
    3:  "BOOL_OR",
    4:  "CONCAT_AGG",
    5:  "COUNT",
    7:  "MAX",
    8:  "MIN",
    9:  "STDDEV",
    10: "SUM",
    11: "SUM_INT",
    12: "VARIANCE",
    13: "XOR_AGG",
    14: "COUNT_ROWS",
    15: "SQRDIFF",
    16: "FINAL_VARIANCE",
    17: "FINAL_STDDEV",
    18: "ARRAY_AGG",
    19: "JSON_AGG",
    20: "JSONB_AGG",
    21: "STRING_AGG",
    22: "BIT_AND",
    23: "BIT_OR",
    24: "CORR",
    25: "PERCENTILE_DISC_IMPL",
    26: "PERCENTILE_CONT_IMPL",
    27: "JSON_OBJECT_AGG",
    28: "JSONB_OBJECT_AGG",
    29: "VAR_POP",
    30: "STDDEV_POP",
}
var AggregatorSpec_Func_value = map[string]int32{
    "ANY_NOT_NULL":         0,
    "AVG":                  1,
    "BOOL_AND":             2,
    "BOOL_OR":              3,
    "CONCAT_AGG":           4,
    "COUNT":                5,
    "MAX":                  7,
    "MIN":                  8,
    "STDDEV":               9,
    "SUM":                  10,
    "SUM_INT":              11,
    "VARIANCE":             12,
    "XOR_AGG":              13,
    "COUNT_ROWS":           14,
    "SQRDIFF":              15,
    "FINAL_VARIANCE":       16,
    "FINAL_STDDEV":         17,
    "ARRAY_AGG":            18,
    "JSON_AGG":             19,
    "JSONB_AGG":            20,
    "STRING_AGG":           21,
    "BIT_AND":              22,
    "BIT_OR":               23,
    "CORR":                 24,
    "PERCENTILE_DISC_IMPL": 25,
    "PERCENTILE_CONT_IMPL": 26,
    "JSON_OBJECT_AGG":      27,
    "JSONB_OBJECT_AGG":     28,
    "VAR_POP":              29,
    "STDDEV_POP":           30,
}
var AggregatorSpec_Type_name = map[int32]string{
    0:  "AUTO",
    1:  "SCALAR",
    2:  "NON_SCALAR",
}
var AggregatorSpec_Type_value = map[string]int32{
    "AUTO":       0,
    "SCALAR":     1,
    "NON_SCALAR": 2,
}
var BackfillerSpec_Type_name = map[int32]string{
    0:  "Invalid",
    1:  "Column",
    2:  "Index",
}
var BackfillerSpec_Type_value = map[string]int32{
    "Invalid": 0,
    "Column":  1,
    "Index":   2,
}
var BytesEncodeFormat_name = map[int32]string{
    0:  "HEX",
    1:  "ESCAPE",
    2:  "BASE64",
}
var BytesEncodeFormat_value = map[string]int32{
    "HEX":    0,
    "ESCAPE": 1,
    "BASE64": 2,
}
var FileCompression_name = map[int32]string{
    0:  "None",
    1:  "Gzip",
}
var FileCompression_value = map[string]int32{
    "None": 0,
    "Gzip": 1,
}
var InputSyncSpec_Type_name = map[int32]string{
    0:  "UNORDERED",
    1:  "ORDERED",
}
var InputSyncSpec_Type_value = map[string]int32{
    "UNORDERED": 0,
    "ORDERED":   1,
}
var Ordering_Column_Direction_name = map[int32]string{
    0:  "ASC",
    1:  "DESC",
}
var Ordering_Column_Direction_value = map[string]int32{
    "ASC":  0,
    "DESC": 1,
}
var OutputRouterSpec_Type_name = map[int32]string{
    0:  "PASS_THROUGH",
    1:  "MIRROR",
    2:  "BY_HASH",
    3:  "BY_RANGE",
}
var OutputRouterSpec_Type_value = map[string]int32{
    "PASS_THROUGH": 0,
    "MIRROR":       1,
    "BY_HASH":      2,
    "BY_RANGE":     3,
}
var ScanVisibility_name = map[int32]string{
    0:  "PUBLIC",
    1:  "PUBLIC_AND_NOT_PUBLIC",
}
var ScanVisibility_value = map[string]int32{
    "PUBLIC":                0,
    "PUBLIC_AND_NOT_PUBLIC": 1,
}
var SketchType_name = map[int32]string{
    0: "HLL_PLUS_PLUS_V1",
}
var SketchType_value = map[string]int32{
    "HLL_PLUS_PLUS_V1": 0,
}
var StreamEndpointSpec_Type_name = map[int32]string{
    0:  "LOCAL",
    1:  "REMOTE",
    2:  "SYNC_RESPONSE",
}
var StreamEndpointSpec_Type_value = map[string]int32{
    "LOCAL":         0,
    "REMOTE":        1,
    "SYNC_RESPONSE": 2,
}
var WindowerSpec_Frame_BoundType_name = map[int32]string{
    0:  "UNBOUNDED_PRECEDING",
    1:  "UNBOUNDED_FOLLOWING",
    2:  "OFFSET_PRECEDING",
    3:  "OFFSET_FOLLOWING",
    4:  "CURRENT_ROW",
}
var WindowerSpec_Frame_BoundType_value = map[string]int32{
    "UNBOUNDED_PRECEDING": 0,
    "UNBOUNDED_FOLLOWING": 1,
    "OFFSET_PRECEDING":    2,
    "OFFSET_FOLLOWING":    3,
    "CURRENT_ROW":         4,
}
var WindowerSpec_Frame_Exclusion_name = map[int32]string{
    0:  "NO_EXCLUSION",
    1:  "EXCLUDE_CURRENT_ROW",
    2:  "EXCLUDE_GROUP",
    3:  "EXCLUDE_TIES",
}
var WindowerSpec_Frame_Exclusion_value = map[string]int32{
    "NO_EXCLUSION":        0,
    "EXCLUDE_CURRENT_ROW": 1,
    "EXCLUDE_GROUP":       2,
    "EXCLUDE_TIES":        3,
}
var WindowerSpec_Frame_Mode_name = map[int32]string{
    0:  "RANGE",
    1:  "ROWS",
    2:  "GROUPS",
}
var WindowerSpec_Frame_Mode_value = map[string]int32{
    "RANGE":  0,
    "ROWS":   1,
    "GROUPS": 2,
}
var WindowerSpec_WindowFunc_name = map[int32]string{
    0:  "ROW_NUMBER",
    1:  "RANK",
    2:  "DENSE_RANK",
    3:  "PERCENT_RANK",
    4:  "CUME_DIST",
    5:  "NTILE",
    6:  "LAG",
    7:  "LEAD",
    8:  "FIRST_VALUE",
    9:  "LAST_VALUE",
    10: "NTH_VALUE",
}
var WindowerSpec_WindowFunc_value = map[string]int32{
    "ROW_NUMBER":   0,
    "RANK":         1,
    "DENSE_RANK":   2,
    "PERCENT_RANK": 3,
    "CUME_DIST":    4,
    "NTILE":        5,
    "LAG":          6,
    "LEAD":         7,
    "FIRST_VALUE":  8,
    "LAST_VALUE":   9,
    "NTH_VALUE":    10,
}

func ConvertToColumnOrdering Uses

func ConvertToColumnOrdering(specOrdering Ordering) sqlbase.ColumnOrdering

ConvertToColumnOrdering converts an Ordering type (as defined in data.proto) to a sqlbase.ColumnOrdering type.

func DeserializeExpr Uses

func DeserializeExpr(
    expr string, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, vars *tree.IndexedVarHelper,
) (tree.TypedExpr, error)

DeserializeExpr deserializes expr, binds the indexed variables to the provided IndexedVarHelper, and evaluates any constants in the expression.

func ExprFmtCtxBase Uses

func ExprFmtCtxBase(evalCtx *tree.EvalContext) *tree.FmtCtx

ExprFmtCtxBase produces a FmtCtx used for serializing expressions; a proper IndexedVar formatting function needs to be added on. It replaces placeholders with their values.

func GeneratePlanDiagramURL Uses

func GeneratePlanDiagramURL(
    sql string, flows map[roachpb.NodeID]*FlowSpec, showInputTypes bool,
) (string, url.URL, error)

GeneratePlanDiagramURL generates the json data for a flow diagram and a URL which encodes the diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.

func GetAggregateFuncIdx Uses

func GetAggregateFuncIdx(funcName string) (int32, error)

GetAggregateFuncIdx converts the aggregate function name to the enum value with the same string representation.

func GetWindowFuncIdx Uses

func GetWindowFuncIdx(funcName string) (int32, error)

GetWindowFuncIdx converts the window function name to the enum value with the same string representation.

func GetWindowFunctionInfo Uses

func GetWindowFunctionInfo(
    fn WindowerSpec_Func, inputTypes ...*types.T,
) (windowConstructor func(*tree.EvalContext) tree.WindowFunc, returnType *types.T, err error)

GetWindowFunctionInfo returns the windowFunc constructor and the return type when the given fn is applied to the given inputTypes.

func RegisterDistSQLServer Uses

func RegisterDistSQLServer(s *grpc.Server, srv DistSQLServer)

type AggregateConstructor Uses

type AggregateConstructor func(*tree.EvalContext, tree.Datums) tree.AggregateFunc

AggregateConstructor is a function that creates an aggregate function.

func GetAggregateConstructor Uses

func GetAggregateConstructor(
    evalCtx *tree.EvalContext,
    semaCtx *tree.SemaContext,
    aggInfo *AggregatorSpec_Aggregation,
    inputTypes []*types.T,
) (constructor AggregateConstructor, arguments tree.Datums, outputType *types.T, err error)

GetAggregateConstructor processes the specification of a single aggregate function.

func GetAggregateInfo Uses

func GetAggregateInfo(
    fn AggregatorSpec_Func, inputTypes ...*types.T,
) (aggregateConstructor AggregateConstructor, returnType *types.T, err error)

GetAggregateInfo returns the aggregate constructor and the return type for the given aggregate function when applied to the given types.

type AggregatorSpec Uses

type AggregatorSpec struct {
    Type AggregatorSpec_Type `protobuf:"varint,5,opt,name=type,enum=cockroach.sql.distsqlrun.AggregatorSpec_Type" json:"type"`
    // The group key is a subset of the columns in the input stream schema on the
    // basis of which we define our groups.
    GroupCols    []uint32                     `protobuf:"varint,2,rep,packed,name=group_cols,json=groupCols" json:"group_cols,omitempty"`
    Aggregations []AggregatorSpec_Aggregation `protobuf:"bytes,3,rep,name=aggregations" json:"aggregations"`
    // A subset of the GROUP BY columns which are ordered in the input.
    OrderedGroupCols []uint32 `protobuf:"varint,4,rep,packed,name=ordered_group_cols,json=orderedGroupCols" json:"ordered_group_cols,omitempty"`
}

AggregatorSpec is the specification for an "aggregator" (processor core type, not the logical plan computation stage). An aggregator performs 'aggregation' in the SQL sense in that it groups rows and computes an aggregate for each group. The group is configured using the group key. The aggregator can be configured with one or more aggregation functions.

The "internal columns" of an Aggregator map 1-1 to the aggregations.

func (*AggregatorSpec) Descriptor Uses

func (*AggregatorSpec) Descriptor() ([]byte, []int)

func (*AggregatorSpec) IsRowCount Uses

func (spec *AggregatorSpec) IsRowCount() bool

IsRowCount returns true if the aggregator spec is scalar and has a single COUNT_ROWS aggregation with no FILTER or DISTINCT.

func (*AggregatorSpec) IsScalar Uses

func (spec *AggregatorSpec) IsScalar() bool

IsScalar returns whether the aggregate function is in scalar context.

func (*AggregatorSpec) Marshal Uses

func (m *AggregatorSpec) Marshal() (dAtA []byte, err error)

func (*AggregatorSpec) MarshalTo Uses

func (m *AggregatorSpec) MarshalTo(dAtA []byte) (int, error)

func (*AggregatorSpec) ProtoMessage Uses

func (*AggregatorSpec) ProtoMessage()

func (*AggregatorSpec) Reset Uses

func (m *AggregatorSpec) Reset()

func (*AggregatorSpec) Size Uses

func (m *AggregatorSpec) Size() (n int)

func (*AggregatorSpec) String Uses

func (m *AggregatorSpec) String() string

func (*AggregatorSpec) Unmarshal Uses

func (m *AggregatorSpec) Unmarshal(dAtA []byte) error

func (*AggregatorSpec) XXX_DiscardUnknown Uses

func (m *AggregatorSpec) XXX_DiscardUnknown()

func (*AggregatorSpec) XXX_Marshal Uses

func (m *AggregatorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AggregatorSpec) XXX_Merge Uses

func (dst *AggregatorSpec) XXX_Merge(src proto.Message)

func (*AggregatorSpec) XXX_Size Uses

func (m *AggregatorSpec) XXX_Size() int

func (*AggregatorSpec) XXX_Unmarshal Uses

func (m *AggregatorSpec) XXX_Unmarshal(b []byte) error

type AggregatorSpec_Aggregation Uses

type AggregatorSpec_Aggregation struct {
    Func AggregatorSpec_Func `protobuf:"varint,1,opt,name=func,enum=cockroach.sql.distsqlrun.AggregatorSpec_Func" json:"func"`
    // Aggregation functions with distinct = true behave as you would
    // expect '<FUNC> DISTINCT' to operate; the default behavior is that
    // of the '<FUNC> ALL' operation.
    Distinct bool `protobuf:"varint,2,opt,name=distinct" json:"distinct"`
    // The column index specifies the argument(s) to the aggregator function.
    //
    // Most aggregations take one argument.
    // COUNT_ROWS takes no arguments.
    // FINAL_STDDEV and FINAL_VARIANCE take three arguments (SQRDIFF, SUM,
    // COUNT).
    ColIdx []uint32 `protobuf:"varint,5,rep,name=col_idx,json=colIdx" json:"col_idx,omitempty"`
    // If set, this column index specifies a boolean argument; rows for which
    // this value is not true don't contribute to this aggregation. This enables
    // the filter clause, e.g.:
    //   SELECT SUM(x) FILTER (WHERE y > 1), SUM(x) FILTER (WHERE y < 1) FROM t
    FilterColIdx *uint32 `protobuf:"varint,4,opt,name=filter_col_idx,json=filterColIdx" json:"filter_col_idx,omitempty"`
    // Arguments are const expressions passed to aggregation functions.
    Arguments []Expression `protobuf:"bytes,6,rep,name=arguments" json:"arguments"`
}

func (*AggregatorSpec_Aggregation) Descriptor Uses

func (*AggregatorSpec_Aggregation) Descriptor() ([]byte, []int)

func (AggregatorSpec_Aggregation) Equals Uses

func (a AggregatorSpec_Aggregation) Equals(b AggregatorSpec_Aggregation) bool

Equals returns true if two aggregation specifiers are identical (and thus will always yield the same result).

func (*AggregatorSpec_Aggregation) Marshal Uses

func (m *AggregatorSpec_Aggregation) Marshal() (dAtA []byte, err error)

func (*AggregatorSpec_Aggregation) MarshalTo Uses

func (m *AggregatorSpec_Aggregation) MarshalTo(dAtA []byte) (int, error)

func (*AggregatorSpec_Aggregation) ProtoMessage Uses

func (*AggregatorSpec_Aggregation) ProtoMessage()

func (*AggregatorSpec_Aggregation) Reset Uses

func (m *AggregatorSpec_Aggregation) Reset()

func (*AggregatorSpec_Aggregation) Size Uses

func (m *AggregatorSpec_Aggregation) Size() (n int)

func (*AggregatorSpec_Aggregation) String Uses

func (m *AggregatorSpec_Aggregation) String() string

func (*AggregatorSpec_Aggregation) Unmarshal Uses

func (m *AggregatorSpec_Aggregation) Unmarshal(dAtA []byte) error

func (*AggregatorSpec_Aggregation) XXX_DiscardUnknown Uses

func (m *AggregatorSpec_Aggregation) XXX_DiscardUnknown()

func (*AggregatorSpec_Aggregation) XXX_Marshal Uses

func (m *AggregatorSpec_Aggregation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AggregatorSpec_Aggregation) XXX_Merge Uses

func (dst *AggregatorSpec_Aggregation) XXX_Merge(src proto.Message)

func (*AggregatorSpec_Aggregation) XXX_Size Uses

func (m *AggregatorSpec_Aggregation) XXX_Size() int

func (*AggregatorSpec_Aggregation) XXX_Unmarshal Uses

func (m *AggregatorSpec_Aggregation) XXX_Unmarshal(b []byte) error

type AggregatorSpec_Func Uses

type AggregatorSpec_Func int32

These mirror the aggregate functions supported by sql/parser. See sql/parser/aggregate_builtins.go.

const (
    AggregatorSpec_ANY_NOT_NULL   AggregatorSpec_Func = 0
    AggregatorSpec_AVG            AggregatorSpec_Func = 1
    AggregatorSpec_BOOL_AND       AggregatorSpec_Func = 2
    AggregatorSpec_BOOL_OR        AggregatorSpec_Func = 3
    AggregatorSpec_CONCAT_AGG     AggregatorSpec_Func = 4
    AggregatorSpec_COUNT          AggregatorSpec_Func = 5
    AggregatorSpec_MAX            AggregatorSpec_Func = 7
    AggregatorSpec_MIN            AggregatorSpec_Func = 8
    AggregatorSpec_STDDEV         AggregatorSpec_Func = 9
    AggregatorSpec_SUM            AggregatorSpec_Func = 10
    AggregatorSpec_SUM_INT        AggregatorSpec_Func = 11
    AggregatorSpec_VARIANCE       AggregatorSpec_Func = 12
    AggregatorSpec_XOR_AGG        AggregatorSpec_Func = 13
    AggregatorSpec_COUNT_ROWS     AggregatorSpec_Func = 14
    AggregatorSpec_SQRDIFF        AggregatorSpec_Func = 15
    AggregatorSpec_FINAL_VARIANCE AggregatorSpec_Func = 16
    AggregatorSpec_FINAL_STDDEV   AggregatorSpec_Func = 17
    AggregatorSpec_ARRAY_AGG      AggregatorSpec_Func = 18
    AggregatorSpec_JSON_AGG       AggregatorSpec_Func = 19
    // JSONB_AGG is an alias for JSON_AGG, they do the same thing.
    AggregatorSpec_JSONB_AGG            AggregatorSpec_Func = 20
    AggregatorSpec_STRING_AGG           AggregatorSpec_Func = 21
    AggregatorSpec_BIT_AND              AggregatorSpec_Func = 22
    AggregatorSpec_BIT_OR               AggregatorSpec_Func = 23
    AggregatorSpec_CORR                 AggregatorSpec_Func = 24
    AggregatorSpec_PERCENTILE_DISC_IMPL AggregatorSpec_Func = 25
    AggregatorSpec_PERCENTILE_CONT_IMPL AggregatorSpec_Func = 26
    AggregatorSpec_JSON_OBJECT_AGG      AggregatorSpec_Func = 27
    AggregatorSpec_JSONB_OBJECT_AGG     AggregatorSpec_Func = 28
    AggregatorSpec_VAR_POP              AggregatorSpec_Func = 29
    AggregatorSpec_STDDEV_POP           AggregatorSpec_Func = 30
)

func (AggregatorSpec_Func) Enum Uses

func (x AggregatorSpec_Func) Enum() *AggregatorSpec_Func

func (AggregatorSpec_Func) EnumDescriptor Uses

func (AggregatorSpec_Func) EnumDescriptor() ([]byte, []int)

func (AggregatorSpec_Func) String Uses

func (x AggregatorSpec_Func) String() string

func (*AggregatorSpec_Func) UnmarshalJSON Uses

func (x *AggregatorSpec_Func) UnmarshalJSON(data []byte) error

type AggregatorSpec_Type Uses

type AggregatorSpec_Type int32
const (
    // This setting exists just for backwards compatibility; it's equivalent to
    // SCALAR when there are no grouping columns, and to NON_SCALAR when there
    // are grouping columns.
    AggregatorSpec_AUTO AggregatorSpec_Type = 0
    // A scalar aggregation has no grouping columns and always returns one
    // result row.
    AggregatorSpec_SCALAR AggregatorSpec_Type = 1
    // A non-scalar aggregation returns no rows if there are no input rows; it
    // may or may not have grouping columns.
    AggregatorSpec_NON_SCALAR AggregatorSpec_Type = 2
)

func (AggregatorSpec_Type) Enum Uses

func (x AggregatorSpec_Type) Enum() *AggregatorSpec_Type

func (AggregatorSpec_Type) EnumDescriptor Uses

func (AggregatorSpec_Type) EnumDescriptor() ([]byte, []int)

func (AggregatorSpec_Type) String Uses

func (x AggregatorSpec_Type) String() string

func (*AggregatorSpec_Type) UnmarshalJSON Uses

func (x *AggregatorSpec_Type) UnmarshalJSON(data []byte) error

type BackfillerSpec Uses

type BackfillerSpec struct {
    Type  BackfillerSpec_Type    `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.BackfillerSpec_Type" json:"type"`
    Table descpb.TableDescriptor `protobuf:"bytes,2,opt,name=table" json:"table"`
    // Sections of the table to be backfilled.
    Spans []TableReaderSpan `protobuf:"bytes,3,rep,name=spans" json:"spans"`
    // Run the backfill for approximately this duration.
    // The backfill will always process at least one backfill chunk.
    Duration time.Duration `protobuf:"varint,4,opt,name=duration,casttype=time.Duration" json:"duration"`
    // The backfill involves a complete table scan in chunks,
    // where each chunk is a transactional read of a set of rows
    // along with a backfill for the rows. This is the maximum number
    // of entries backfilled per chunk.
    ChunkSize int64 `protobuf:"varint,5,opt,name=chunk_size,json=chunkSize" json:"chunk_size"`
    // The timestamp to perform index backfill historical scans at.
    ReadAsOf hlc.Timestamp `protobuf:"bytes,7,opt,name=readAsOf" json:"readAsOf"`
}

BackfillerSpec is the specification for a "schema change backfiller". The created backfill processor runs a backfill for the first mutations in the table descriptor mutation list with the same mutation id and type. A backfiller processor performs KV operations to retrieve rows for a table and backfills the new indexes/columns contained in the table descriptor. It checkpoints its progress by updating the table descriptor in the database, and doesn't emit any rows nor support any post-processing.

func (*BackfillerSpec) Descriptor Uses

func (*BackfillerSpec) Descriptor() ([]byte, []int)

func (*BackfillerSpec) Marshal Uses

func (m *BackfillerSpec) Marshal() (dAtA []byte, err error)

func (*BackfillerSpec) MarshalTo Uses

func (m *BackfillerSpec) MarshalTo(dAtA []byte) (int, error)

func (*BackfillerSpec) ProtoMessage Uses

func (*BackfillerSpec) ProtoMessage()

func (*BackfillerSpec) Reset Uses

func (m *BackfillerSpec) Reset()

func (*BackfillerSpec) Size Uses

func (m *BackfillerSpec) Size() (n int)

func (*BackfillerSpec) String Uses

func (m *BackfillerSpec) String() string

func (*BackfillerSpec) Unmarshal Uses

func (m *BackfillerSpec) Unmarshal(dAtA []byte) error

func (*BackfillerSpec) XXX_DiscardUnknown Uses

func (m *BackfillerSpec) XXX_DiscardUnknown()

func (*BackfillerSpec) XXX_Marshal Uses

func (m *BackfillerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BackfillerSpec) XXX_Merge Uses

func (dst *BackfillerSpec) XXX_Merge(src proto.Message)

func (*BackfillerSpec) XXX_Size Uses

func (m *BackfillerSpec) XXX_Size() int

func (*BackfillerSpec) XXX_Unmarshal Uses

func (m *BackfillerSpec) XXX_Unmarshal(b []byte) error

type BackfillerSpec_Type Uses

type BackfillerSpec_Type int32
const (
    BackfillerSpec_Invalid BackfillerSpec_Type = 0
    BackfillerSpec_Column  BackfillerSpec_Type = 1
    BackfillerSpec_Index   BackfillerSpec_Type = 2
)

func (BackfillerSpec_Type) Enum Uses

func (x BackfillerSpec_Type) Enum() *BackfillerSpec_Type

func (BackfillerSpec_Type) EnumDescriptor Uses

func (BackfillerSpec_Type) EnumDescriptor() ([]byte, []int)

func (BackfillerSpec_Type) String Uses

func (x BackfillerSpec_Type) String() string

func (*BackfillerSpec_Type) UnmarshalJSON Uses

func (x *BackfillerSpec_Type) UnmarshalJSON(data []byte) error

type BackupDataSpec Uses

type BackupDataSpec struct {
    Spans            []roachpb.Span                 `protobuf:"bytes,1,rep,name=spans" json:"spans"`
    IntroducedSpans  []roachpb.Span                 `protobuf:"bytes,2,rep,name=introduced_spans,json=introducedSpans" json:"introduced_spans"`
    DefaultURI       string                         `protobuf:"bytes,3,opt,name=default_uri,json=defaultUri" json:"default_uri"`
    URIsByLocalityKV map[string]string              `protobuf:"bytes,4,rep,name=uris_by_locality_kv,json=urisByLocalityKv" json:"uris_by_locality_kv,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    MVCCFilter       roachpb.MVCCFilter             `protobuf:"varint,5,opt,name=mvcc_filter,json=mvccFilter,enum=cockroach.roachpb.MVCCFilter" json:"mvcc_filter"`
    Encryption       *roachpb.FileEncryptionOptions `protobuf:"bytes,6,opt,name=encryption" json:"encryption,omitempty"`
    BackupStartTime  hlc.Timestamp                  `protobuf:"bytes,7,opt,name=backup_start_time,json=backupStartTime" json:"backup_start_time"`
    BackupEndTime    hlc.Timestamp                  `protobuf:"bytes,8,opt,name=backup_end_time,json=backupEndTime" json:"backup_end_time"`
    // PKIDs is used to convert result from an ExportRequest into row count
    // information passed back to track progress in the backup job.
    PKIDs map[uint64]bool `protobuf:"bytes,9,rep,name=pk_ids,json=pkIds" json:"pk_ids,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
    // User who initiated the backup. This is used to check access privileges
    // when using FileTable ExternalStorage.
    User string `protobuf:"bytes,10,opt,name=user" json:"user"`
}

func (*BackupDataSpec) Descriptor Uses

func (*BackupDataSpec) Descriptor() ([]byte, []int)

func (*BackupDataSpec) Marshal Uses

func (m *BackupDataSpec) Marshal() (dAtA []byte, err error)

func (*BackupDataSpec) MarshalTo Uses

func (m *BackupDataSpec) MarshalTo(dAtA []byte) (int, error)

func (*BackupDataSpec) ProtoMessage Uses

func (*BackupDataSpec) ProtoMessage()

func (*BackupDataSpec) Reset Uses

func (m *BackupDataSpec) Reset()

func (*BackupDataSpec) Size Uses

func (m *BackupDataSpec) Size() (n int)

func (*BackupDataSpec) String Uses

func (m *BackupDataSpec) String() string

func (*BackupDataSpec) Unmarshal Uses

func (m *BackupDataSpec) Unmarshal(dAtA []byte) error

func (*BackupDataSpec) XXX_DiscardUnknown Uses

func (m *BackupDataSpec) XXX_DiscardUnknown()

func (*BackupDataSpec) XXX_Marshal Uses

func (m *BackupDataSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BackupDataSpec) XXX_Merge Uses

func (dst *BackupDataSpec) XXX_Merge(src proto.Message)

func (*BackupDataSpec) XXX_Size Uses

func (m *BackupDataSpec) XXX_Size() int

func (*BackupDataSpec) XXX_Unmarshal Uses

func (m *BackupDataSpec) XXX_Unmarshal(b []byte) error

type BulkRowWriterSpec Uses

type BulkRowWriterSpec struct {
    Table descpb.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
}

BulkRowWriterSpec is the specification for a processor that consumes rows and writes them to a target table using AddSSTable. It outputs a BulkOpSummary.

func (*BulkRowWriterSpec) Descriptor Uses

func (*BulkRowWriterSpec) Descriptor() ([]byte, []int)

func (*BulkRowWriterSpec) Marshal Uses

func (m *BulkRowWriterSpec) Marshal() (dAtA []byte, err error)

func (*BulkRowWriterSpec) MarshalTo Uses

func (m *BulkRowWriterSpec) MarshalTo(dAtA []byte) (int, error)

func (*BulkRowWriterSpec) ProtoMessage Uses

func (*BulkRowWriterSpec) ProtoMessage()

func (*BulkRowWriterSpec) Reset Uses

func (m *BulkRowWriterSpec) Reset()

func (*BulkRowWriterSpec) Size Uses

func (m *BulkRowWriterSpec) Size() (n int)

func (*BulkRowWriterSpec) String Uses

func (m *BulkRowWriterSpec) String() string

func (*BulkRowWriterSpec) Unmarshal Uses

func (m *BulkRowWriterSpec) Unmarshal(dAtA []byte) error

func (*BulkRowWriterSpec) XXX_DiscardUnknown Uses

func (m *BulkRowWriterSpec) XXX_DiscardUnknown()

func (*BulkRowWriterSpec) XXX_Marshal Uses

func (m *BulkRowWriterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BulkRowWriterSpec) XXX_Merge Uses

func (dst *BulkRowWriterSpec) XXX_Merge(src proto.Message)

func (*BulkRowWriterSpec) XXX_Size Uses

func (m *BulkRowWriterSpec) XXX_Size() int

func (*BulkRowWriterSpec) XXX_Unmarshal Uses

func (m *BulkRowWriterSpec) XXX_Unmarshal(b []byte) error

type BytesEncodeFormat Uses

type BytesEncodeFormat int32

BytesEncodeFormat is the configuration for bytes to string conversions.

const (
    BytesEncodeFormat_HEX    BytesEncodeFormat = 0
    BytesEncodeFormat_ESCAPE BytesEncodeFormat = 1
    BytesEncodeFormat_BASE64 BytesEncodeFormat = 2
)

func (BytesEncodeFormat) Enum Uses

func (x BytesEncodeFormat) Enum() *BytesEncodeFormat

func (BytesEncodeFormat) EnumDescriptor Uses

func (BytesEncodeFormat) EnumDescriptor() ([]byte, []int)

func (BytesEncodeFormat) String Uses

func (x BytesEncodeFormat) String() string

func (*BytesEncodeFormat) UnmarshalJSON Uses

func (x *BytesEncodeFormat) UnmarshalJSON(data []byte) error

type CSVWriterSpec Uses

type CSVWriterSpec struct {
    // destination as a cloud.ExternalStorage URI pointing to an export store
    // location (directory).
    Destination string             `protobuf:"bytes,1,opt,name=destination" json:"destination"`
    NamePattern string             `protobuf:"bytes,2,opt,name=name_pattern,json=namePattern" json:"name_pattern"`
    Options     roachpb.CSVOptions `protobuf:"bytes,3,opt,name=options" json:"options"`
    // chunk_rows is num rows to write per file. 0 = no limit.
    ChunkRows int64 `protobuf:"varint,4,opt,name=chunk_rows,json=chunkRows" json:"chunk_rows"`
    // compression_codec specifies compression used for exported file.
    CompressionCodec FileCompression `protobuf:"varint,5,opt,name=compression_codec,json=compressionCodec,enum=cockroach.sql.distsqlrun.FileCompression" json:"compression_codec"`
    // User who initiated the export. This is used to check access privileges
    // when using FileTable ExternalStorage.
    User string `protobuf:"bytes,6,opt,name=user" json:"user"`
}

CSVWriterSpec is the specification for a processor that consumes rows and writes them to CSV files at the specified URI. It outputs a row per file written with the file name, row count and byte size.

func (*CSVWriterSpec) Descriptor Uses

func (*CSVWriterSpec) Descriptor() ([]byte, []int)

func (*CSVWriterSpec) Marshal Uses

func (m *CSVWriterSpec) Marshal() (dAtA []byte, err error)

func (*CSVWriterSpec) MarshalTo Uses

func (m *CSVWriterSpec) MarshalTo(dAtA []byte) (int, error)

func (*CSVWriterSpec) ProtoMessage Uses

func (*CSVWriterSpec) ProtoMessage()

func (*CSVWriterSpec) Reset Uses

func (m *CSVWriterSpec) Reset()

func (*CSVWriterSpec) Size Uses

func (m *CSVWriterSpec) Size() (n int)

func (*CSVWriterSpec) String Uses

func (m *CSVWriterSpec) String() string

func (*CSVWriterSpec) Unmarshal Uses

func (m *CSVWriterSpec) Unmarshal(dAtA []byte) error

func (*CSVWriterSpec) XXX_DiscardUnknown Uses

func (m *CSVWriterSpec) XXX_DiscardUnknown()

func (*CSVWriterSpec) XXX_Marshal Uses

func (m *CSVWriterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CSVWriterSpec) XXX_Merge Uses

func (dst *CSVWriterSpec) XXX_Merge(src proto.Message)

func (*CSVWriterSpec) XXX_Size Uses

func (m *CSVWriterSpec) XXX_Size() int

func (*CSVWriterSpec) XXX_Unmarshal Uses

func (m *CSVWriterSpec) XXX_Unmarshal(b []byte) error

type CallbackMetadataSource Uses

type CallbackMetadataSource struct {
    DrainMetaCb func(context.Context) []ProducerMetadata
}

CallbackMetadataSource is a utility struct that implements the MetadataSource interface by calling a provided callback.

func (CallbackMetadataSource) DrainMeta Uses

func (s CallbackMetadataSource) DrainMeta(ctx context.Context) []ProducerMetadata

DrainMeta is part of the MetadataSource interface.

type ChangeAggregatorSpec Uses

type ChangeAggregatorSpec struct {
    Watches []ChangeAggregatorSpec_Watch `protobuf:"bytes,1,rep,name=watches" json:"watches"`
    // Feed is the specification for this changefeed.
    Feed jobspb.ChangefeedDetails `protobuf:"bytes,2,opt,name=feed" json:"feed"`
    // User who initiated the changefeed. This is used to check access privileges
    // when using FileTable ExternalStorage.
    User string `protobuf:"bytes,3,opt,name=user" json:"user"`
}

ChangeAggregatorSpec is the specification for a processor that watches for changes in a set of spans. Each span may cross multiple ranges.

func (*ChangeAggregatorSpec) Descriptor Uses

func (*ChangeAggregatorSpec) Descriptor() ([]byte, []int)

func (*ChangeAggregatorSpec) Marshal Uses

func (m *ChangeAggregatorSpec) Marshal() (dAtA []byte, err error)

func (*ChangeAggregatorSpec) MarshalTo Uses

func (m *ChangeAggregatorSpec) MarshalTo(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec) ProtoMessage Uses

func (*ChangeAggregatorSpec) ProtoMessage()

func (*ChangeAggregatorSpec) Reset Uses

func (m *ChangeAggregatorSpec) Reset()

func (*ChangeAggregatorSpec) Size Uses

func (m *ChangeAggregatorSpec) Size() (n int)

func (*ChangeAggregatorSpec) String Uses

func (m *ChangeAggregatorSpec) String() string

func (*ChangeAggregatorSpec) Unmarshal Uses

func (m *ChangeAggregatorSpec) Unmarshal(dAtA []byte) error

func (*ChangeAggregatorSpec) XXX_DiscardUnknown Uses

func (m *ChangeAggregatorSpec) XXX_DiscardUnknown()

func (*ChangeAggregatorSpec) XXX_Marshal Uses

func (m *ChangeAggregatorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ChangeAggregatorSpec) XXX_Merge Uses

func (dst *ChangeAggregatorSpec) XXX_Merge(src proto.Message)

func (*ChangeAggregatorSpec) XXX_Size Uses

func (m *ChangeAggregatorSpec) XXX_Size() int

func (*ChangeAggregatorSpec) XXX_Unmarshal Uses

func (m *ChangeAggregatorSpec) XXX_Unmarshal(b []byte) error

type ChangeAggregatorSpec_Watch Uses

type ChangeAggregatorSpec_Watch struct {
    InitialResolved hlc.Timestamp `protobuf:"bytes,1,opt,name=initial_resolved,json=initialResolved" json:"initial_resolved"`
    Span            roachpb.Span  `protobuf:"bytes,2,opt,name=span" json:"span"`
}

func (*ChangeAggregatorSpec_Watch) Descriptor Uses

func (*ChangeAggregatorSpec_Watch) Descriptor() ([]byte, []int)

func (*ChangeAggregatorSpec_Watch) Marshal Uses

func (m *ChangeAggregatorSpec_Watch) Marshal() (dAtA []byte, err error)

func (*ChangeAggregatorSpec_Watch) MarshalTo Uses

func (m *ChangeAggregatorSpec_Watch) MarshalTo(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec_Watch) ProtoMessage Uses

func (*ChangeAggregatorSpec_Watch) ProtoMessage()

func (*ChangeAggregatorSpec_Watch) Reset Uses

func (m *ChangeAggregatorSpec_Watch) Reset()

func (*ChangeAggregatorSpec_Watch) Size Uses

func (m *ChangeAggregatorSpec_Watch) Size() (n int)

func (*ChangeAggregatorSpec_Watch) String Uses

func (m *ChangeAggregatorSpec_Watch) String() string

func (*ChangeAggregatorSpec_Watch) Unmarshal Uses

func (m *ChangeAggregatorSpec_Watch) Unmarshal(dAtA []byte) error

func (*ChangeAggregatorSpec_Watch) XXX_DiscardUnknown Uses

func (m *ChangeAggregatorSpec_Watch) XXX_DiscardUnknown()

func (*ChangeAggregatorSpec_Watch) XXX_Marshal Uses

func (m *ChangeAggregatorSpec_Watch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ChangeAggregatorSpec_Watch) XXX_Merge Uses

func (dst *ChangeAggregatorSpec_Watch) XXX_Merge(src proto.Message)

func (*ChangeAggregatorSpec_Watch) XXX_Size Uses

func (m *ChangeAggregatorSpec_Watch) XXX_Size() int

func (*ChangeAggregatorSpec_Watch) XXX_Unmarshal Uses

func (m *ChangeAggregatorSpec_Watch) XXX_Unmarshal(b []byte) error

type ChangeFrontierSpec Uses

type ChangeFrontierSpec struct {
    // TrackedSpans is the entire span set being watched. Once all these spans
    // have been resolved at a certain timestamp, then it's safe to resolve the
    // changefeed at that timestamp.
    TrackedSpans []roachpb.Span `protobuf:"bytes,1,rep,name=tracked_spans,json=trackedSpans" json:"tracked_spans"`
    // Feed is the specification for this changefeed.
    Feed jobspb.ChangefeedDetails `protobuf:"bytes,2,opt,name=feed" json:"feed"`
    // JobID is the id of this changefeed in the system jobs.
    JobID int64 `protobuf:"varint,3,opt,name=job_id,json=jobId" json:"job_id"`
    // User who initiated the changefeed. This is used to check access privileges
    // when using FileTable ExternalStorage.
    User string `protobuf:"bytes,4,opt,name=user" json:"user"`
}

ChangeFrontierSpec is the specification for a processor that receives span-level resolved timestamps, tracks them, and emits the changefeed-level resolved timestamp whenever it changes.

func (*ChangeFrontierSpec) Descriptor Uses

func (*ChangeFrontierSpec) Descriptor() ([]byte, []int)

func (*ChangeFrontierSpec) Marshal Uses

func (m *ChangeFrontierSpec) Marshal() (dAtA []byte, err error)

func (*ChangeFrontierSpec) MarshalTo Uses

func (m *ChangeFrontierSpec) MarshalTo(dAtA []byte) (int, error)

func (*ChangeFrontierSpec) ProtoMessage Uses

func (*ChangeFrontierSpec) ProtoMessage()

func (*ChangeFrontierSpec) Reset Uses

func (m *ChangeFrontierSpec) Reset()

func (*ChangeFrontierSpec) Size Uses

func (m *ChangeFrontierSpec) Size() (n int)

func (*ChangeFrontierSpec) String Uses

func (m *ChangeFrontierSpec) String() string

func (*ChangeFrontierSpec) Unmarshal Uses

func (m *ChangeFrontierSpec) Unmarshal(dAtA []byte) error

func (*ChangeFrontierSpec) XXX_DiscardUnknown Uses

func (m *ChangeFrontierSpec) XXX_DiscardUnknown()

func (*ChangeFrontierSpec) XXX_Marshal Uses

func (m *ChangeFrontierSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ChangeFrontierSpec) XXX_Merge Uses

func (dst *ChangeFrontierSpec) XXX_Merge(src proto.Message)

func (*ChangeFrontierSpec) XXX_Size Uses

func (m *ChangeFrontierSpec) XXX_Size() int

func (*ChangeFrontierSpec) XXX_Unmarshal Uses

func (m *ChangeFrontierSpec) XXX_Unmarshal(b []byte) error

type Columns Uses

type Columns struct {
    Columns []uint32 `protobuf:"varint,1,rep,packed,name=columns" json:"columns,omitempty"`
}

func (*Columns) Descriptor Uses

func (*Columns) Descriptor() ([]byte, []int)

func (*Columns) Marshal Uses

func (m *Columns) Marshal() (dAtA []byte, err error)

func (*Columns) MarshalTo Uses

func (m *Columns) MarshalTo(dAtA []byte) (int, error)

func (*Columns) ProtoMessage Uses

func (*Columns) ProtoMessage()

func (*Columns) Reset Uses

func (m *Columns) Reset()

func (*Columns) Size Uses

func (m *Columns) Size() (n int)

func (*Columns) String Uses

func (m *Columns) String() string

func (*Columns) Unmarshal Uses

func (m *Columns) Unmarshal(dAtA []byte) error

func (*Columns) XXX_DiscardUnknown Uses

func (m *Columns) XXX_DiscardUnknown()

func (*Columns) XXX_Marshal Uses

func (m *Columns) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Columns) XXX_Merge Uses

func (dst *Columns) XXX_Merge(src proto.Message)

func (*Columns) XXX_Size Uses

func (m *Columns) XXX_Size() int

func (*Columns) XXX_Unmarshal Uses

func (m *Columns) XXX_Unmarshal(b []byte) error

type ConsumerHandshake Uses

type ConsumerHandshake struct {
    // consumer_scheduled is true if the flow that's consuming this stream has
    // already been scheduled and so it is ready to consume data. If this is
    // false, then the consumer has not yet been scheduled. In this case, the
    // server (i.e. the consumer node) will send another ConsumerHandshake with
    // consumer_scheduled = true when the consumer is finally scheduled (unless
    // the scheduling timeout fires first, in which case the stream will be
    // closed server-side).
    ConsumerScheduled bool `protobuf:"varint,1,opt,name=consumer_scheduled,json=consumerScheduled" json:"consumer_scheduled"`
    // If consumer_scheduled is false, then this indicates the deadline for the
    // scheduling of the consumer flow. If the flow is not scheduled within that
    // deadline, this stream will be disconnected by the server-side.
    ConsumerScheduleDeadline *time.Time `protobuf:"bytes,2,opt,name=consumer_schedule_deadline,json=consumerScheduleDeadline,stdtime" json:"consumer_schedule_deadline,omitempty"`
    // The server's DistSQL version range.
    Version            DistSQLVersion `protobuf:"varint,3,opt,name=version,casttype=DistSQLVersion" json:"version"`
    MinAcceptedVersion DistSQLVersion `protobuf:"varint,4,opt,name=min_accepted_version,json=minAcceptedVersion,casttype=DistSQLVersion" json:"min_accepted_version"`
}

ConsumerHandshake is the first one or two messages sent in the consumer->producer direction on a stream. It informs the producer about the status of the consumer flow. Introduced in version 1.1 for future use by producers.

func (*ConsumerHandshake) Descriptor Uses

func (*ConsumerHandshake) Descriptor() ([]byte, []int)

func (*ConsumerHandshake) Marshal Uses

func (m *ConsumerHandshake) Marshal() (dAtA []byte, err error)

func (*ConsumerHandshake) MarshalTo Uses

func (m *ConsumerHandshake) MarshalTo(dAtA []byte) (int, error)

func (*ConsumerHandshake) ProtoMessage Uses

func (*ConsumerHandshake) ProtoMessage()

func (*ConsumerHandshake) Reset Uses

func (m *ConsumerHandshake) Reset()

func (*ConsumerHandshake) Size Uses

func (m *ConsumerHandshake) Size() (n int)

func (*ConsumerHandshake) String Uses

func (m *ConsumerHandshake) String() string

func (*ConsumerHandshake) Unmarshal Uses

func (m *ConsumerHandshake) Unmarshal(dAtA []byte) error

func (*ConsumerHandshake) XXX_DiscardUnknown Uses

func (m *ConsumerHandshake) XXX_DiscardUnknown()

func (*ConsumerHandshake) XXX_Marshal Uses

func (m *ConsumerHandshake) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ConsumerHandshake) XXX_Merge Uses

func (dst *ConsumerHandshake) XXX_Merge(src proto.Message)

func (*ConsumerHandshake) XXX_Size Uses

func (m *ConsumerHandshake) XXX_Size() int

func (*ConsumerHandshake) XXX_Unmarshal Uses

func (m *ConsumerHandshake) XXX_Unmarshal(b []byte) error

type ConsumerSignal Uses

type ConsumerSignal struct {
    // The consumer is done (doesn't need to consume any more rows) and is asking
    // the producer to push whatever trailing metadata it has and close its
    // stream.
    DrainRequest *DrainRequest `protobuf:"bytes,1,opt,name=drain_request,json=drainRequest" json:"drain_request,omitempty"`
    // Used in the RunSyncFlow case; the first message on the client stream must
    // contain this message.
    SetupFlowRequest *SetupFlowRequest `protobuf:"bytes,2,opt,name=setup_flow_request,json=setupFlowRequest" json:"setup_flow_request,omitempty"`
    // Consumer->Producer handshake messages. See message definition.
    Handshake *ConsumerHandshake `protobuf:"bytes,3,opt,name=handshake" json:"handshake,omitempty"`
}

ConsumerSignal are messages flowing from consumer to producer (so, from RPC server to client) for the FlowStream RPC.

func (*ConsumerSignal) Descriptor Uses

func (*ConsumerSignal) Descriptor() ([]byte, []int)

func (*ConsumerSignal) GetValue Uses

func (this *ConsumerSignal) GetValue() interface{}

func (*ConsumerSignal) Marshal Uses

func (m *ConsumerSignal) Marshal() (dAtA []byte, err error)

func (*ConsumerSignal) MarshalTo Uses

func (m *ConsumerSignal) MarshalTo(dAtA []byte) (int, error)

func (*ConsumerSignal) ProtoMessage Uses

func (*ConsumerSignal) ProtoMessage()

func (*ConsumerSignal) Reset Uses

func (m *ConsumerSignal) Reset()

func (*ConsumerSignal) SetValue Uses

func (this *ConsumerSignal) SetValue(value interface{}) bool

func (*ConsumerSignal) Size Uses

func (m *ConsumerSignal) Size() (n int)

func (*ConsumerSignal) String Uses

func (m *ConsumerSignal) String() string

func (*ConsumerSignal) Unmarshal Uses

func (m *ConsumerSignal) Unmarshal(dAtA []byte) error

func (*ConsumerSignal) XXX_DiscardUnknown Uses

func (m *ConsumerSignal) XXX_DiscardUnknown()

func (*ConsumerSignal) XXX_Marshal Uses

func (m *ConsumerSignal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ConsumerSignal) XXX_Merge Uses

func (dst *ConsumerSignal) XXX_Merge(src proto.Message)

func (*ConsumerSignal) XXX_Size Uses

func (m *ConsumerSignal) XXX_Size() int

func (*ConsumerSignal) XXX_Unmarshal Uses

func (m *ConsumerSignal) XXX_Unmarshal(b []byte) error

type DatumInfo Uses

type DatumInfo struct {
    Encoding descpb.DatumEncoding `protobuf:"varint,1,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
    Type     *types.T             `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
}

func (*DatumInfo) Descriptor Uses

func (*DatumInfo) Descriptor() ([]byte, []int)

func (*DatumInfo) Marshal Uses

func (m *DatumInfo) Marshal() (dAtA []byte, err error)

func (*DatumInfo) MarshalTo Uses

func (m *DatumInfo) MarshalTo(dAtA []byte) (int, error)

func (*DatumInfo) ProtoMessage Uses

func (*DatumInfo) ProtoMessage()

func (*DatumInfo) Reset Uses

func (m *DatumInfo) Reset()

func (*DatumInfo) Size Uses

func (m *DatumInfo) Size() (n int)

func (*DatumInfo) String Uses

func (m *DatumInfo) String() string

func (*DatumInfo) Unmarshal Uses

func (m *DatumInfo) Unmarshal(dAtA []byte) error

func (*DatumInfo) XXX_DiscardUnknown Uses

func (m *DatumInfo) XXX_DiscardUnknown()

func (*DatumInfo) XXX_Marshal Uses

func (m *DatumInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DatumInfo) XXX_Merge Uses

func (dst *DatumInfo) XXX_Merge(src proto.Message)

func (*DatumInfo) XXX_Size Uses

func (m *DatumInfo) XXX_Size() int

func (*DatumInfo) XXX_Unmarshal Uses

func (m *DatumInfo) XXX_Unmarshal(b []byte) error

type DistSQLClient Uses

type DistSQLClient interface {
    // RunSyncFlow instantiates a flow and streams back results of that flow.
    // The request must contain one flow, and that flow must have a single mailbox
    // of the special sync response type.
    RunSyncFlow(ctx context.Context, opts ...grpc.CallOption) (DistSQL_RunSyncFlowClient, error)
    // SetupFlow instantiates a flow (subgraphs of a distributed SQL
    // computation) on the receiving node.
    SetupFlow(ctx context.Context, in *SetupFlowRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
    // FlowStream is used to push a stream of messages that is part of a flow. The
    // first message will have a StreamHeader which identifies the flow and the
    // stream (mailbox).
    //
    // The response is a stream that the consumer uses to perform a handshake and
    // to signal the producer when it wants it to start draining. The client (i.e.
    // the producer) will read from this consumer->producer stream until it has
    // sent everything it needs to send and it performs CloseSend() on the
    // producer->consumer stream; after that point the producer isn't listening
    // for consumer signals any more.
    FlowStream(ctx context.Context, opts ...grpc.CallOption) (DistSQL_FlowStreamClient, error)
}

DistSQLClient is the client API for DistSQL service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewDistSQLClient Uses

func NewDistSQLClient(cc *grpc.ClientConn) DistSQLClient

type DistSQLDrainingInfo Uses

type DistSQLDrainingInfo struct {
    Draining bool `protobuf:"varint,1,opt,name=draining" json:"draining"`
}

DistSQLDrainingInfo represents the DistSQL draining state that gets gossiped for each node. This is used by planners to avoid planning on nodes that are known to be draining.

func (*DistSQLDrainingInfo) Descriptor Uses

func (*DistSQLDrainingInfo) Descriptor() ([]byte, []int)

func (*DistSQLDrainingInfo) Marshal Uses

func (m *DistSQLDrainingInfo) Marshal() (dAtA []byte, err error)

func (*DistSQLDrainingInfo) MarshalTo Uses

func (m *DistSQLDrainingInfo) MarshalTo(dAtA []byte) (int, error)

func (*DistSQLDrainingInfo) ProtoMessage Uses

func (*DistSQLDrainingInfo) ProtoMessage()

func (*DistSQLDrainingInfo) Reset Uses

func (m *DistSQLDrainingInfo) Reset()

func (*DistSQLDrainingInfo) Size Uses

func (m *DistSQLDrainingInfo) Size() (n int)

func (*DistSQLDrainingInfo) String Uses

func (m *DistSQLDrainingInfo) String() string

func (*DistSQLDrainingInfo) Unmarshal Uses

func (m *DistSQLDrainingInfo) Unmarshal(dAtA []byte) error

func (*DistSQLDrainingInfo) XXX_DiscardUnknown Uses

func (m *DistSQLDrainingInfo) XXX_DiscardUnknown()

func (*DistSQLDrainingInfo) XXX_Marshal Uses

func (m *DistSQLDrainingInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistSQLDrainingInfo) XXX_Merge Uses

func (dst *DistSQLDrainingInfo) XXX_Merge(src proto.Message)

func (*DistSQLDrainingInfo) XXX_Size Uses

func (m *DistSQLDrainingInfo) XXX_Size() int

func (*DistSQLDrainingInfo) XXX_Unmarshal Uses

func (m *DistSQLDrainingInfo) XXX_Unmarshal(b []byte) error

type DistSQLServer Uses

type DistSQLServer interface {
    // RunSyncFlow instantiates a flow and streams back results of that flow.
    // The request must contain one flow, and that flow must have a single mailbox
    // of the special sync response type.
    RunSyncFlow(DistSQL_RunSyncFlowServer) error
    // SetupFlow instantiates a flow (subgraphs of a distributed SQL
    // computation) on the receiving node.
    SetupFlow(context.Context, *SetupFlowRequest) (*SimpleResponse, error)
    // FlowStream is used to push a stream of messages that is part of a flow. The
    // first message will have a StreamHeader which identifies the flow and the
    // stream (mailbox).
    //
    // The response is a stream that the consumer uses to perform a handshake and
    // to signal the producer when it wants it to start draining. The client (i.e.
    // the producer) will read from this consumer->producer stream until it has
    // sent everything it needs to send and it performs CloseSend() on the
    // producer->consumer stream; after that point the producer isn't listening
    // for consumer signals any more.
    FlowStream(DistSQL_FlowStreamServer) error
}

DistSQLServer is the server API for DistSQL service.

type DistSQLSpanStats Uses

type DistSQLSpanStats interface {
    tracing.SpanStats
    StatsForQueryPlan() []string
}

DistSQLSpanStats is a tracing.SpanStats that returns a list of stats to output on a query plan.

type DistSQLVersion Uses

type DistSQLVersion uint32

DistSQLVersion identifies DistSQL engine versions.

type DistSQLVersionGossipInfo Uses

type DistSQLVersionGossipInfo struct {
    Version            DistSQLVersion `protobuf:"varint,1,opt,name=version,casttype=DistSQLVersion" json:"version"`
    MinAcceptedVersion DistSQLVersion `protobuf:"varint,2,opt,name=min_accepted_version,json=minAcceptedVersion,casttype=DistSQLVersion" json:"min_accepted_version"`
}

DistSQLVersionGossipInfo represents the DistSQL server version information that gets gossiped for each node. This is used by planners to avoid planning on nodes with incompatible version during rolling cluster updates.

For the meaning of the fields, see the corresponding constants in distsqlrun/server.go.

func (*DistSQLVersionGossipInfo) Descriptor Uses

func (*DistSQLVersionGossipInfo) Descriptor() ([]byte, []int)

func (*DistSQLVersionGossipInfo) Marshal Uses

func (m *DistSQLVersionGossipInfo) Marshal() (dAtA []byte, err error)

func (*DistSQLVersionGossipInfo) MarshalTo Uses

func (m *DistSQLVersionGossipInfo) MarshalTo(dAtA []byte) (int, error)

func (*DistSQLVersionGossipInfo) ProtoMessage Uses

func (*DistSQLVersionGossipInfo) ProtoMessage()

func (*DistSQLVersionGossipInfo) Reset Uses

func (m *DistSQLVersionGossipInfo) Reset()

func (*DistSQLVersionGossipInfo) Size Uses

func (m *DistSQLVersionGossipInfo) Size() (n int)

func (*DistSQLVersionGossipInfo) String Uses

func (m *DistSQLVersionGossipInfo) String() string

func (*DistSQLVersionGossipInfo) Unmarshal Uses

func (m *DistSQLVersionGossipInfo) Unmarshal(dAtA []byte) error

func (*DistSQLVersionGossipInfo) XXX_DiscardUnknown Uses

func (m *DistSQLVersionGossipInfo) XXX_DiscardUnknown()

func (*DistSQLVersionGossipInfo) XXX_Marshal Uses

func (m *DistSQLVersionGossipInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistSQLVersionGossipInfo) XXX_Merge Uses

func (dst *DistSQLVersionGossipInfo) XXX_Merge(src proto.Message)

func (*DistSQLVersionGossipInfo) XXX_Size Uses

func (m *DistSQLVersionGossipInfo) XXX_Size() int

func (*DistSQLVersionGossipInfo) XXX_Unmarshal Uses

func (m *DistSQLVersionGossipInfo) XXX_Unmarshal(b []byte) error

type DistSQL_FlowStreamClient Uses

type DistSQL_FlowStreamClient interface {
    Send(*ProducerMessage) error
    Recv() (*ConsumerSignal, error)
    grpc.ClientStream
}

type DistSQL_FlowStreamServer Uses

type DistSQL_FlowStreamServer interface {
    Send(*ConsumerSignal) error
    Recv() (*ProducerMessage, error)
    grpc.ServerStream
}

type DistSQL_RunSyncFlowClient Uses

type DistSQL_RunSyncFlowClient interface {
    Send(*ConsumerSignal) error
    Recv() (*ProducerMessage, error)
    grpc.ClientStream
}

type DistSQL_RunSyncFlowServer Uses

type DistSQL_RunSyncFlowServer interface {
    Send(*ProducerMessage) error
    Recv() (*ConsumerSignal, error)
    grpc.ServerStream
}

type DistinctSpec Uses

type DistinctSpec struct {
    // The ordered columns in the input stream can be optionally specified for
    // possible optimizations. The specific ordering (ascending/descending) of
    // the column itself is not important nor is the order in which the columns
    // are specified. The ordered columns must be a subset of the distinct
    // columns.
    OrderedColumns []uint32 `protobuf:"varint,1,rep,name=ordered_columns,json=orderedColumns" json:"ordered_columns,omitempty"`
    // The distinct columns in the input stream are those columns on which we
    // check for distinct rows. If A,B,C are in distinct_columns and there is a
    // 4th column D which is not included in distinct_columns, its values are not
    // considered, so rows A1,B1,C1,D1 and A1,B1,C1,D2 are considered equal and
    // only one of them (the first) is output.
    DistinctColumns []uint32 `protobuf:"varint,2,rep,name=distinct_columns,json=distinctColumns" json:"distinct_columns,omitempty"`
    // If true, then NULL values are treated as not equal to one another. Each NULL
    // value will cause a new row group to be created. For example:
    //
    //   c
    //   ----
    //   NULL
    //   NULL
    //
    // A distinct operation on column "c" will result in one output row if
    // NullsAreDistinct is false, or two output rows if true. This is set to true
    // for UPSERT and INSERT..ON CONFLICT statements, since they must treat NULL
    // values as distinct.
    NullsAreDistinct bool `protobuf:"varint,3,opt,name=nulls_are_distinct,json=nullsAreDistinct" json:"nulls_are_distinct"`
    // If not empty, then an error with this text will be raised if there are two
    // rows with duplicate distinct column values. This is used to implement the
    // UPSERT and INSERT..ON CONFLICT statements, both of which prohibit the same
    // row from being changed twice.
    ErrorOnDup string `protobuf:"bytes,4,opt,name=error_on_dup,json=errorOnDup" json:"error_on_dup"`
}

func (*DistinctSpec) Descriptor Uses

func (*DistinctSpec) Descriptor() ([]byte, []int)

func (*DistinctSpec) Marshal Uses

func (m *DistinctSpec) Marshal() (dAtA []byte, err error)

func (*DistinctSpec) MarshalTo Uses

func (m *DistinctSpec) MarshalTo(dAtA []byte) (int, error)

func (*DistinctSpec) ProtoMessage Uses

func (*DistinctSpec) ProtoMessage()

func (*DistinctSpec) Reset Uses

func (m *DistinctSpec) Reset()

func (*DistinctSpec) Size Uses

func (m *DistinctSpec) Size() (n int)

func (*DistinctSpec) String Uses

func (m *DistinctSpec) String() string

func (*DistinctSpec) Unmarshal Uses

func (m *DistinctSpec) Unmarshal(dAtA []byte) error

func (*DistinctSpec) XXX_DiscardUnknown Uses

func (m *DistinctSpec) XXX_DiscardUnknown()

func (*DistinctSpec) XXX_Marshal Uses

func (m *DistinctSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistinctSpec) XXX_Merge Uses

func (dst *DistinctSpec) XXX_Merge(src proto.Message)

func (*DistinctSpec) XXX_Size Uses

func (m *DistinctSpec) XXX_Size() int

func (*DistinctSpec) XXX_Unmarshal Uses

func (m *DistinctSpec) XXX_Unmarshal(b []byte) error

type DrainRequest Uses

type DrainRequest struct {
}

func (*DrainRequest) Descriptor Uses

func (*DrainRequest) Descriptor() ([]byte, []int)

func (*DrainRequest) Marshal Uses

func (m *DrainRequest) Marshal() (dAtA []byte, err error)

func (*DrainRequest) MarshalTo Uses

func (m *DrainRequest) MarshalTo(dAtA []byte) (int, error)

func (*DrainRequest) ProtoMessage Uses

func (*DrainRequest) ProtoMessage()

func (*DrainRequest) Reset Uses

func (m *DrainRequest) Reset()

func (*DrainRequest) Size Uses

func (m *DrainRequest) Size() (n int)

func (*DrainRequest) String Uses

func (m *DrainRequest) String() string

func (*DrainRequest) Unmarshal Uses

func (m *DrainRequest) Unmarshal(dAtA []byte) error

func (*DrainRequest) XXX_DiscardUnknown Uses

func (m *DrainRequest) XXX_DiscardUnknown()

func (*DrainRequest) XXX_Marshal Uses

func (m *DrainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DrainRequest) XXX_Merge Uses

func (dst *DrainRequest) XXX_Merge(src proto.Message)

func (*DrainRequest) XXX_Size Uses

func (m *DrainRequest) XXX_Size() int

func (*DrainRequest) XXX_Unmarshal Uses

func (m *DrainRequest) XXX_Unmarshal(b []byte) error

type Error Uses

type Error struct {
    // full_error contains a structured errors with possibly multiple
    // wrapping layers implementing the errors.Cause() interface.
    FullError *errorspb.EncodedError `protobuf:"bytes,3,opt,name=full_error,json=fullError" json:"full_error,omitempty"`
}

Error is a generic representation including a string message.

func NewError Uses

func NewError(ctx context.Context, err error) *Error

NewError creates an Error from an error, to be sent on the wire. It will recognize certain errors and marshal them accordingly, and everything unrecognized is turned into a PGError with code "internal".

func (*Error) Descriptor Uses

func (*Error) Descriptor() ([]byte, []int)

func (*Error) ErrorDetail Uses

func (e *Error) ErrorDetail(ctx context.Context) (err error)

ErrorDetail returns the payload as a Go error.

func (*Error) Marshal Uses

func (m *Error) Marshal() (dAtA []byte, err error)

func (*Error) MarshalTo Uses

func (m *Error) MarshalTo(dAtA []byte) (int, error)

func (*Error) ProtoMessage Uses

func (*Error) ProtoMessage()

func (*Error) Reset Uses

func (m *Error) Reset()

func (*Error) Size Uses

func (m *Error) Size() (n int)

func (*Error) String Uses

func (e *Error) String() string

String implements fmt.Stringer.

func (*Error) Unmarshal Uses

func (m *Error) Unmarshal(dAtA []byte) error

func (*Error) XXX_DiscardUnknown Uses

func (m *Error) XXX_DiscardUnknown()

func (*Error) XXX_Marshal Uses

func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Error) XXX_Merge Uses

func (dst *Error) XXX_Merge(src proto.Message)

func (*Error) XXX_Size Uses

func (m *Error) XXX_Size() int

func (*Error) XXX_Unmarshal Uses

func (m *Error) XXX_Unmarshal(b []byte) error

type EvalContext Uses

type EvalContext struct {
    StmtTimestampNanos int64 `protobuf:"varint,1,opt,name=stmtTimestampNanos" json:"stmtTimestampNanos"`
    TxnTimestampNanos  int64 `protobuf:"varint,2,opt,name=txnTimestampNanos" json:"txnTimestampNanos"`
    // The name of the location according to whose current timezone we're going to
    // parse timestamps. Used to init EvalContext.Location.
    Location            string            `protobuf:"bytes,4,opt,name=location" json:"location"`
    Database            string            `protobuf:"bytes,5,opt,name=database" json:"database"`
    SearchPath          []string          `protobuf:"bytes,6,rep,name=search_path,json=searchPath" json:"search_path,omitempty"`
    TemporarySchemaName string            `protobuf:"bytes,13,opt,name=temporary_schema_name,json=temporarySchemaName" json:"temporary_schema_name"`
    User                string            `protobuf:"bytes,7,opt,name=user" json:"user"`
    SeqState            SequenceState     `protobuf:"bytes,8,opt,name=seq_state,json=seqState" json:"seq_state"`
    ApplicationName     string            `protobuf:"bytes,9,opt,name=application_name,json=applicationName" json:"application_name"`
    BytesEncodeFormat   BytesEncodeFormat `protobuf:"varint,10,opt,name=bytes_encode_format,json=bytesEncodeFormat,enum=cockroach.sql.distsqlrun.BytesEncodeFormat" json:"bytes_encode_format"`
    ExtraFloatDigits    int32             `protobuf:"varint,11,opt,name=extra_float_digits,json=extraFloatDigits" json:"extra_float_digits"`
    Vectorize           int32             `protobuf:"varint,12,opt,name=vectorize" json:"vectorize"`
}

EvalContext is used to marshall some planner.EvalContext members.

func MakeEvalContext Uses

func MakeEvalContext(evalCtx *tree.EvalContext) EvalContext

MakeEvalContext serializes some of the fields of a tree.EvalContext into a distsqlpb.EvalContext proto.

func (*EvalContext) Descriptor Uses

func (*EvalContext) Descriptor() ([]byte, []int)

func (*EvalContext) Marshal Uses

func (m *EvalContext) Marshal() (dAtA []byte, err error)

func (*EvalContext) MarshalTo Uses

func (m *EvalContext) MarshalTo(dAtA []byte) (int, error)

func (*EvalContext) ProtoMessage Uses

func (*EvalContext) ProtoMessage()

func (*EvalContext) Reset Uses

func (m *EvalContext) Reset()

func (*EvalContext) Size Uses

func (m *EvalContext) Size() (n int)

func (*EvalContext) String Uses

func (m *EvalContext) String() string

func (*EvalContext) Unmarshal Uses

func (m *EvalContext) Unmarshal(dAtA []byte) error

func (*EvalContext) XXX_DiscardUnknown Uses

func (m *EvalContext) XXX_DiscardUnknown()

func (*EvalContext) XXX_Marshal Uses

func (m *EvalContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*EvalContext) XXX_Merge Uses

func (dst *EvalContext) XXX_Merge(src proto.Message)

func (*EvalContext) XXX_Size Uses

func (m *EvalContext) XXX_Size() int

func (*EvalContext) XXX_Unmarshal Uses

func (m *EvalContext) XXX_Unmarshal(b []byte) error

type ExprHelper Uses

type ExprHelper struct {
    Expr tree.TypedExpr
    // Vars is used to generate IndexedVars that are "backed" by the values in
    // `Row`.
    Vars tree.IndexedVarHelper

    Types []*types.T
    Row   sqlbase.EncDatumRow
    // contains filtered or unexported fields
}

ExprHelper implements the common logic around evaluating an expression that depends on a set of values.

func (*ExprHelper) Eval Uses

func (eh *ExprHelper) Eval(row sqlbase.EncDatumRow) (tree.Datum, error)

Eval - given a row - evaluates the wrapped expression and returns the resulting datum. For example, given a row (1, 2, 3, 4, 5):

'@2' would return '2'
'@2 + @5' would return '7'
'@1' would return '1'
'@2 + 10' would return '12'

func (*ExprHelper) EvalFilter Uses

func (eh *ExprHelper) EvalFilter(row sqlbase.EncDatumRow) (bool, error)

EvalFilter is used for filter expressions; it evaluates the expression and returns whether the filter passes.

func (*ExprHelper) IndexedVarEval Uses

func (eh *ExprHelper) IndexedVarEval(idx int, ctx *tree.EvalContext) (tree.Datum, error)

IndexedVarEval is part of the tree.IndexedVarContainer interface.

func (*ExprHelper) IndexedVarNodeFormatter Uses

func (eh *ExprHelper) IndexedVarNodeFormatter(idx int) tree.NodeFormatter

IndexedVarNodeFormatter is part of the parser.IndexedVarContainer interface.

func (*ExprHelper) IndexedVarResolvedType Uses

func (eh *ExprHelper) IndexedVarResolvedType(idx int) *types.T

IndexedVarResolvedType is part of the tree.IndexedVarContainer interface.

func (*ExprHelper) Init Uses

func (eh *ExprHelper) Init(
    expr Expression, types []*types.T, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext,
) error

Init initializes the ExprHelper.

func (*ExprHelper) String Uses

func (eh *ExprHelper) String() string

type Expression Uses

type Expression struct {
    // Version is unused.
    Version string

    // Expr, if present, is the string representation of this expression.
    // SQL expressions are passed as a string, with ordinal references
    // (@1, @2, @3 ..) used for "input" variables.
    Expr string

    // LocalExpr is an unserialized field that's used to pass expressions to
    // the gateway node without serializing/deserializing them. It is always
    // set in non-test setup.
    LocalExpr tree.TypedExpr
}

Expression is the representation of a SQL expression. See data.proto for the corresponding proto definition. Its automatic type declaration is suppressed in the proto via the typedecl=false option, so that we can add the LocalExpr field which is not serialized. It never needs to be serialized because we only use it in the case where we know we won't need to send it, as a proto, to another machine.

func (*Expression) Descriptor Uses

func (*Expression) Descriptor() ([]byte, []int)

func (*Expression) Empty Uses

func (e *Expression) Empty() bool

Empty returns true if the expression has neither an Expr nor LocalExpr.

func (*Expression) Marshal Uses

func (m *Expression) Marshal() (dAtA []byte, err error)

func (*Expression) MarshalTo Uses

func (m *Expression) MarshalTo(dAtA []byte) (int, error)

func (*Expression) ProtoMessage Uses

func (*Expression) ProtoMessage()

func (*Expression) Reset Uses

func (m *Expression) Reset()

func (*Expression) Size Uses

func (m *Expression) Size() (n int)

func (Expression) String Uses

func (e Expression) String() string

String implements the Stringer interface.

func (*Expression) Unmarshal Uses

func (m *Expression) Unmarshal(dAtA []byte) error

func (*Expression) XXX_DiscardUnknown Uses

func (m *Expression) XXX_DiscardUnknown()

func (*Expression) XXX_Marshal Uses

func (m *Expression) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Expression) XXX_Merge Uses

func (dst *Expression) XXX_Merge(src proto.Message)

func (*Expression) XXX_Size Uses

func (m *Expression) XXX_Size() int

func (*Expression) XXX_Unmarshal Uses

func (m *Expression) XXX_Unmarshal(b []byte) error

type FileCompression Uses

type FileCompression int32

FileCompression list of the compression codecs which are currently supported for CSVWriter spec

const (
    FileCompression_None FileCompression = 0
    FileCompression_Gzip FileCompression = 1
)

func (FileCompression) Enum Uses

func (x FileCompression) Enum() *FileCompression

func (FileCompression) EnumDescriptor Uses

func (FileCompression) EnumDescriptor() ([]byte, []int)

func (FileCompression) String Uses

func (x FileCompression) String() string

func (*FileCompression) UnmarshalJSON Uses

func (x *FileCompression) UnmarshalJSON(data []byte) error

type FlowDiagram Uses

type FlowDiagram interface {
    // ToURL generates the json data for a flow diagram and a URL which encodes the
    // diagram.
    ToURL() (string, url.URL, error)

    // AddSpans adds stats extracted from the input spans to the diagram.
    AddSpans([]tracing.RecordedSpan)
}

FlowDiagram is a plan diagram that can be made into a URL.

func GeneratePlanDiagram Uses

func GeneratePlanDiagram(
    sql string, flows map[roachpb.NodeID]*FlowSpec, showInputTypes bool,
) (FlowDiagram, error)

GeneratePlanDiagram generates the data for a flow diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.

type FlowID Uses

type FlowID struct {
    uuid.UUID
}

FlowID identifies a flow. It is most importantly used when setting up streams between nodes.

type FlowSpec Uses

type FlowSpec struct {
    FlowID FlowID `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
    // The NodeID of the gateway that planned this Flow. Used for debugging.
    Gateway    github_com_cockroachdb_cockroach_pkg_roachpb.NodeID `protobuf:"varint,3,opt,name=gateway,casttype=github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" json:"gateway"`
    Processors []ProcessorSpec                                     `protobuf:"bytes,2,rep,name=processors" json:"processors"`
}

FlowSpec describes a "flow" which is a subgraph of a distributed SQL computation consisting of processors and streams.

func (*FlowSpec) Descriptor Uses

func (*FlowSpec) Descriptor() ([]byte, []int)

func (*FlowSpec) Marshal Uses

func (m *FlowSpec) Marshal() (dAtA []byte, err error)

func (*FlowSpec) MarshalTo Uses

func (m *FlowSpec) MarshalTo(dAtA []byte) (int, error)

func (*FlowSpec) ProtoMessage Uses

func (*FlowSpec) ProtoMessage()

func (*FlowSpec) Reset Uses

func (m *FlowSpec) Reset()

func (*FlowSpec) Size Uses

func (m *FlowSpec) Size() (n int)

func (*FlowSpec) String Uses

func (m *FlowSpec) String() string

func (*FlowSpec) Unmarshal Uses

func (m *FlowSpec) Unmarshal(dAtA []byte) error

func (*FlowSpec) XXX_DiscardUnknown Uses

func (m *FlowSpec) XXX_DiscardUnknown()

func (*FlowSpec) XXX_Marshal Uses

func (m *FlowSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*FlowSpec) XXX_Merge Uses

func (dst *FlowSpec) XXX_Merge(src proto.Message)

func (*FlowSpec) XXX_Size Uses

func (m *FlowSpec) XXX_Size() int

func (*FlowSpec) XXX_Unmarshal Uses

func (m *FlowSpec) XXX_Unmarshal(b []byte) error

type HashJoinerSpec Uses

type HashJoinerSpec struct {
    // The join constrains certain columns from the left stream to equal
    // corresponding columns on the right stream. These must have the same length.
    LeftEqColumns  []uint32 `protobuf:"varint,1,rep,packed,name=left_eq_columns,json=leftEqColumns" json:"left_eq_columns,omitempty"`
    RightEqColumns []uint32 `protobuf:"varint,2,rep,packed,name=right_eq_columns,json=rightEqColumns" json:"right_eq_columns,omitempty"`
    // "ON" expression (in addition to the equality constraints captured by the
    // orderings). Assuming that the left stream has N columns and the right
    // stream has M columns, in this expression variables @1 to @N refer to
    // columns of the left stream and variables @(N+1) to @(N+M) refer to columns
    // in the right stream.
    // Having "ON" expression implies no merged columns.
    OnExpr Expression      `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
    Type   descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
    // If true, it is guaranteed that the left equality columns form a key for
    // the left input. In other words, no two rows from the left input have the
    // same set of values on the left equality columns.
    LeftEqColumnsAreKey bool `protobuf:"varint,8,opt,name=left_eq_columns_are_key,json=leftEqColumnsAreKey" json:"left_eq_columns_are_key"`
    // If true, it is guaranteed that the right equality columns form a key for
    // the right input. In other words, no two rows from the right input have the
    // same set of values on the right equality columns.
    RightEqColumnsAreKey bool `protobuf:"varint,9,opt,name=right_eq_columns_are_key,json=rightEqColumnsAreKey" json:"right_eq_columns_are_key"`
    // DEPRECATED
    //
    // Extra merged columns that are added in case of OUTER JOINS. These
    // columns occupy first positions in a row and it's the left value if it's not
    // NULL, otherwise it's the right value. In INNER JOIN case no merged columns are
    // needed since left stream values are guaranteed to be not NULL.
    //
    // This has been deprecated; the distsqlrun layer still supports it for
    // backward compatibility during upgrade.
    MergedColumns bool `protobuf:"varint,7,opt,name=merged_columns,json=mergedColumns" json:"merged_columns"`
}

HashJoinerSpec is the specification for a hash join processor. The processor has two inputs and one output.

The processor works by reading the entire right input and putting it in a hash table. Thus, there is no guarantee on the ordering of results that stem only from the right input (in the case of RIGHT_OUTER, FULL_OUTER). However, it is guaranteed that results that involve the left stream preserve the ordering; i.e. all results that stem from left row (i) precede results that stem from left row (i+1).

The "internal columns" of a HashJoiner (see ProcessorSpec) are the concatenation of merged columns (if present), left input columns and right input columns. Each merged column corresponds to a left and a right equality column; its value is the value on the left if it is not NULL, otherwise it is the value on the right. There are either zero or E=len(left_eq_columns)=len(right_eq_columns) merged columns.

If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side. If merged columns are present, they occupy first E positions followed by N values from the left side and M values from the right side.

In the case of semi-join and anti-join, the processor core outputs only the left columns.

func (*HashJoinerSpec) Descriptor Uses

func (*HashJoinerSpec) Descriptor() ([]byte, []int)

func (*HashJoinerSpec) Marshal Uses

func (m *HashJoinerSpec) Marshal() (dAtA []byte, err error)

func (*HashJoinerSpec) MarshalTo Uses

func (m *HashJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*HashJoinerSpec) ProtoMessage Uses

func (*HashJoinerSpec) ProtoMessage()

func (*HashJoinerSpec) Reset Uses

func (m *HashJoinerSpec) Reset()

func (*HashJoinerSpec) Size Uses

func (m *HashJoinerSpec) Size() (n int)

func (*HashJoinerSpec) String Uses

func (m *HashJoinerSpec) String() string

func (*HashJoinerSpec) Unmarshal Uses

func (m *HashJoinerSpec) Unmarshal(dAtA []byte) error

func (*HashJoinerSpec) XXX_DiscardUnknown Uses

func (m *HashJoinerSpec) XXX_DiscardUnknown()

func (*HashJoinerSpec) XXX_Marshal Uses

func (m *HashJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HashJoinerSpec) XXX_Merge Uses

func (dst *HashJoinerSpec) XXX_Merge(src proto.Message)

func (*HashJoinerSpec) XXX_Size Uses

func (m *HashJoinerSpec) XXX_Size() int

func (*HashJoinerSpec) XXX_Unmarshal Uses

func (m *HashJoinerSpec) XXX_Unmarshal(b []byte) error

type InboundStreamNotification Uses

type InboundStreamNotification struct {
    Stream DistSQL_FlowStreamServer
    Donec  chan<- error
}

InboundStreamNotification is the MockDistSQLServer's way to tell its clients that a new gRPC call has arrived and thus a stream has arrived. The rpc handler is blocked until Donec is signaled.

type IndexSkipTableReaderSpec Uses

type IndexSkipTableReaderSpec struct {
    Table descpb.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
    // If 0, we use the primary index. If non-zero, we use the index_idx-th index,
    // i.e. table.indexes[index_idx-1]
    IndexIdx uint32            `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
    Spans    []TableReaderSpan `protobuf:"bytes,3,rep,name=spans" json:"spans"`
    // Indicates the visibility level of the columns that should be returned.
    // Normally, will be set to PUBLIC. Will be set to PUBLIC_AND_NOT_PUBLIC if
    // the consumer of this TableReader expects to be able to see in-progress
    // schema changes.
    Visibility ScanVisibility `protobuf:"varint,4,opt,name=visibility,enum=cockroach.sql.distsqlrun.ScanVisibility" json:"visibility"`
    Reverse    bool           `protobuf:"varint,5,opt,name=reverse" json:"reverse"`
    // Indicates the row-level locking strength to be used by the scan. If set to
    // FOR_NONE, no row-level locking should be performed.
    LockingStrength descpb.ScanLockingStrength `protobuf:"varint,6,opt,name=locking_strength,json=lockingStrength,enum=cockroach.sql.sqlbase.ScanLockingStrength" json:"locking_strength"`
    // Indicates the policy to be used by the scan when dealing with rows being
    // locked. Always set to BLOCK when locking_strength is FOR_NONE.
    //
    // NOTE: this is currently set but unused because only the BLOCK wait policy
    // makes it out of the SQL optimizer without throwing an error. If/when other
    // wait policies are supported, this field will be plumbed further.
    LockingWaitPolicy descpb.ScanLockingWaitPolicy `protobuf:"varint,7,opt,name=locking_wait_policy,json=lockingWaitPolicy,enum=cockroach.sql.sqlbase.ScanLockingWaitPolicy" json:"locking_wait_policy"`
}

IndexSkipTableReaderSpec is the specification for a table reader that is performing a loose index scan over rows in the table. This means that this reader will return distinct rows from the table while using the index to skip unnecessary rows. This reader is used for different optimizations when operating on a prefix of a compound key.

func (*IndexSkipTableReaderSpec) Descriptor Uses

func (*IndexSkipTableReaderSpec) Descriptor() ([]byte, []int)

func (*IndexSkipTableReaderSpec) Marshal Uses

func (m *IndexSkipTableReaderSpec) Marshal() (dAtA []byte, err error)

func (*IndexSkipTableReaderSpec) MarshalTo Uses

func (m *IndexSkipTableReaderSpec) MarshalTo(dAtA []byte) (int, error)

func (*IndexSkipTableReaderSpec) ProtoMessage Uses

func (*IndexSkipTableReaderSpec) ProtoMessage()

func (*IndexSkipTableReaderSpec) Reset Uses

func (m *IndexSkipTableReaderSpec) Reset()

func (*IndexSkipTableReaderSpec) Size Uses

func (m *IndexSkipTableReaderSpec) Size() (n int)

func (*IndexSkipTableReaderSpec) String Uses

func (m *IndexSkipTableReaderSpec) String() string

func (*IndexSkipTableReaderSpec) Unmarshal Uses

func (m *IndexSkipTableReaderSpec) Unmarshal(dAtA []byte) error

func (*IndexSkipTableReaderSpec) XXX_DiscardUnknown Uses

func (m *IndexSkipTableReaderSpec) XXX_DiscardUnknown()

func (*IndexSkipTableReaderSpec) XXX_Marshal Uses

func (m *IndexSkipTableReaderSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*IndexSkipTableReaderSpec) XXX_Merge Uses

func (dst *IndexSkipTableReaderSpec) XXX_Merge(src proto.Message)

func (*IndexSkipTableReaderSpec) XXX_Size Uses

func (m *IndexSkipTableReaderSpec) XXX_Size() int

func (*IndexSkipTableReaderSpec) XXX_Unmarshal Uses

func (m *IndexSkipTableReaderSpec) XXX_Unmarshal(b []byte) error

type InputSyncSpec Uses

type InputSyncSpec struct {
    Type     InputSyncSpec_Type   `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.InputSyncSpec_Type" json:"type"`
    Ordering Ordering             `protobuf:"bytes,2,opt,name=ordering" json:"ordering"`
    Streams  []StreamEndpointSpec `protobuf:"bytes,3,rep,name=streams" json:"streams"`
    // Schema for the streams entering this synchronizer.
    ColumnTypes []*types.T `protobuf:"bytes,4,rep,name=column_types,json=columnTypes" json:"column_types,omitempty"`
}

InputSyncSpec is the specification for an input synchronizer; it decides how to interleave rows from multiple input streams.

func (*InputSyncSpec) Descriptor Uses

func (*InputSyncSpec) Descriptor() ([]byte, []int)

func (*InputSyncSpec) Marshal Uses

func (m *InputSyncSpec) Marshal() (dAtA []byte, err error)

func (*InputSyncSpec) MarshalTo Uses

func (m *InputSyncSpec) MarshalTo(dAtA []byte) (int, error)

func (*InputSyncSpec) ProtoMessage Uses

func (*InputSyncSpec) ProtoMessage()

func (*InputSyncSpec) Reset Uses

func (m *InputSyncSpec) Reset()

func (*InputSyncSpec) Size Uses

func (m *InputSyncSpec) Size() (n int)

func (*InputSyncSpec) String Uses

func (m *InputSyncSpec) String() string

func (*InputSyncSpec) Unmarshal Uses

func (m *InputSyncSpec) Unmarshal(dAtA []byte) error

func (*InputSyncSpec) XXX_DiscardUnknown Uses

func (m *InputSyncSpec) XXX_DiscardUnknown()

func (*InputSyncSpec) XXX_Marshal Uses

func (m *InputSyncSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InputSyncSpec) XXX_Merge Uses

func (dst *InputSyncSpec) XXX_Merge(src proto.Message)

func (*InputSyncSpec) XXX_Size Uses

func (m *InputSyncSpec) XXX_Size() int

func (*InputSyncSpec) XXX_Unmarshal Uses

func (m *InputSyncSpec) XXX_Unmarshal(b []byte) error

type InputSyncSpec_Type Uses

type InputSyncSpec_Type int32
const (
    // Rows from the input streams are interleaved arbitrarily.
    InputSyncSpec_UNORDERED InputSyncSpec_Type = 0
    // The input streams are guaranteed to be ordered according to the column
    // ordering field; rows from the streams are interleaved to preserve that
    // ordering.
    InputSyncSpec_ORDERED InputSyncSpec_Type = 1
)

func (InputSyncSpec_Type) Enum Uses

func (x InputSyncSpec_Type) Enum() *InputSyncSpec_Type

func (InputSyncSpec_Type) EnumDescriptor Uses

func (InputSyncSpec_Type) EnumDescriptor() ([]byte, []int)

func (InputSyncSpec_Type) String Uses

func (x InputSyncSpec_Type) String() string

func (*InputSyncSpec_Type) UnmarshalJSON Uses

func (x *InputSyncSpec_Type) UnmarshalJSON(data []byte) error

type InterleavedReaderJoinerSpec Uses

type InterleavedReaderJoinerSpec struct {
    // The tables can be in any order. The processor figures out internally the
    // hierarchy between them.
    Tables  []InterleavedReaderJoinerSpec_Table `protobuf:"bytes,1,rep,name=tables" json:"tables"`
    Reverse bool                                `protobuf:"varint,2,opt,name=reverse" json:"reverse"`
    // A hint for how many joined rows from the tables the consumer of the
    // interleavedReaderJoiner might need. This is used to size the initial KV
    // batches to try to avoid reading many more rows than needed by the
    // processor receiving the output.
    // Not used if there is a limit set in the PostProcessSpec of this processor
    // (that value will be used for sizing batches instead).
    LimitHint int64 `protobuf:"varint,3,opt,name=limit_hint,json=limitHint" json:"limit_hint"`
    // Indicates the row-level locking strength to be used by the scan over the
    // tables. If set to FOR_NONE, no row-level locking should be performed.
    LockingStrength descpb.ScanLockingStrength `protobuf:"varint,6,opt,name=locking_strength,json=lockingStrength,enum=cockroach.sql.sqlbase.ScanLockingStrength" json:"locking_strength"`
    // Indicates the policy to be used by the scan over the tables when dealing
    // with rows being locked. Always set to BLOCK when locking_strength is
    // FOR_NONE.
    //
    // NOTE: this is currently set but unused because only the BLOCK wait policy
    // makes it out of the SQL optimizer without throwing an error. If/when other
    // wait policies are supported, this field will be plumbed further.
    LockingWaitPolicy descpb.ScanLockingWaitPolicy `protobuf:"varint,7,opt,name=locking_wait_policy,json=lockingWaitPolicy,enum=cockroach.sql.sqlbase.ScanLockingWaitPolicy" json:"locking_wait_policy"`
    // "ON" expression (in addition to the equality constraints captured by the
    // orderings). Assuming that the left table has N columns and the right
    // table has M columns, in this expression ordinal references @1 to @N
    // refer to columns of the left table and variables @(N+1) to @(N+M) refer to
    // columns in the right table.
    OnExpr Expression      `protobuf:"bytes,4,opt,name=on_expr,json=onExpr" json:"on_expr"`
    Type   descpb.JoinType `protobuf:"varint,5,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
}

InterleavedReaderJoinerSpec is the specification for a processor that performs KV operations to retrieve rows from 2+ tables from an interleaved hierarchy, performs intermediate filtering on rows from each table, and performs a join on the rows from the 2+ tables.

Limitations: the InterleavedReaderJoiner currently supports only equality joins on the full interleave prefix. See https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20171025_interleaved_table_joins.md.

The "internal columns" of an InterleavedReaderJoiner are the concatenation of left input columns and right input columns. If the left table has N columns and the right table has M columns, the first N columns contain values from the left table and the following M columns contain values from the right table.

func (*InterleavedReaderJoinerSpec) Descriptor Uses

func (*InterleavedReaderJoinerSpec) Descriptor() ([]byte, []int)

func (*InterleavedReaderJoinerSpec) Marshal Uses

func (m *InterleavedReaderJoinerSpec) Marshal() (dAtA []byte, err error)

func (*InterleavedReaderJoinerSpec) MarshalTo Uses

func (m *InterleavedReaderJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*InterleavedReaderJoinerSpec) ProtoMessage Uses

func (*InterleavedReaderJoinerSpec) ProtoMessage()

func (*InterleavedReaderJoinerSpec) Reset Uses

func (m *InterleavedReaderJoinerSpec) Reset()

func (*InterleavedReaderJoinerSpec) Size Uses

func (m *InterleavedReaderJoinerSpec) Size() (n int)

func (*InterleavedReaderJoinerSpec) String Uses

func (m *InterleavedReaderJoinerSpec) String() string

func (*InterleavedReaderJoinerSpec) Unmarshal Uses

func (m *InterleavedReaderJoinerSpec) Unmarshal(dAtA []byte) error

func (*InterleavedReaderJoinerSpec) XXX_DiscardUnknown Uses

func (m *InterleavedReaderJoinerSpec) XXX_DiscardUnknown()

func (*InterleavedReaderJoinerSpec) XXX_Marshal Uses

func (m *InterleavedReaderJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InterleavedReaderJoinerSpec) XXX_Merge Uses

func (dst *InterleavedReaderJoinerSpec) XXX_Merge(src proto.Message)

func (*InterleavedReaderJoinerSpec) XXX_Size Uses

func (m *InterleavedReaderJoinerSpec) XXX_Size() int

func (*InterleavedReaderJoinerSpec) XXX_Unmarshal Uses

func (m *InterleavedReaderJoinerSpec) XXX_Unmarshal(b []byte) error

type InterleavedReaderJoinerSpec_Table Uses

type InterleavedReaderJoinerSpec_Table struct {
    Desc descpb.TableDescriptor `protobuf:"bytes,1,opt,name=desc" json:"desc"`
    // If 0, we use the primary index. If non-zero, we use the index_idx-th index,
    // i.e. desc.indexes[index_idx-1]
    IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
    // The PostProcessSpecs of the corresponding TableReaderSpecs of each table
    // are fed as arguments to InterleavedReaderJoiner.
    //
    // This is required to properly post-process the rows (i.e. filtering and
    // projections) after reading from the table but before joining.
    // It may be necessary to modify/introduce additional intermediate filters
    // for correctness (see comment above 'spans' under
    // InterleavedReaderJoinerSpec).
    Post PostProcessSpec `protobuf:"bytes,3,opt,name=post" json:"post"`
    // The tables must be ordered according to the columns that have equality
    // constraints. The first column of the first table's ordering is constrained
    // to be equal to the first column in the second table's ordering and so on
    // for the other tables and their corresponding columns.
    Ordering Ordering `protobuf:"bytes,4,opt,name=ordering" json:"ordering"`
    // The span covering the rows from this table to join. Note the
    // InterleavedReaderJoiner processor will take the union of all spans across
    // all tables to do a single pass-through scan. InterleavedReaderJoiner will
    // then check if a given row for a table is within any of its spans.
    // There must exist at least one non-empty set of spans for some table.
    Spans []TableReaderSpan `protobuf:"bytes,5,rep,name=spans" json:"spans"`
}

func (*InterleavedReaderJoinerSpec_Table) Descriptor Uses

func (*InterleavedReaderJoinerSpec_Table) Descriptor() ([]byte, []int)

func (*InterleavedReaderJoinerSpec_Table) Marshal Uses

func (m *InterleavedReaderJoinerSpec_Table) Marshal() (dAtA []byte, err error)

func (*InterleavedReaderJoinerSpec_Table) MarshalTo Uses

func (m *InterleavedReaderJoinerSpec_Table) MarshalTo(dAtA []byte) (int, error)

func (*InterleavedReaderJoinerSpec_Table) ProtoMessage Uses

func (*InterleavedReaderJoinerSpec_Table) ProtoMessage()

func (*InterleavedReaderJoinerSpec_Table) Reset Uses

func (m *InterleavedReaderJoinerSpec_Table) Reset()

func (*InterleavedReaderJoinerSpec_Table) Size Uses

func (m *InterleavedReaderJoinerSpec_Table) Size() (n int)

func (*InterleavedReaderJoinerSpec_Table) String Uses

func (m *InterleavedReaderJoinerSpec_Table) String() string

func (*InterleavedReaderJoinerSpec_Table) Unmarshal Uses

func (m *InterleavedReaderJoinerSpec_Table) Unmarshal(dAtA []byte) error

func (*InterleavedReaderJoinerSpec_Table) XXX_DiscardUnknown Uses

func (m *InterleavedReaderJoinerSpec_Table) XXX_DiscardUnknown()

func (*InterleavedReaderJoinerSpec_Table) XXX_Marshal Uses

func (m *InterleavedReaderJoinerSpec_Table) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InterleavedReaderJoinerSpec_Table) XXX_Merge Uses

func (dst *InterleavedReaderJoinerSpec_Table) XXX_Merge(src proto.Message)

func (*InterleavedReaderJoinerSpec_Table) XXX_Size Uses

func (m *InterleavedReaderJoinerSpec_Table) XXX_Size() int

func (*InterleavedReaderJoinerSpec_Table) XXX_Unmarshal Uses

func (m *InterleavedReaderJoinerSpec_Table) XXX_Unmarshal(b []byte) error

type InvertedFiltererSpec Uses

type InvertedFiltererSpec struct {
    // The index in the input row of the inverted column.
    InvertedColIdx uint32 `protobuf:"varint,1,opt,name=inverted_col_idx,json=invertedColIdx" json:"inverted_col_idx"`
    // The expression to evaluate. The SpansToRead are ignored since they
    // have already been used to setup the input.
    InvertedExpr invertedexpr.SpanExpressionProto `protobuf:"bytes,2,opt,name=inverted_expr,json=invertedExpr" json:"inverted_expr"`
}

InvertedFiltererSpec is the specification of a processor that does filtering on a table by evaluating an invertedexpr.SpanExpressionProto on an inverted index of the table. The input consists of the inverted index rows from InvertedExpr.SpansToRead. It is acceptable for a filter on the primary key to be pushed down between the scan and the inverted filterer.

Example: Table columns: | a | b | c | d | where a, d are the primary key and b is the column with the inverted index. Inverted index columns: | a | b' | d | where b' is derived from b. For instance, if b is an array, b' will be elements of the array.

Internal columns are | a | b | d |. The output sets b to NULL, since it does not have the value of the original column that was indexed in the inverted column.

func (*InvertedFiltererSpec) Descriptor Uses

func (*InvertedFiltererSpec) Descriptor() ([]byte, []int)

func (*InvertedFiltererSpec) Marshal Uses

func (m *InvertedFiltererSpec) Marshal() (dAtA []byte, err error)

func (*InvertedFiltererSpec) MarshalTo Uses

func (m *InvertedFiltererSpec) MarshalTo(dAtA []byte) (int, error)

func (*InvertedFiltererSpec) ProtoMessage Uses

func (*InvertedFiltererSpec) ProtoMessage()

func (*InvertedFiltererSpec) Reset Uses

func (m *InvertedFiltererSpec) Reset()

func (*InvertedFiltererSpec) Size Uses

func (m *InvertedFiltererSpec) Size() (n int)

func (*InvertedFiltererSpec) String Uses

func (m *InvertedFiltererSpec) String() string

func (*InvertedFiltererSpec) Unmarshal Uses

func (m *InvertedFiltererSpec) Unmarshal(dAtA []byte) error

func (*InvertedFiltererSpec) XXX_DiscardUnknown Uses

func (m *InvertedFiltererSpec) XXX_DiscardUnknown()

func (*InvertedFiltererSpec) XXX_Marshal Uses

func (m *InvertedFiltererSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InvertedFiltererSpec) XXX_Merge Uses

func (dst *InvertedFiltererSpec) XXX_Merge(src proto.Message)

func (*InvertedFiltererSpec) XXX_Size Uses

func (m *InvertedFiltererSpec) XXX_Size() int

func (*InvertedFiltererSpec) XXX_Unmarshal Uses

func (m *InvertedFiltererSpec) XXX_Unmarshal(b []byte) error

type InvertedJoinerSpec Uses

type InvertedJoinerSpec struct {
    Table descpb.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
    // The ID of the inverted index. The first column in the index is the
    // inverted column, and the remaining columns are the primary key.
    IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
    // Expression involving the indexed column and columns from the input.
    // Assuming that the input stream has N columns and the table that has been
    // indexed has M columns, in this expression variables @1 to @N refer to
    // columns of the input stream and variables @(N+1) to @(N+M) refer to
    // columns in the table. Although the numbering includes all columns, only
    // columns corresponding to the indexed column and the input columns may be
    // present in this expression. Note that the column numbering matches the
    // numbering used below by the on expression.
    //
    // The expression is passed to xform.NewDatumToInvertedExpr to construct an
    // implementation of invertedexpr.DatumToInvertedExpr, which will be fed each
    // input row and output an expression to evaluate over the inverted index.
    InvertedExpr Expression `protobuf:"bytes,4,opt,name=inverted_expr,json=invertedExpr" json:"inverted_expr"`
    // Optional expression involving the columns in the index (other than the
    // inverted column) and the columns in the input stream. Assuming that the
    // input stream has N columns and the table that has been indexed has M
    // columns, in this expression variables @1 to @N refer to columns of the
    // input stream and variables @(N+1) to @(N+M) refer to columns in the
    // table. The numbering does not omit the column in the table corresponding
    // to the inverted column, or other table columns absent from the index, but
    // they cannot be present in this expression. Note that the column numbering
    // matches the numbering used above by the inverted expression.
    OnExpr Expression `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
    // Only INNER, LEFT_OUTER, LEFT_SEMI, LEFT_ANTI are supported. For indexes
    // that produce false positives for user expressions, like geospatial
    // indexes, only INNER and LEFT_OUTER are actually useful -- LEFT_SEMI will
    // be mapped to INNER by the optimizer, and LEFT_ANTI to LEFT_OUTER, to
    // allow the false positives to be eliminated by evaluating the exact
    // expression on the rows output by this join.
    Type descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
}

InvertedJoinerSpec is the specification for an inverted join. The processor has two inputs and one output.

The processor uses the inverted index on a column of the right input to join with a column of the left input. In addition to the InvertedExpr which is specified on these two columns, it also evaluates an OnExpr on the joined rows that satisfy the InvertedExpr. The "internal columns" of an InvertedJoiner for INNER and LEFT_OUTER joins are a concatenation of the columns of left and right input. The only columns of the right input that are populated are the columns present in the inverted index, except for the inverted column (since it does not represent a complete value for the datum that was indexed). For LEFT_SEMI and LEFT_ANTI, the "internal columns" are the columns of the left input.

Example: Input stream columns: | a | b | Table columns: | c | d | e | The InvertedExpr involves columns b, e and the primary key for the right input is c. The inverted index has columns: | e' | c | where e' is derived from e. For instance, if e is an array, e' will correspond to elements of the array. The OnExpr can use columns a, b, c, since they are the other columns that are present in the input stream and the inverted index.

Internal columns for INNER and LEFT_OUTER: | a | b | c | d | e | where d, e are not populated. Internal columns for LEFT_SEMI and LEFT_ANTI: | a | b |

func (*InvertedJoinerSpec) Descriptor Uses

func (*InvertedJoinerSpec) Descriptor() ([]byte, []int)

func (*InvertedJoinerSpec) Marshal Uses

func (m *InvertedJoinerSpec) Marshal() (dAtA []byte, err error)

func (*InvertedJoinerSpec) MarshalTo Uses

func (m *InvertedJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*InvertedJoinerSpec) ProtoMessage Uses

func (*InvertedJoinerSpec) ProtoMessage()

func (*InvertedJoinerSpec) Reset Uses

func (m *InvertedJoinerSpec) Reset()

func (*InvertedJoinerSpec) Size Uses

func (m *InvertedJoinerSpec) Size() (n int)

func (*InvertedJoinerSpec) String Uses

func (m *InvertedJoinerSpec) String() string

func (*InvertedJoinerSpec) Unmarshal Uses

func (m *InvertedJoinerSpec) Unmarshal(dAtA []byte) error

func (*InvertedJoinerSpec) XXX_DiscardUnknown Uses

func (m *InvertedJoinerSpec) XXX_DiscardUnknown()

func (*InvertedJoinerSpec) XXX_Marshal Uses

func (m *InvertedJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InvertedJoinerSpec) XXX_Merge Uses

func (dst *InvertedJoinerSpec) XXX_Merge(src proto.Message)

func (*InvertedJoinerSpec) XXX_Size Uses

func (m *InvertedJoinerSpec) XXX_Size() int

func (*InvertedJoinerSpec) XXX_Unmarshal Uses

func (m *InvertedJoinerSpec) XXX_Unmarshal(b []byte) error

type JobProgress Uses

type JobProgress struct {
    JobID int64 `protobuf:"varint,1,opt,name=job_id,json=jobId" json:"job_id"`
    // contribution is the percent of work of the total this processor will
    // process.
    Contribution float32 `protobuf:"fixed32,2,opt,name=contribution" json:"contribution"`
    // slot is the index into the job details for this processor's completion.
    Slot int32 `protobuf:"varint,3,opt,name=slot" json:"slot"`
}

JobProgress identifies the job to report progress on. This reporting happens outside this package.

func (*JobProgress) Descriptor Uses

func (*JobProgress) Descriptor() ([]byte, []int)

func (*JobProgress) Marshal Uses

func (m *JobProgress) Marshal() (dAtA []byte, err error)

func (*JobProgress) MarshalTo Uses

func (m *JobProgress) MarshalTo(dAtA []byte) (int, error)

func (*JobProgress) ProtoMessage Uses

func (*JobProgress) ProtoMessage()

func (*JobProgress) Reset Uses

func (m *JobProgress) Reset()

func (*JobProgress) Size Uses

func (m *JobProgress) Size() (n int)

func (*JobProgress) String Uses

func (m *JobProgress) String() string

func (*JobProgress) Unmarshal Uses

func (m *JobProgress) Unmarshal(dAtA []byte) error

func (*JobProgress) XXX_DiscardUnknown Uses

func (m *JobProgress) XXX_DiscardUnknown()

func (*JobProgress) XXX_Marshal Uses

func (m *JobProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobProgress) XXX_Merge Uses

func (dst *JobProgress) XXX_Merge(src proto.Message)

func (*JobProgress) XXX_Size Uses

func (m *JobProgress) XXX_Size() int

func (*JobProgress) XXX_Unmarshal Uses

func (m *JobProgress) XXX_Unmarshal(b []byte) error

type JoinReaderSpec Uses

type JoinReaderSpec struct {
    Table descpb.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
    // If 0, we use the primary index; each row in the input stream has a value
    // for each primary key. The index must provide all lookup columns.
    IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
    // Column indexes in the input stream specifying the columns which match with
    // the index columns. These are the equality columns of the join.
    //
    // If empty (index join), the start of the input stream schema is assumed to
    // match the index columns. The joinReader will perform an index join and the
    // "internal columns" will be the columns of the table.
    //
    // If populated (lookup join), the `joinReader` will perform a lookup join
    // and the "internal columns" will be the concatenation of the input stream
    // columns followed by the table columns (except for semi/anti join, which
    // don't output any table columns).
    LookupColumns []uint32 `protobuf:"varint,3,rep,packed,name=lookup_columns,json=lookupColumns" json:"lookup_columns,omitempty"`
    // If set, the lookup columns form a key in the target table and thus each
    // lookup has at most one result.
    LookupColumnsAreKey bool `protobuf:"varint,8,opt,name=lookup_columns_are_key,json=lookupColumnsAreKey" json:"lookup_columns_are_key"`
    // "ON" expression (in addition to the equality constraints captured by the
    // orderings). Assuming that the left stream has N columns and the right
    // stream has M columns, in this expression variables @1 to @N refer to
    // columns of the left stream and variables @(N+1) to @(N+M) refer to columns in
    // the right stream.
    OnExpr Expression `protobuf:"bytes,4,opt,name=on_expr,json=onExpr" json:"on_expr"`
    // For lookup joins. Only JoinType_INNER and JoinType_LEFT_OUTER are
    // supported.
    Type descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
    // For index joins that are sources to mutation statements - what visibility
    // of columns should we return? Mutations sometimes need to see in-progress
    // schema change columns, in which case this field will be changed from its
    // default PUBLIC state. Causes the index join to return these schema change
    // columns.
    Visibility ScanVisibility `protobuf:"varint,7,opt,name=visibility,enum=cockroach.sql.distsqlrun.ScanVisibility" json:"visibility"`
    // Indicates the row-level locking strength to be used by the join. If set to
    // FOR_NONE, no row-level locking should be performed.
    LockingStrength descpb.ScanLockingStrength `protobuf:"varint,9,opt,name=locking_strength,json=lockingStrength,enum=cockroach.sql.sqlbase.ScanLockingStrength" json:"locking_strength"`
    // Indicates the policy to be used by the join when dealing with rows being
    // locked. Always set to BLOCK when locking_strength is FOR_NONE.
    //
    // NOTE: this is currently set but unused because only the BLOCK wait policy
    // makes it out of the SQL optimizer without throwing an error. If/when other
    // wait policies are supported, this field will be plumbed further.
    LockingWaitPolicy descpb.ScanLockingWaitPolicy `protobuf:"varint,10,opt,name=locking_wait_policy,json=lockingWaitPolicy,enum=cockroach.sql.sqlbase.ScanLockingWaitPolicy" json:"locking_wait_policy"`
    // Indicates that the join reader should maintain the ordering of the input
    // stream. This is only applicable to lookup joins, where doing so is
    // expensive. Index joins do this by default regardless of the parameter.
    MaintainOrdering bool `protobuf:"varint,11,opt,name=maintain_ordering,json=maintainOrdering" json:"maintain_ordering"`
    // Indicates what implicit system columns this JoinReader is expected to
    // synthesize. These system columns will be placed at the end of the row
    // output by the JoinReader, in the order specified.
    //
    // This is only used in the special case of index joins, where the final
    // result of the secondary index joined against the primary index is
    // expected to contain the materialized system columns.
    SystemColumns []descpb.SystemColumnKind `protobuf:"varint,12,rep,name=system_columns,json=systemColumns,enum=cockroach.sql.sqlbase.SystemColumnKind" json:"system_columns,omitempty"`
}

JoinReaderSpec is the specification for a "join reader". A join reader performs KV operations to retrieve specific rows that correspond to the values in the input stream (join by lookup). The output always preserves the order of the input rows.

The "internal columns" of a JoinReader (see ProcessorSpec) are either:

- the columns of the table, if we are performing an index join (no lookup
  columns) or if we are performing a semi or anti join, or
- the concatenation of the columns of the input stream with the table
  columns.

Internally, only the values for the columns needed by the post-processing stage are populated.

Example: Input stream columns: | a | b | Table columns: | c | d | e |

If performing a lookup join on a = c (lookup columns is [0]):

Internal columns: | a | b | c | d | e |

If performing an index join (where a = c and b = d) (lookup columns is []):

Internal columns: | c | d | e |

func (*JoinReaderSpec) Descriptor Uses

func (*JoinReaderSpec) Descriptor() ([]byte, []int)

func (*JoinReaderSpec) Marshal Uses

func (m *JoinReaderSpec) Marshal() (dAtA []byte, err error)

func (*JoinReaderSpec) MarshalTo Uses

func (m *JoinReaderSpec) MarshalTo(dAtA []byte) (int, error)

func (*JoinReaderSpec) ProtoMessage Uses

func (*JoinReaderSpec) ProtoMessage()

func (*JoinReaderSpec) Reset Uses

func (m *JoinReaderSpec) Reset()

func (*JoinReaderSpec) Size Uses

func (m *JoinReaderSpec) Size() (n int)

func (*JoinReaderSpec) String Uses

func (m *JoinReaderSpec) String() string

func (*JoinReaderSpec) Unmarshal Uses

func (m *JoinReaderSpec) Unmarshal(dAtA []byte) error

func (*JoinReaderSpec) XXX_DiscardUnknown Uses

func (m *JoinReaderSpec) XXX_DiscardUnknown()

func (*JoinReaderSpec) XXX_Marshal Uses

func (m *JoinReaderSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JoinReaderSpec) XXX_Merge Uses

func (dst *JoinReaderSpec) XXX_Merge(src proto.Message)

func (*JoinReaderSpec) XXX_Size Uses

func (m *JoinReaderSpec) XXX_Size() int

func (*JoinReaderSpec) XXX_Unmarshal Uses

func (m *JoinReaderSpec) XXX_Unmarshal(b []byte) error

type LocalPlanNodeSpec Uses

type LocalPlanNodeSpec struct {
    RowSourceIdx *uint32 `protobuf:"varint,1,opt,name=RowSourceIdx" json:"RowSourceIdx,omitempty"`
    NumInputs    *uint32 `protobuf:"varint,2,opt,name=NumInputs" json:"NumInputs,omitempty"`
    Name         *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"`
}

LocalPlanNodeSpec is the specification for a local planNode wrapping processor. It's created for situations where a planNode has no DistSQL processor equivalent, and constrains the plan to live on the gateway node. This spec contains just an index, which is used by the execution engine to find the planNode instance this processor is executing in an array of local planNodes. See LocalProcessors and LocalProcessorIndexes on distsqlplan.PhysicalPlan.

func (*LocalPlanNodeSpec) Descriptor Uses

func (*LocalPlanNodeSpec) Descriptor() ([]byte, []int)

func (*LocalPlanNodeSpec) Marshal Uses

func (m *LocalPlanNodeSpec) Marshal() (dAtA []byte, err error)

func (*LocalPlanNodeSpec) MarshalTo Uses

func (m *LocalPlanNodeSpec) MarshalTo(dAtA []byte) (int, error)

func (*LocalPlanNodeSpec) ProtoMessage Uses

func (*LocalPlanNodeSpec) ProtoMessage()

func (*LocalPlanNodeSpec) Reset Uses

func (m *LocalPlanNodeSpec) Reset()

func (*LocalPlanNodeSpec) Size Uses

func (m *LocalPlanNodeSpec) Size() (n int)

func (*LocalPlanNodeSpec) String Uses

func (m *LocalPlanNodeSpec) String() string

func (*LocalPlanNodeSpec) Unmarshal Uses

func (m *LocalPlanNodeSpec) Unmarshal(dAtA []byte) error

func (*LocalPlanNodeSpec) XXX_DiscardUnknown Uses

func (m *LocalPlanNodeSpec) XXX_DiscardUnknown()

func (*LocalPlanNodeSpec) XXX_Marshal Uses

func (m *LocalPlanNodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LocalPlanNodeSpec) XXX_Merge Uses

func (dst *LocalPlanNodeSpec) XXX_Merge(src proto.Message)

func (*LocalPlanNodeSpec) XXX_Size Uses

func (m *LocalPlanNodeSpec) XXX_Size() int

func (*LocalPlanNodeSpec) XXX_Unmarshal Uses

func (m *LocalPlanNodeSpec) XXX_Unmarshal(b []byte) error

type MergeJoinerSpec Uses

type MergeJoinerSpec struct {
    // The streams must be ordered according to the columns that have equality
    // constraints. The first column of the left ordering is constrained to be
    // equal to the first column in the right ordering and so on. The ordering
    // lengths and directions must match.
    // In the example above, left ordering describes C1+,C2- and right ordering
    // describes C5+,C4-.
    LeftOrdering  Ordering `protobuf:"bytes,1,opt,name=left_ordering,json=leftOrdering" json:"left_ordering"`
    RightOrdering Ordering `protobuf:"bytes,2,opt,name=right_ordering,json=rightOrdering" json:"right_ordering"`
    // "ON" expression (in addition to the equality constraints captured by the
    // orderings). Assuming that the left stream has N columns and the right
    // stream has M columns, in this expression ordinal references @1 to @N refer
    // to columns of the left stream and variables @(N+1) to @(N+M) refer to
    // columns in the right stream.
    OnExpr Expression      `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
    Type   descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
    // NullEquality indicates that NULL = NULL should be considered true.
    // This allows OUTER JOINs to consider NULL values meaningfully. An
    // example of this is during SCRUB checks on secondary indexes.
    NullEquality bool `protobuf:"varint,7,opt,name=null_equality,json=nullEquality" json:"null_equality"`
    // If true, it is guaranteed that the left equality columns form a key for
    // the left input. In other words, no two rows from the left input have the
    // same set of values on the left equality columns.
    LeftEqColumnsAreKey bool `protobuf:"varint,8,opt,name=left_eq_columns_are_key,json=leftEqColumnsAreKey" json:"left_eq_columns_are_key"`
    // If true, it is guaranteed that the right equality columns form a key for
    // the right input. In other words, no two rows from the right input have the
    // same set of values on the right equality columns.
    RightEqColumnsAreKey bool `protobuf:"varint,9,opt,name=right_eq_columns_are_key,json=rightEqColumnsAreKey" json:"right_eq_columns_are_key"`
}

MergeJoinerSpec is the specification for a merge join processor. The processor has two inputs and one output. The inputs must have the same ordering on the columns that have equality constraints. For example:

SELECT * FROM T1 INNER JOIN T2 ON T1.C1 = T2.C5 AND T1.C2 = T2.C4

To perform a merge join, the streams corresponding to T1 and T2 must have the same ordering on columns C1, C2 and C5, C4 respectively. For example: C1+,C2- and C5+,C4-.

The "internal columns" of a MergeJoiner (see ProcessorSpec) are the concatenation of left input columns and right input columns. If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side.

In the case of semi-join and anti-join, the processor core outputs only the left columns.

func (*MergeJoinerSpec) Descriptor Uses

func (*MergeJoinerSpec) Descriptor() ([]byte, []int)

func (*MergeJoinerSpec) Marshal Uses

func (m *MergeJoinerSpec) Marshal() (dAtA []byte, err error)

func (*MergeJoinerSpec) MarshalTo Uses

func (m *MergeJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*MergeJoinerSpec) ProtoMessage Uses

func (*MergeJoinerSpec) ProtoMessage()

func (*MergeJoinerSpec) Reset Uses

func (m *MergeJoinerSpec) Reset()

func (*MergeJoinerSpec) Size Uses

func (m *MergeJoinerSpec) Size() (n int)

func (*MergeJoinerSpec) String Uses

func (m *MergeJoinerSpec) String() string

func (*MergeJoinerSpec) Unmarshal Uses

func (m *MergeJoinerSpec) Unmarshal(dAtA []byte) error

func (*MergeJoinerSpec) XXX_DiscardUnknown Uses

func (m *MergeJoinerSpec) XXX_DiscardUnknown()

func (*MergeJoinerSpec) XXX_Marshal Uses

func (m *MergeJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MergeJoinerSpec) XXX_Merge Uses

func (dst *MergeJoinerSpec) XXX_Merge(src proto.Message)

func (*MergeJoinerSpec) XXX_Size Uses

func (m *MergeJoinerSpec) XXX_Size() int

func (*MergeJoinerSpec) XXX_Unmarshal Uses

func (m *MergeJoinerSpec) XXX_Unmarshal(b []byte) error

type MetadataSource Uses

type MetadataSource interface {
    // DrainMeta returns all the metadata produced by the processor or operator.
    // It will be called exactly once, usually, when the processor or operator
    // has finished doing its computations. This is a signal that the output
    // requires no more rows to be returned.
    // Implementers can choose what to do on subsequent calls (if such occur).
    // TODO(yuzefovich): modify the contract to require returning nil on all
    // calls after the first one.
    DrainMeta(context.Context) []ProducerMetadata
}

MetadataSource is an interface implemented by processors and columnar operators that can produce metadata.

type MetadataSources Uses

type MetadataSources []MetadataSource

MetadataSources is a slice of MetadataSource.

func (MetadataSources) DrainMeta Uses

func (s MetadataSources) DrainMeta(ctx context.Context) []ProducerMetadata

DrainMeta calls DrainMeta on all MetadataSources and returns a single slice with all the accumulated metadata.

type MetadataTestReceiverSpec Uses

type MetadataTestReceiverSpec struct {
    SenderIDs []string `protobuf:"bytes,1,rep,name=sender_ids,json=senderIds" json:"sender_ids,omitempty"`
}

func (*MetadataTestReceiverSpec) Descriptor Uses

func (*MetadataTestReceiverSpec) Descriptor() ([]byte, []int)

func (*MetadataTestReceiverSpec) Marshal Uses

func (m *MetadataTestReceiverSpec) Marshal() (dAtA []byte, err error)

func (*MetadataTestReceiverSpec) MarshalTo Uses

func (m *MetadataTestReceiverSpec) MarshalTo(dAtA []byte) (int, error)

func (*MetadataTestReceiverSpec) ProtoMessage Uses

func (*MetadataTestReceiverSpec) ProtoMessage()

func (*MetadataTestReceiverSpec) Reset Uses

func (m *MetadataTestReceiverSpec) Reset()

func (*MetadataTestReceiverSpec) Size Uses

func (m *MetadataTestReceiverSpec) Size() (n int)

func (*MetadataTestReceiverSpec) String Uses

func (m *MetadataTestReceiverSpec) String() string

func (*MetadataTestReceiverSpec) Unmarshal Uses

func (m *MetadataTestReceiverSpec) Unmarshal(dAtA []byte) error

func (*MetadataTestReceiverSpec) XXX_DiscardUnknown Uses

func (m *MetadataTestReceiverSpec) XXX_DiscardUnknown()

func (*MetadataTestReceiverSpec) XXX_Marshal Uses

func (m *MetadataTestReceiverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MetadataTestReceiverSpec) XXX_Merge Uses

func (dst *MetadataTestReceiverSpec) XXX_Merge(src proto.Message)

func (*MetadataTestReceiverSpec) XXX_Size Uses

func (m *MetadataTestReceiverSpec) XXX_Size() int

func (*MetadataTestReceiverSpec) XXX_Unmarshal Uses

func (m *MetadataTestReceiverSpec) XXX_Unmarshal(b []byte) error

type MetadataTestSenderSpec Uses

type MetadataTestSenderSpec struct {
    ID string `protobuf:"bytes,1,opt,name=id" json:"id"`
}

func (*MetadataTestSenderSpec) Descriptor Uses

func (*MetadataTestSenderSpec) Descriptor() ([]byte, []int)

func (*MetadataTestSenderSpec) Marshal Uses

func (m *MetadataTestSenderSpec) Marshal() (dAtA []byte, err error)

func (*MetadataTestSenderSpec) MarshalTo Uses

func (m *MetadataTestSenderSpec) MarshalTo(dAtA []byte) (int, error)

func (*MetadataTestSenderSpec) ProtoMessage Uses

func (*MetadataTestSenderSpec) ProtoMessage()

func (*MetadataTestSenderSpec) Reset Uses

func (m *MetadataTestSenderSpec) Reset()

func (*MetadataTestSenderSpec) Size Uses

func (m *MetadataTestSenderSpec) Size() (n int)

func (*MetadataTestSenderSpec) String Uses

func (m *MetadataTestSenderSpec) String() string

func (*MetadataTestSenderSpec) Unmarshal Uses

func (m *MetadataTestSenderSpec) Unmarshal(dAtA []byte) error

func (*MetadataTestSenderSpec) XXX_DiscardUnknown Uses

func (m *MetadataTestSenderSpec) XXX_DiscardUnknown()

func (*MetadataTestSenderSpec) XXX_Marshal Uses

func (m *MetadataTestSenderSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MetadataTestSenderSpec) XXX_Merge Uses

func (dst *MetadataTestSenderSpec) XXX_Merge(src proto.Message)

func (*MetadataTestSenderSpec) XXX_Size Uses

func (m *MetadataTestSenderSpec) XXX_Size() int

func (*MetadataTestSenderSpec) XXX_Unmarshal Uses

func (m *MetadataTestSenderSpec) XXX_Unmarshal(b []byte) error

type MockDialer Uses

type MockDialer struct {
    // Addr is assumed to be obtained from execinfrapb.StartMockDistSQLServer.
    Addr net.Addr
    // contains filtered or unexported fields
}

MockDialer is a mocked implementation of the Outbox's `Dialer` interface. Used to create a connection with a client stream.

func (*MockDialer) Close Uses

func (d *MockDialer) Close()

Close must be called after the test is done.

func (*MockDialer) Dial Uses

func (d *MockDialer) Dial(
    context.Context, roachpb.NodeID, rpc.ConnectionClass,
) (*grpc.ClientConn, error)

Dial establishes a grpc connection once.

type MockDistSQLServer Uses

type MockDistSQLServer struct {
    InboundStreams   chan InboundStreamNotification
    RunSyncFlowCalls chan RunSyncFlowCall
}

MockDistSQLServer implements the DistSQLServer (gRPC) interface and allows clients to control the inbound streams.

func StartMockDistSQLServer Uses

func StartMockDistSQLServer(
    clock *hlc.Clock, stopper *stop.Stopper, nodeID roachpb.NodeID,
) (uuid.UUID, *MockDistSQLServer, net.Addr, error)

StartMockDistSQLServer starts a MockDistSQLServer and returns the address on which it's listening.

func (*MockDistSQLServer) FlowStream Uses

func (ds *MockDistSQLServer) FlowStream(stream DistSQL_FlowStreamServer) error

FlowStream is part of the DistSQLServer interface.

func (*MockDistSQLServer) RunSyncFlow Uses

func (ds *MockDistSQLServer) RunSyncFlow(stream DistSQL_RunSyncFlowServer) error

RunSyncFlow is part of the DistSQLServer interface.

func (*MockDistSQLServer) SetupFlow Uses

func (ds *MockDistSQLServer) SetupFlow(
    _ context.Context, req *SetupFlowRequest,
) (*SimpleResponse, error)

SetupFlow is part of the DistSQLServer interface.

type NoopCoreSpec Uses

type NoopCoreSpec struct {
}

NoopCoreSpec indicates a "no-op" processor core. This is used when we just need post-processing or when only a synchronizer is required (e.g. at the final endpoint).

func (*NoopCoreSpec) Descriptor Uses

func (*NoopCoreSpec) Descriptor() ([]byte, []int)

func (*NoopCoreSpec) Marshal Uses

func (m *NoopCoreSpec) Marshal() (dAtA []byte, err error)

func (*NoopCoreSpec) MarshalTo Uses

func (m *NoopCoreSpec) MarshalTo(dAtA []byte) (int, error)

func (*NoopCoreSpec) ProtoMessage Uses

func (*NoopCoreSpec) ProtoMessage()

func (*NoopCoreSpec) Reset Uses

func (m *NoopCoreSpec) Reset()

func (*NoopCoreSpec) Size Uses

func (m *NoopCoreSpec) Size() (n int)

func (*NoopCoreSpec) String Uses

func (m *NoopCoreSpec) String() string

func (*NoopCoreSpec) Unmarshal Uses

func (m *NoopCoreSpec) Unmarshal(dAtA []byte) error

func (*NoopCoreSpec) XXX_DiscardUnknown Uses

func (m *NoopCoreSpec) XXX_DiscardUnknown()

func (*NoopCoreSpec) XXX_Marshal Uses

func (m *NoopCoreSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NoopCoreSpec) XXX_Merge Uses

func (dst *NoopCoreSpec) XXX_Merge(src proto.Message)

func (*NoopCoreSpec) XXX_Size Uses

func (m *NoopCoreSpec) XXX_Size() int

func (*NoopCoreSpec) XXX_Unmarshal Uses

func (m *NoopCoreSpec) XXX_Unmarshal(b []byte) error

type Ordering Uses

type Ordering struct {
    Columns []Ordering_Column `protobuf:"bytes,1,rep,name=columns" json:"columns"`
}

Ordering defines an order - specifically a list of column indices and directions. See sqlbase.ColumnOrdering.

func ConvertToMappedSpecOrdering Uses

func ConvertToMappedSpecOrdering(
    columnOrdering sqlbase.ColumnOrdering, planToStreamColMap []int,
) Ordering

ConvertToMappedSpecOrdering converts a sqlbase.ColumnOrdering type to an Ordering type (as defined in data.proto), using the column indices contained in planToStreamColMap.

func ConvertToSpecOrdering Uses

func ConvertToSpecOrdering(columnOrdering sqlbase.ColumnOrdering) Ordering

ConvertToSpecOrdering converts a sqlbase.ColumnOrdering type to an Ordering type (as defined in data.proto).

func (*Ordering) Descriptor Uses

func (*Ordering) Descriptor() ([]byte, []int)

func (*Ordering) Equal Uses

func (this *Ordering) Equal(that interface{}) bool

func (*Ordering) Marshal Uses

func (m *Ordering) Marshal() (dAtA []byte, err error)

func (*Ordering) MarshalTo Uses

func (m *Ordering) MarshalTo(dAtA []byte) (int, error)

func (*Ordering) ProtoMessage Uses

func (*Ordering) ProtoMessage()

func (*Ordering) Reset Uses

func (m *Ordering) Reset()

func (*Ordering) Size Uses

func (m *Ordering) Size() (n int)

func (*Ordering) String Uses

func (m *Ordering) String() string

func (*Ordering) Unmarshal Uses

func (m *Ordering) Unmarshal(dAtA []byte) error

func (*Ordering) XXX_DiscardUnknown Uses

func (m *Ordering) XXX_DiscardUnknown()

func (*Ordering) XXX_Marshal Uses

func (m *Ordering) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Ordering) XXX_Merge Uses

func (dst *Ordering) XXX_Merge(src proto.Message)

func (*Ordering) XXX_Size Uses

func (m *Ordering) XXX_Size() int

func (*Ordering) XXX_Unmarshal Uses

func (m *Ordering) XXX_Unmarshal(b []byte) error

type Ordering_Column Uses

type Ordering_Column struct {
    ColIdx    uint32                    `protobuf:"varint,1,opt,name=col_idx,json=colIdx" json:"col_idx"`
    Direction Ordering_Column_Direction `protobuf:"varint,2,opt,name=direction,enum=cockroach.sql.distsqlrun.Ordering_Column_Direction" json:"direction"`
}

func (*Ordering_Column) Descriptor Uses

func (*Ordering_Column) Descriptor() ([]byte, []int)

func (*Ordering_Column) Equal Uses

func (this *Ordering_Column) Equal(that interface{}) bool

func (*Ordering_Column) Marshal Uses

func (m *Ordering_Column) Marshal() (dAtA []byte, err error)

func (*Ordering_Column) MarshalTo Uses

func (m *Ordering_Column) MarshalTo(dAtA []byte) (int, error)

func (*Ordering_Column) ProtoMessage Uses

func (*Ordering_Column) ProtoMessage()

func (*Ordering_Column) Reset Uses

func (m *Ordering_Column) Reset()

func (*Ordering_Column) Size Uses

func (m *Ordering_Column) Size() (n int)

func (*Ordering_Column) String Uses

func (m *Ordering_Column) String() string

func (*Ordering_Column) Unmarshal Uses

func (m *Ordering_Column) Unmarshal(dAtA []byte) error

func (*Ordering_Column) XXX_DiscardUnknown Uses

func (m *Ordering_Column) XXX_DiscardUnknown()

func (*Ordering_Column) XXX_Marshal Uses

func (m *Ordering_Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Ordering_Column) XXX_Merge Uses

func (dst *Ordering_Column) XXX_Merge(src proto.Message)

func (*Ordering_Column) XXX_Size Uses

func (m *Ordering_Column) XXX_Size() int

func (*Ordering_Column) XXX_Unmarshal Uses

func (m *Ordering_Column) XXX_Unmarshal(b []byte) error

type Ordering_Column_Direction Uses

type Ordering_Column_Direction int32

The direction of the desired ordering for a column.

const (
    Ordering_Column_ASC  Ordering_Column_Direction = 0
    Ordering_Column_DESC Ordering_Column_Direction = 1
)

func (Ordering_Column_Direction) Enum Uses

func (x Ordering_Column_Direction) Enum() *Ordering_Column_Direction

func (Ordering_Column_Direction) EnumDescriptor Uses

func (Ordering_Column_Direction) EnumDescriptor() ([]byte, []int)

func (Ordering_Column_Direction) String Uses

func (x Ordering_Column_Direction) String() string

func (*Ordering_Column_Direction) UnmarshalJSON Uses

func (x *Ordering_Column_Direction) UnmarshalJSON(data []byte) error

type OrdinalitySpec Uses

type OrdinalitySpec struct {
}

The specification for a WITH ORDINALITY processor. It adds a new column to each resulting row that contains the ordinal number of the row. Since there are no arguments for this operator, the spec is empty.

func (*OrdinalitySpec) Descriptor Uses

func (*OrdinalitySpec) Descriptor() ([]byte, []int)

func (*OrdinalitySpec) Marshal Uses

func (m *OrdinalitySpec) Marshal() (dAtA []byte, err error)

func (*OrdinalitySpec) MarshalTo Uses

func (m *OrdinalitySpec) MarshalTo(dAtA []byte) (int, error)

func (*OrdinalitySpec) ProtoMessage Uses

func (*OrdinalitySpec) ProtoMessage()

func (*OrdinalitySpec) Reset Uses

func (m *OrdinalitySpec) Reset()

func (*OrdinalitySpec) Size Uses

func (m *OrdinalitySpec) Size() (n int)

func (*OrdinalitySpec) String Uses

func (m *OrdinalitySpec) String() string

func (*OrdinalitySpec) Unmarshal Uses

func (m *OrdinalitySpec) Unmarshal(dAtA []byte) error

func (*OrdinalitySpec) XXX_DiscardUnknown Uses

func (m *OrdinalitySpec) XXX_DiscardUnknown()

func (*OrdinalitySpec) XXX_Marshal Uses

func (m *OrdinalitySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OrdinalitySpec) XXX_Merge Uses

func (dst *OrdinalitySpec) XXX_Merge(src proto.Message)

func (*OrdinalitySpec) XXX_Size Uses

func (m *OrdinalitySpec) XXX_Size() int

func (*OrdinalitySpec) XXX_Unmarshal Uses

func (m *OrdinalitySpec) XXX_Unmarshal(b []byte) error

type OutputRouterSpec Uses

type OutputRouterSpec struct {
    Type    OutputRouterSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.OutputRouterSpec_Type" json:"type"`
    Streams []StreamEndpointSpec  `protobuf:"bytes,2,rep,name=streams" json:"streams"`
    // Only used for the BY_HASH type; these are the indexes of the columns we are
    // hashing.
    HashColumns     []uint32                         `protobuf:"varint,3,rep,name=hash_columns,json=hashColumns" json:"hash_columns,omitempty"`
    RangeRouterSpec OutputRouterSpec_RangeRouterSpec `protobuf:"bytes,4,opt,name=range_router_spec,json=rangeRouterSpec" json:"range_router_spec"`
    // disable_buffering disables output buffering. Generally buffering should be
    // enabled to prevent deadlocks. However some plans are known not to deadlock,
    // and so can set this flag to prevent unbounded buffering causing OOMs.
    DisableBuffering bool `protobuf:"varint,5,opt,name=disable_buffering,json=disableBuffering" json:"disable_buffering"`
}

OutputRouterSpec is the specification for the output router of a processor; it decides how to send results to multiple output streams.

func (*OutputRouterSpec) Descriptor Uses

func (*OutputRouterSpec) Descriptor() ([]byte, []int)

func (*OutputRouterSpec) Marshal Uses

func (m *OutputRouterSpec) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec) MarshalTo Uses

func (m *OutputRouterSpec) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec) ProtoMessage Uses

func (*OutputRouterSpec) ProtoMessage()

func (*OutputRouterSpec) Reset Uses

func (m *OutputRouterSpec) Reset()

func (*OutputRouterSpec) Size Uses

func (m *OutputRouterSpec) Size() (n int)

func (*OutputRouterSpec) String Uses

func (m *OutputRouterSpec) String() string

func (*OutputRouterSpec) Unmarshal Uses

func (m *OutputRouterSpec) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec) XXX_DiscardUnknown Uses

func (m *OutputRouterSpec) XXX_DiscardUnknown()

func (*OutputRouterSpec) XXX_Marshal Uses

func (m *OutputRouterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec) XXX_Merge Uses

func (dst *OutputRouterSpec) XXX_Merge(src proto.Message)

func (*OutputRouterSpec) XXX_Size Uses

func (m *OutputRouterSpec) XXX_Size() int

func (*OutputRouterSpec) XXX_Unmarshal Uses

func (m *OutputRouterSpec) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_RangeRouterSpec Uses

type OutputRouterSpec_RangeRouterSpec struct {
    // spans is a slice of Span. Input matching a span will be routed to its
    // specified stream.
    Spans []OutputRouterSpec_RangeRouterSpec_Span `protobuf:"bytes,1,rep,name=spans" json:"spans"`
    // default_dest, if not nil, is the index of the stream to send rows that do
    // not match any span. If nil, a row that does not match a span will produce
    // an error in the router.
    DefaultDest *int32 `protobuf:"varint,2,opt,name=default_dest,json=defaultDest" json:"default_dest,omitempty"`
    // encodings is a slice of columns and encodings. Each will be appended to a
    // []byte, which is used as input to the spans. Columns from the input rows
    // potentially need to be recoded to match the encoding used for the spans.
    Encodings []OutputRouterSpec_RangeRouterSpec_ColumnEncoding `protobuf:"bytes,3,rep,name=encodings" json:"encodings"`
}

func (*OutputRouterSpec_RangeRouterSpec) Descriptor Uses

func (*OutputRouterSpec_RangeRouterSpec) Descriptor() ([]byte, []int)

func (*OutputRouterSpec_RangeRouterSpec) Marshal Uses

func (m *OutputRouterSpec_RangeRouterSpec) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec_RangeRouterSpec) MarshalTo Uses

func (m *OutputRouterSpec_RangeRouterSpec) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec) ProtoMessage Uses

func (*OutputRouterSpec_RangeRouterSpec) ProtoMessage()

func (*OutputRouterSpec_RangeRouterSpec) Reset Uses

func (m *OutputRouterSpec_RangeRouterSpec) Reset()

func (*OutputRouterSpec_RangeRouterSpec) Size Uses

func (m *OutputRouterSpec_RangeRouterSpec) Size() (n int)

func (*OutputRouterSpec_RangeRouterSpec) String Uses

func (m *OutputRouterSpec_RangeRouterSpec) String() string

func (*OutputRouterSpec_RangeRouterSpec) Unmarshal Uses

func (m *OutputRouterSpec_RangeRouterSpec) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec_RangeRouterSpec) XXX_DiscardUnknown Uses

func (m *OutputRouterSpec_RangeRouterSpec) XXX_DiscardUnknown()

func (*OutputRouterSpec_RangeRouterSpec) XXX_Marshal Uses

func (m *OutputRouterSpec_RangeRouterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec_RangeRouterSpec) XXX_Merge Uses

func (dst *OutputRouterSpec_RangeRouterSpec) XXX_Merge(src proto.Message)

func (*OutputRouterSpec_RangeRouterSpec) XXX_Size Uses

func (m *OutputRouterSpec_RangeRouterSpec) XXX_Size() int

func (*OutputRouterSpec_RangeRouterSpec) XXX_Unmarshal Uses

func (m *OutputRouterSpec_RangeRouterSpec) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_RangeRouterSpec_ColumnEncoding Uses

type OutputRouterSpec_RangeRouterSpec_ColumnEncoding struct {
    // column is the index of a column to encode.
    Column uint32 `protobuf:"varint,1,opt,name=column" json:"column"`
    // encoding specifies how a particular column is to be encoded for
    // generating the sort key for a row. This needs to correspond to the way
    // the Span.{start,end} keys have been generated.
    Encoding descpb.DatumEncoding `protobuf:"varint,2,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
}

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Descriptor Uses

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Descriptor() ([]byte, []int)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Marshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalTo Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) ProtoMessage Uses

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) ProtoMessage()

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Reset Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Reset()

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Size Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Size() (n int)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) String Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) String() string

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Unmarshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_DiscardUnknown Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_DiscardUnknown()

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Marshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Merge Uses

func (dst *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Merge(src proto.Message)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Size Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Size() int

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Unmarshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_RangeRouterSpec_Span Uses

type OutputRouterSpec_RangeRouterSpec_Span struct {
    Start []byte `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"`
    End   []byte `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
    // stream is the index of the destination stream.
    Stream int32 `protobuf:"varint,3,opt,name=stream" json:"stream"`
}

Span matches bytes in [start, end).

func (*OutputRouterSpec_RangeRouterSpec_Span) Descriptor Uses

func (*OutputRouterSpec_RangeRouterSpec_Span) Descriptor() ([]byte, []int)

func (*OutputRouterSpec_RangeRouterSpec_Span) Marshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec_RangeRouterSpec_Span) MarshalTo Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec_Span) ProtoMessage Uses

func (*OutputRouterSpec_RangeRouterSpec_Span) ProtoMessage()

func (*OutputRouterSpec_RangeRouterSpec_Span) Reset Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) Reset()

func (*OutputRouterSpec_RangeRouterSpec_Span) Size Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) Size() (n int)

func (*OutputRouterSpec_RangeRouterSpec_Span) String Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) String() string

func (*OutputRouterSpec_RangeRouterSpec_Span) Unmarshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_DiscardUnknown Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_DiscardUnknown()

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Marshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Merge Uses

func (dst *OutputRouterSpec_RangeRouterSpec_Span) XXX_Merge(src proto.Message)

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Size Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_Size() int

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Unmarshal Uses

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_Type Uses

type OutputRouterSpec_Type int32
const (
    // Single output stream.
    OutputRouterSpec_PASS_THROUGH OutputRouterSpec_Type = 0
    // Each row is sent to all output streams.
    OutputRouterSpec_MIRROR OutputRouterSpec_Type = 1
    // Each row is sent to one stream, chosen by hashing certain columns of
    // the row (specified by the hash_columns field).
    OutputRouterSpec_BY_HASH OutputRouterSpec_Type = 2
    // Each row is sent to one stream, chosen according to preset boundaries
    // for the values of certain columns of the row.
    OutputRouterSpec_BY_RANGE OutputRouterSpec_Type = 3
)

func (OutputRouterSpec_Type) Enum Uses

func (x OutputRouterSpec_Type) Enum() *OutputRouterSpec_Type

func (OutputRouterSpec_Type) EnumDescriptor Uses

func (OutputRouterSpec_Type) EnumDescriptor() ([]byte, []int)

func (OutputRouterSpec_Type) String Uses

func (x OutputRouterSpec_Type) String() string

func (*OutputRouterSpec_Type) UnmarshalJSON Uses

func (x *OutputRouterSpec_Type) UnmarshalJSON(data []byte) error

type PostProcessSpec Uses

type PostProcessSpec struct {
    // A filtering expression which references the internal columns of the
    // processor via ordinal references (@1, @2, etc).
    Filter Expression `protobuf:"bytes,1,opt,name=filter" json:"filter"`
    // If true, output_columns describes a projection. Used to differentiate
    // between an empty projection and no projection.
    //
    // Cannot be set at the same time with render expressions.
    Projection bool `protobuf:"varint,2,opt,name=projection" json:"projection"`
    // The output columns describe a projection on the internal set of columns;
    // only the columns in this list will be emitted.
    //
    // Can only be set if projection is true. Cannot be set at the same time with
    // render expressions.
    OutputColumns []uint32 `protobuf:"varint,3,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
    // If set, the output is the result of rendering these expressions. The
    // expressions reference the internal columns of the processor.
    //
    // Cannot be set at the same time with output columns.
    RenderExprs []Expression `protobuf:"bytes,4,rep,name=render_exprs,json=renderExprs" json:"render_exprs"`
    // If nonzero, the first <offset> rows will be suppressed.
    Offset uint64 `protobuf:"varint,5,opt,name=offset" json:"offset"`
    // If nonzero, the processor will stop after emitting this many rows. The rows
    // suppressed by <offset>, if any, do not count towards this limit.
    Limit uint64 `protobuf:"varint,6,opt,name=limit" json:"limit"`
}

PostProcessSpec describes the processing required to obtain the output (filtering, projection). It operates on the internal schema of the processor (see ProcessorSpec).

func (*PostProcessSpec) Descriptor Uses

func (*PostProcessSpec) Descriptor() ([]byte, []int)

func (*PostProcessSpec) Marshal Uses

func (m *PostProcessSpec) Marshal() (dAtA []byte, err error)

func (*PostProcessSpec) MarshalTo Uses

func (m *PostProcessSpec) MarshalTo(dAtA []byte) (int, error)

func (*PostProcessSpec) ProtoMessage Uses

func (*PostProcessSpec) ProtoMessage()

func (*PostProcessSpec) Reset Uses

func (m *PostProcessSpec) Reset()

func (*PostProcessSpec) Size Uses

func (m *PostProcessSpec) Size() (n int)

func (*PostProcessSpec) String Uses

func (m *PostProcessSpec) String() string

func (*PostProcessSpec) Unmarshal Uses

func (m *PostProcessSpec) Unmarshal(dAtA []byte) error

func (*PostProcessSpec) XXX_DiscardUnknown Uses

func (m *PostProcessSpec) XXX_DiscardUnknown()

func (*PostProcessSpec) XXX_Marshal Uses

func (m *PostProcessSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PostProcessSpec) XXX_Merge Uses

func (dst *PostProcessSpec) XXX_Merge(src proto.Message)

func (*PostProcessSpec) XXX_Size Uses

func (m *PostProcessSpec) XXX_Size() int

func (*PostProcessSpec) XXX_Unmarshal Uses

func (m *PostProcessSpec) XXX_Unmarshal(b []byte) error

type ProcessorCoreUnion Uses

type ProcessorCoreUnion struct {
    Noop                    *NoopCoreSpec                `protobuf:"bytes,1,opt,name=noop" json:"noop,omitempty"`
    TableReader             *TableReaderSpec             `protobuf:"bytes,2,opt,name=tableReader" json:"tableReader,omitempty"`
    JoinReader              *JoinReaderSpec              `protobuf:"bytes,3,opt,name=joinReader" json:"joinReader,omitempty"`
    Sorter                  *SorterSpec                  `protobuf:"bytes,4,opt,name=sorter" json:"sorter,omitempty"`
    Aggregator              *AggregatorSpec              `protobuf:"bytes,5,opt,name=aggregator" json:"aggregator,omitempty"`
    Distinct                *DistinctSpec                `protobuf:"bytes,7,opt,name=distinct" json:"distinct,omitempty"`
    MergeJoiner             *MergeJoinerSpec             `protobuf:"bytes,8,opt,name=mergeJoiner" json:"mergeJoiner,omitempty"`
    HashJoiner              *HashJoinerSpec              `protobuf:"bytes,9,opt,name=hashJoiner" json:"hashJoiner,omitempty"`
    Values                  *ValuesCoreSpec              `protobuf:"bytes,10,opt,name=values" json:"values,omitempty"`
    Backfiller              *BackfillerSpec              `protobuf:"bytes,11,opt,name=backfiller" json:"backfiller,omitempty"`
    ReadImport              *ReadImportDataSpec          `protobuf:"bytes,13,opt,name=readImport" json:"readImport,omitempty"`
    CSVWriter               *CSVWriterSpec               `protobuf:"bytes,20,opt,name=CSVWriter" json:"CSVWriter,omitempty"`
    Sampler                 *SamplerSpec                 `protobuf:"bytes,15,opt,name=Sampler" json:"Sampler,omitempty"`
    SampleAggregator        *SampleAggregatorSpec        `protobuf:"bytes,16,opt,name=SampleAggregator" json:"SampleAggregator,omitempty"`
    InterleavedReaderJoiner *InterleavedReaderJoinerSpec `protobuf:"bytes,17,opt,name=interleavedReaderJoiner" json:"interleavedReaderJoiner,omitempty"`
    MetadataTestSender      *MetadataTestSenderSpec      `protobuf:"bytes,18,opt,name=metadataTestSender" json:"metadataTestSender,omitempty"`
    MetadataTestReceiver    *MetadataTestReceiverSpec    `protobuf:"bytes,19,opt,name=metadataTestReceiver" json:"metadataTestReceiver,omitempty"`
    ZigzagJoiner            *ZigzagJoinerSpec            `protobuf:"bytes,21,opt,name=zigzagJoiner" json:"zigzagJoiner,omitempty"`
    ProjectSet              *ProjectSetSpec              `protobuf:"bytes,22,opt,name=projectSet" json:"projectSet,omitempty"`
    Windower                *WindowerSpec                `protobuf:"bytes,23,opt,name=windower" json:"windower,omitempty"`
    LocalPlanNode           *LocalPlanNodeSpec           `protobuf:"bytes,24,opt,name=localPlanNode" json:"localPlanNode,omitempty"`
    ChangeAggregator        *ChangeAggregatorSpec        `protobuf:"bytes,25,opt,name=changeAggregator" json:"changeAggregator,omitempty"`
    ChangeFrontier          *ChangeFrontierSpec          `protobuf:"bytes,26,opt,name=changeFrontier" json:"changeFrontier,omitempty"`
    Ordinality              *OrdinalitySpec              `protobuf:"bytes,27,opt,name=ordinality" json:"ordinality,omitempty"`
    BulkRowWriter           *BulkRowWriterSpec           `protobuf:"bytes,28,opt,name=bulkRowWriter" json:"bulkRowWriter,omitempty"`
    InvertedFilterer        *InvertedFiltererSpec        `protobuf:"bytes,29,opt,name=invertedFilterer" json:"invertedFilterer,omitempty"`
    InvertedJoiner          *InvertedJoinerSpec          `protobuf:"bytes,30,opt,name=invertedJoiner" json:"invertedJoiner,omitempty"`
    BackupData              *BackupDataSpec              `protobuf:"bytes,31,opt,name=backupData" json:"backupData,omitempty"`
    SplitAndScatter         *SplitAndScatterSpec         `protobuf:"bytes,32,opt,name=splitAndScatter" json:"splitAndScatter,omitempty"`
    RestoreData             *RestoreDataSpec             `protobuf:"bytes,33,opt,name=restoreData" json:"restoreData,omitempty"`
}

func (*ProcessorCoreUnion) Descriptor Uses

func (*ProcessorCoreUnion) Descriptor() ([]byte, []int)

func (*ProcessorCoreUnion) GetValue Uses

func (this *ProcessorCoreUnion) GetValue() interface{}

func (*ProcessorCoreUnion) Marshal Uses

func (m *ProcessorCoreUnion) Marshal() (dAtA []byte, err error)

func (*ProcessorCoreUnion) MarshalTo Uses

func (m *ProcessorCoreUnion) MarshalTo(dAtA []byte) (int, error)

func (*ProcessorCoreUnion) ProtoMessage Uses

func (*ProcessorCoreUnion) ProtoMessage()

func (*ProcessorCoreUnion) Reset Uses

func (m *ProcessorCoreUnion) Reset()

func (*ProcessorCoreUnion) SetValue Uses

func (this *ProcessorCoreUnion) SetValue(value interface{}) bool

func (*ProcessorCoreUnion) Size Uses

func (m *ProcessorCoreUnion) Size() (n int)

func (*ProcessorCoreUnion) String Uses

func (m *ProcessorCoreUnion) String() string

func (*ProcessorCoreUnion) Unmarshal Uses

func (m *ProcessorCoreUnion) Unmarshal(dAtA []byte) error

func (*ProcessorCoreUnion) XXX_DiscardUnknown Uses

func (m *ProcessorCoreUnion) XXX_DiscardUnknown()

func (*ProcessorCoreUnion) XXX_Marshal Uses

func (m *ProcessorCoreUnion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProcessorCoreUnion) XXX_Merge Uses

func (dst *ProcessorCoreUnion) XXX_Merge(src proto.Message)

func (*ProcessorCoreUnion) XXX_Size Uses

func (m *ProcessorCoreUnion) XXX_Size() int

func (*ProcessorCoreUnion) XXX_Unmarshal Uses

func (m *ProcessorCoreUnion) XXX_Unmarshal(b []byte) error

type ProcessorSpec Uses

type ProcessorSpec struct {
    // In most cases, there is one input.
    Input []InputSyncSpec    `protobuf:"bytes,1,rep,name=input" json:"input"`
    Core  ProcessorCoreUnion `protobuf:"bytes,2,opt,name=core" json:"core"`
    Post  PostProcessSpec    `protobuf:"bytes,4,opt,name=post" json:"post"`
    // In most cases, there is one output.
    Output []OutputRouterSpec `protobuf:"bytes,3,rep,name=output" json:"output"`
    // An optional identifier that can be used to correlate processors that are
    // part of the same stage (e.g. multiple joiners that are part of a
    // distributed join). This has no consequence on the running of flows, but is
    // useful for plan diagrams.
    StageID int32 `protobuf:"varint,5,opt,name=stage_id,json=stageId" json:"stage_id"`
    // processor_id uniquely identifies a processor within a physical plan. This
    // has no consequence on the running of flows, but is useful for plan
    // diagrams and linking information like tracing spans and log messages to
    // processors.
    ProcessorID int32 `protobuf:"varint,6,opt,name=processor_id,json=processorId" json:"processor_id"`
}

Each processor has the following components:

- one or more input synchronizers; each one merges rows between one or more
  input streams;

- a processor "core" which encapsulates the inner logic of each processor;

- a post-processing stage which allows "inline" post-processing on results
  (like projection or filtering);

- one or more output synchronizers; each one directs rows to one or more
  output streams.

== Internal columns ==

The core outputs rows of a certain schema to the post-processing stage. We call this the "internal schema" (or "internal columns") and it differs for each type of core. Column indices in a PostProcessSpec refer to these internal columns. Some columns may be unused by the post-processing stage; processor implementations are internally optimized to not produce values for such unneeded columns.

func (*ProcessorSpec) Descriptor Uses

func (*ProcessorSpec) Descriptor() ([]byte, []int)

func (*ProcessorSpec) Marshal Uses

func (m *ProcessorSpec) Marshal() (dAtA []byte, err error)

func (*ProcessorSpec) MarshalTo Uses

func (m *ProcessorSpec) MarshalTo(dAtA []byte) (int, error)

func (*ProcessorSpec) ProtoMessage Uses

func (*ProcessorSpec) ProtoMessage()

func (*ProcessorSpec) Reset Uses

func (m *ProcessorSpec) Reset()

func (*ProcessorSpec) Size Uses

func (m *ProcessorSpec) Size() (n int)

func (*ProcessorSpec) String Uses

func (m *ProcessorSpec) String() string

func (*ProcessorSpec) Unmarshal Uses

func (m *ProcessorSpec) Unmarshal(dAtA []byte) error

func (*ProcessorSpec) XXX_DiscardUnknown Uses

func (m *ProcessorSpec) XXX_DiscardUnknown()

func (*ProcessorSpec) XXX_Marshal Uses

func (m *ProcessorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProcessorSpec) XXX_Merge Uses

func (dst *ProcessorSpec) XXX_Merge(src proto.Message)

func (*ProcessorSpec) XXX_Size Uses

func (m *ProcessorSpec) XXX_Size() int

func (*ProcessorSpec) XXX_Unmarshal Uses

func (m *ProcessorSpec) XXX_Unmarshal(b []byte) error

type ProducerData Uses

type ProducerData struct {
    // A bunch of rows, encoded. Each datum is encoded according to the
    // corresponding DatumInfo.
    RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
    // In the special case when the stream contains empty rows, the count is
    // passed instead.
    NumEmptyRows int32 `protobuf:"varint,3,opt,name=num_empty_rows,json=numEmptyRows" json:"num_empty_rows"`
    // A bunch of metadata messages.
    Metadata []RemoteProducerMetadata `protobuf:"bytes,2,rep,name=metadata" json:"metadata"`
}

ProducerData is a message that can be sent multiple times as part of a stream from a producer to a consumer. It contains 0 or more rows and/or 0 or more metadata messages.

func (*ProducerData) Descriptor Uses

func (*ProducerData) Descriptor() ([]byte, []int)

func (*ProducerData) Marshal Uses

func (m *ProducerData) Marshal() (dAtA []byte, err error)

func (*ProducerData) MarshalTo Uses

func (m *ProducerData) MarshalTo(dAtA []byte) (int, error)

func (*ProducerData) ProtoMessage Uses

func (*ProducerData) ProtoMessage()

func (*ProducerData) Reset Uses

func (m *ProducerData) Reset()

func (*ProducerData) Size Uses

func (m *ProducerData) Size() (n int)

func (*ProducerData) String Uses

func (m *ProducerData) String() string

func (*ProducerData) Unmarshal Uses

func (m *ProducerData) Unmarshal(dAtA []byte) error

func (*ProducerData) XXX_DiscardUnknown Uses

func (m *ProducerData) XXX_DiscardUnknown()

func (*ProducerData) XXX_Marshal Uses

func (m *ProducerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProducerData) XXX_Merge Uses

func (dst *ProducerData) XXX_Merge(src proto.Message)

func (*ProducerData) XXX_Size Uses

func (m *ProducerData) XXX_Size() int

func (*ProducerData) XXX_Unmarshal Uses

func (m *ProducerData) XXX_Unmarshal(b []byte) error

type ProducerHeader Uses

type ProducerHeader struct {
    FlowID   FlowID   `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
    StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
}

ProducerHeader is a message that is sent once at the beginning of a stream.

func (*ProducerHeader) Descriptor Uses

func (*ProducerHeader) Descriptor() ([]byte, []int)

func (*ProducerHeader) Marshal Uses

func (m *ProducerHeader) Marshal() (dAtA []byte, err error)

func (*ProducerHeader) MarshalTo Uses

func (m *ProducerHeader) MarshalTo(dAtA []byte) (int, error)

func (*ProducerHeader) ProtoMessage Uses

func (*ProducerHeader) ProtoMessage()

func (*ProducerHeader) Reset Uses

func (m *ProducerHeader) Reset()

func (*ProducerHeader) Size Uses

func (m *ProducerHeader) Size() (n int)

func (*ProducerHeader) String Uses

func (m *ProducerHeader) String() string

func (*ProducerHeader) Unmarshal Uses

func (m *ProducerHeader) Unmarshal(dAtA []byte) error

func (*ProducerHeader) XXX_DiscardUnknown Uses

func (m *ProducerHeader) XXX_DiscardUnknown()

func (*ProducerHeader) XXX_Marshal Uses

func (m *ProducerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProducerHeader) XXX_Merge Uses

func (dst *ProducerHeader) XXX_Merge(src proto.Message)

func (*ProducerHeader) XXX_Size Uses

func (m *ProducerHeader) XXX_Size() int

func (*ProducerHeader) XXX_Unmarshal Uses

func (m *ProducerHeader) XXX_Unmarshal(b []byte) error

type ProducerMessage Uses

type ProducerMessage struct {
    Header *ProducerHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
    // Typing information. There will be one DatumInfo for each element in a row.
    // This field has to be populated on, or before, a ProducerMessage with data
    // in it, and can only be populated once. It can be nil if only zero length
    // rows will be sent.
    // TODO(andrei): It'd be nice if the typing information for streams would be
    // configured statically at plan creation time, instead of being discovered
    // dynamically through the first rows that flow.
    Typing []DatumInfo  `protobuf:"bytes,2,rep,name=typing" json:"typing"`
    Data   ProducerData `protobuf:"bytes,3,opt,name=data" json:"data"`
}

func (*ProducerMessage) Descriptor Uses

func (*ProducerMessage) Descriptor() ([]byte, []int)

func (*ProducerMessage) Marshal Uses

func (m *ProducerMessage) Marshal() (dAtA []byte, err error)

func (*ProducerMessage) MarshalTo Uses

func (m *ProducerMessage) MarshalTo(dAtA []byte) (int, error)

func (*ProducerMessage) ProtoMessage Uses

func (*ProducerMessage) ProtoMessage()

func (*ProducerMessage) Reset Uses

func (m *ProducerMessage) Reset()

func (*ProducerMessage) Size Uses

func (m *ProducerMessage) Size() (n int)

func (*ProducerMessage) String Uses

func (m *ProducerMessage) String() string

func (*ProducerMessage) Unmarshal Uses

func (m *ProducerMessage) Unmarshal(dAtA []byte) error

func (*ProducerMessage) XXX_DiscardUnknown Uses

func (m *ProducerMessage) XXX_DiscardUnknown()

func (*ProducerMessage) XXX_Marshal Uses

func (m *ProducerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProducerMessage) XXX_Merge Uses

func (dst *ProducerMessage) XXX_Merge(src proto.Message)

func (*ProducerMessage) XXX_Size Uses

func (m *ProducerMessage) XXX_Size() int

func (*ProducerMessage) XXX_Unmarshal Uses

func (m *ProducerMessage) XXX_Unmarshal(b []byte) error

type ProducerMetadata Uses

type ProducerMetadata struct {
    // Only one of these fields will be set. If this ever changes, note that
    // there're consumers out there that extract the error and, if there is one,
    // forward it in isolation and drop the rest of the record.
    Ranges []roachpb.RangeInfo
    // TODO(vivek): change to type Error
    Err error
    // TraceData is sent if snowball tracing is enabled.
    TraceData []tracing.RecordedSpan
    // LeafTxnFinalState contains the final state of the LeafTxn to be
    // sent from leaf flows to the RootTxn held by the flow's ultimate
    // receiver.
    LeafTxnFinalState *roachpb.LeafTxnFinalState
    // RowNum corresponds to a row produced by a "source" processor that takes no
    // inputs. It is used in tests to verify that all metadata is forwarded
    // exactly once to the receiver on the gateway node.
    RowNum *RemoteProducerMetadata_RowNum
    // SamplerProgress contains incremental progress information from the sampler
    // processor.
    SamplerProgress *RemoteProducerMetadata_SamplerProgress
    // BulkProcessorProgress contains incremental progress information from a bulk
    // operation processor (backfiller, import, etc).
    BulkProcessorProgress *RemoteProducerMetadata_BulkProcessorProgress
    // Metrics contains information about goodput of the node.
    Metrics *RemoteProducerMetadata_Metrics
}

ProducerMetadata represents a metadata record flowing through a DistSQL flow.

func GetProducerMeta Uses

func GetProducerMeta() *ProducerMetadata

GetProducerMeta returns a producer metadata object from the pool.

func RemoteProducerMetaToLocalMeta Uses

func RemoteProducerMetaToLocalMeta(
    ctx context.Context, rpm RemoteProducerMetadata,
) (ProducerMetadata, bool)

RemoteProducerMetaToLocalMeta converts a RemoteProducerMetadata struct to ProducerMetadata and returns whether the conversion was successful or not.

func (*ProducerMetadata) Release Uses

func (meta *ProducerMetadata) Release()

Release is part of Releasable interface.

type ProjectSetSpec Uses

type ProjectSetSpec struct {
    // Expressions to be applied
    Exprs []Expression `protobuf:"bytes,1,rep,name=exprs" json:"exprs"`
    // Column types for the generated values
    GeneratedColumns []*types.T `protobuf:"bytes,2,rep,name=generated_columns,json=generatedColumns" json:"generated_columns,omitempty"`
    // The number of columns each expression returns. Same length as exprs.
    NumColsPerGen []uint32 `protobuf:"varint,3,rep,name=num_cols_per_gen,json=numColsPerGen" json:"num_cols_per_gen,omitempty"`
}

ProjectSetSpec is the specification of a processor which applies a set of expressions, which may be set-returning functions, to its input.

func (*ProjectSetSpec) Descriptor Uses

func (*ProjectSetSpec) Descriptor() ([]byte, []int)

func (*ProjectSetSpec) Marshal Uses

func (m *ProjectSetSpec) Marshal() (dAtA []byte, err error)

func (*ProjectSetSpec) MarshalTo Uses

func (m *ProjectSetSpec) MarshalTo(dAtA []byte) (int, error)

func (*ProjectSetSpec) ProtoMessage Uses

func (*ProjectSetSpec) ProtoMessage()

func (*ProjectSetSpec) Reset Uses

func (m *ProjectSetSpec) Reset()

func (*ProjectSetSpec) Size Uses

func (m *ProjectSetSpec) Size() (n int)

func (*ProjectSetSpec) String Uses

func (m *ProjectSetSpec) String() string

func (*ProjectSetSpec) Unmarshal Uses

func (m *ProjectSetSpec) Unmarshal(dAtA []byte) error

func (*ProjectSetSpec) XXX_DiscardUnknown Uses

func (m *ProjectSetSpec) XXX_DiscardUnknown()

func (*ProjectSetSpec) XXX_Marshal Uses

func (m *ProjectSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProjectSetSpec) XXX_Merge Uses

func (dst *ProjectSetSpec) XXX_Merge(src proto.Message)

func (*ProjectSetSpec) XXX_Size Uses

func (m *ProjectSetSpec) XXX_Size() int

func (*ProjectSetSpec) XXX_Unmarshal Uses

func (m *ProjectSetSpec) XXX_Unmarshal(b []byte) error

type ReadImportDataSpec Uses

type ReadImportDataSpec struct {
    Format roachpb.IOFileFormat `protobuf:"bytes,8,opt,name=format" json:"format"`
    // sample_size is the rate at which to output rows, based on an input row's size.
    SampleSize int32 `protobuf:"varint,2,opt,name=sample_size,json=sampleSize" json:"sample_size"`
    // tables supports input formats that can read multiple tables. If it is
    // non-empty, the keys specify the names of tables for which the processor
    // should read and emit data (ignoring data for any other tables that is
    // present in the input).
    //
    // TODO(dt): If a key has a nil value, the schema for that table should be
    // determined from the input on-the-fly (e.g. by parsing a CREATE TABLE in a
    // dump file) and the processor should emit a key/value for the generated
    // TableDescriptor with the corresponding descriptor ID key. If tables is
    // empty (and table_desc above is not specified), the processor should read
    // all tables in the input, determining their schemas on the fly.
    Tables map[string]*ReadImportDataSpec_ImportTable `protobuf:"bytes,9,rep,name=tables" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // uri is a cloud.ExternalStorage URI pointing to the CSV files to be
    // read. The map key must be unique across the entire IMPORT job.
    Uri map[int32]string `protobuf:"bytes,7,rep,name=uri" json:"uri,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // resume_pos specifies a map from an input ID to an offset in that
    // input from which the processing should continue.
    // The meaning of offset is specific to each processor.
    ResumePos              map[int32]int64 `protobuf:"bytes,14,rep,name=resume_pos,json=resumePos" json:"resume_pos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
    Progress               JobProgress     `protobuf:"bytes,6,opt,name=progress" json:"progress"`
    SkipMissingForeignKeys bool            `protobuf:"varint,10,opt,name=skip_missing_foreign_keys,json=skipMissingForeignKeys" json:"skip_missing_foreign_keys"`
    // walltimeNanos is the MVCC time at which the created KVs will be written.
    WalltimeNanos int64 `protobuf:"varint,11,opt,name=walltimeNanos" json:"walltimeNanos"`
    // If set, specifies reader parallelism; 0 implies "use default".
    ReaderParallelism int32 `protobuf:"varint,13,opt,name=readerParallelism" json:"readerParallelism"`
    // User who initiated the import. This is used to check access privileges
    // when using FileTable ExternalStorage.
    User string `protobuf:"bytes,15,opt,name=user" json:"user"`
}

func (*ReadImportDataSpec) Descriptor Uses

func (*ReadImportDataSpec) Descriptor() ([]byte, []int)

func (*ReadImportDataSpec) Marshal Uses

func (m *ReadImportDataSpec) Marshal() (dAtA []byte, err error)

func (*ReadImportDataSpec) MarshalTo Uses

func (m *ReadImportDataSpec) MarshalTo(dAtA []byte) (int, error)

func (*ReadImportDataSpec) ProtoMessage Uses

func (*ReadImportDataSpec) ProtoMessage()

func (*ReadImportDataSpec) Reset Uses

func (m *ReadImportDataSpec) Reset()

func (*ReadImportDataSpec) Size Uses

func (m *ReadImportDataSpec) Size() (n int)

func (*ReadImportDataSpec) String Uses

func (m *ReadImportDataSpec) String() string

func (*ReadImportDataSpec) Unmarshal Uses

func (m *ReadImportDataSpec) Unmarshal(dAtA []byte) error

func (*ReadImportDataSpec) XXX_DiscardUnknown Uses

func (m *ReadImportDataSpec) XXX_DiscardUnknown()

func (*ReadImportDataSpec) XXX_Marshal Uses

func (m *ReadImportDataSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ReadImportDataSpec) XXX_Merge Uses

func (dst *ReadImportDataSpec) XXX_Merge(src proto.Message)

func (*ReadImportDataSpec) XXX_Size Uses

func (m *ReadImportDataSpec) XXX_Size() int

func (*ReadImportDataSpec) XXX_Unmarshal Uses

func (m *ReadImportDataSpec) XXX_Unmarshal(b []byte) error

type ReadImportDataSpec_ImportTable Uses

type ReadImportDataSpec_ImportTable struct {
    Desc *descpb.TableDescriptor `protobuf:"bytes,1,opt,name=desc" json:"desc,omitempty"`
    // targetCols is used to store the target columns for each existing table
    // being imported into. These are the columns for which the processor should
    // read and emit data (ignoring data for any other tables or columns outside
    // of the targetCols, that is present in the input).
    TargetCols []string `protobuf:"bytes,2,rep,name=targetCols" json:"targetCols,omitempty"`
}

func (*ReadImportDataSpec_ImportTable) Descriptor Uses

func (*ReadImportDataSpec_ImportTable) Descriptor() ([]byte, []int)

func (*ReadImportDataSpec_ImportTable) Marshal Uses

func (m *ReadImportDataSpec_ImportTable) Marshal() (dAtA []byte, err error)

func (*ReadImportDataSpec_ImportTable) MarshalTo Uses

func (m *ReadImportDataSpec_ImportTable) MarshalTo(dAtA []byte) (int, error)

func (*ReadImportDataSpec_ImportTable) ProtoMessage Uses

func (*ReadImportDataSpec_ImportTable) ProtoMessage()

func (*ReadImportDataSpec_ImportTable) Reset Uses

func (m *ReadImportDataSpec_ImportTable) Reset()

func (*ReadImportDataSpec_ImportTable) Size Uses

func (m *ReadImportDataSpec_ImportTable) Size() (n int)

func (*ReadImportDataSpec_ImportTable) String Uses

func (m *ReadImportDataSpec_ImportTable) String() string

func (*ReadImportDataSpec_ImportTable) Unmarshal Uses

func (m *ReadImportDataSpec_ImportTable) Unmarshal(dAtA []byte) error

func (*ReadImportDataSpec_ImportTable) XXX_DiscardUnknown Uses

func (m *ReadImportDataSpec_ImportTable) XXX_DiscardUnknown()

func (*ReadImportDataSpec_ImportTable) XXX_Marshal Uses

func (m *ReadImportDataSpec_ImportTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ReadImportDataSpec_ImportTable) XXX_Merge Uses

func (dst *ReadImportDataSpec_ImportTable) XXX_Merge(src proto.Message)

func (*ReadImportDataSpec_ImportTable) XXX_Size Uses

func (m *ReadImportDataSpec_ImportTable) XXX_Size() int

func (*ReadImportDataSpec_ImportTable) XXX_Unmarshal Uses

func (m *ReadImportDataSpec_ImportTable) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata Uses

type RemoteProducerMetadata struct {
    // Types that are valid to be assigned to Value:
    //	*RemoteProducerMetadata_RangeInfo
    //	*RemoteProducerMetadata_Error
    //	*RemoteProducerMetadata_TraceData_
    //	*RemoteProducerMetadata_LeafTxnFinalState
    //	*RemoteProducerMetadata_RowNum_
    //	*RemoteProducerMetadata_SamplerProgress_
    //	*RemoteProducerMetadata_Metrics_
    //	*RemoteProducerMetadata_BulkProcessorProgress_
    Value isRemoteProducerMetadata_Value `protobuf_oneof:"value"`
}

RemoteProducerMetadata represents records that a producer wants to pass to a consumer, other than data rows. It is named RemoteProducerMetadata to avoid clashing with ProducerMetadata, which is used internally within a node and carries a Go error rather than a proto-encoded error.

func LocalMetaToRemoteProducerMeta Uses

func LocalMetaToRemoteProducerMeta(
    ctx context.Context, meta ProducerMetadata,
) RemoteProducerMetadata

LocalMetaToRemoteProducerMeta converts a ProducerMetadata struct to RemoteProducerMetadata.

func (*RemoteProducerMetadata) Descriptor Uses

func (*RemoteProducerMetadata) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata) GetBulkProcessorProgress Uses

func (m *RemoteProducerMetadata) GetBulkProcessorProgress() *RemoteProducerMetadata_BulkProcessorProgress

func (*RemoteProducerMetadata) GetError Uses

func (m *RemoteProducerMetadata) GetError() *Error

func (*RemoteProducerMetadata) GetLeafTxnFinalState Uses

func (m *RemoteProducerMetadata) GetLeafTxnFinalState() *roachpb.LeafTxnFinalState

func (*RemoteProducerMetadata) GetMetrics Uses

func (m *RemoteProducerMetadata) GetMetrics() *RemoteProducerMetadata_Metrics

func (*RemoteProducerMetadata) GetRangeInfo Uses

func (m *RemoteProducerMetadata) GetRangeInfo() *RemoteProducerMetadata_RangeInfos

func (*RemoteProducerMetadata) GetRowNum Uses

func (m *RemoteProducerMetadata) GetRowNum() *RemoteProducerMetadata_RowNum

func (*RemoteProducerMetadata) GetSamplerProgress Uses

func (m *RemoteProducerMetadata) GetSamplerProgress() *RemoteProducerMetadata_SamplerProgress

func (*RemoteProducerMetadata) GetTraceData Uses

func (m *RemoteProducerMetadata) GetTraceData() *RemoteProducerMetadata_TraceData

func (*RemoteProducerMetadata) GetValue Uses

func (m *RemoteProducerMetadata) GetValue() isRemoteProducerMetadata_Value

func (*RemoteProducerMetadata) Marshal Uses

func (m *RemoteProducerMetadata) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata) MarshalTo Uses

func (m *RemoteProducerMetadata) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata) ProtoMessage Uses

func (*RemoteProducerMetadata) ProtoMessage()

func (*RemoteProducerMetadata) Reset Uses

func (m *RemoteProducerMetadata) Reset()

func (*RemoteProducerMetadata) Size Uses

func (m *RemoteProducerMetadata) Size() (n int)

func (*RemoteProducerMetadata)