cockroach: Index | Files | Directories

package physicalplan

import ""


Package Files

aggregator_funcs.go expression.go fake_span_resolver.go physical_plan.go span_resolver.go specs.go


var DistAggregationTable = map[execinfrapb.AggregatorSpec_Func]DistAggregationInfo{
    execinfrapb.AggregatorSpec_ANY_NOT_NULL: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_ANY_NOT_NULL},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_ANY_NOT_NULL,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_BIT_AND: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_BIT_AND},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_BIT_AND,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_BIT_OR: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_BIT_OR},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_BIT_OR,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_BOOL_AND: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_BOOL_AND},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_BOOL_AND,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_BOOL_OR: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_BOOL_OR},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_BOOL_OR,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_COUNT: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_COUNT},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_SUM_INT,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_COUNT_ROWS: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_COUNT_ROWS},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_SUM_INT,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_MAX: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_MAX},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_MAX,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_MIN: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_MIN},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_MIN,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_SUM: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_SUM},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_SUM,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_XOR_AGG: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{execinfrapb.AggregatorSpec_XOR_AGG},
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_XOR_AGG,
                LocalIdxs: passThroughLocalIdxs,

    execinfrapb.AggregatorSpec_AVG: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_SUM,
                LocalIdxs: []uint32{0},
                Fn:        execinfrapb.AggregatorSpec_SUM_INT,
                LocalIdxs: []uint32{1},
        FinalRendering: func(h *tree.IndexedVarHelper, varIdxs []int) (tree.TypedExpr, error) {
            if len(varIdxs) < 2 {
                panic("fewer than two final aggregation values passed into final render")
            sum := h.IndexedVar(varIdxs[0])
            count := h.IndexedVar(varIdxs[1])

            expr := &tree.BinaryExpr{
                Operator: tree.Div,
                Left:     sum,
                Right:    count,

            if sum.ResolvedType().Family() == types.FloatFamily {
                expr.Right = &tree.CastExpr{
                    Expr: count,
                    Type: types.Float,
            semaCtx := tree.MakeSemaContext()
            semaCtx.IVarContainer = h.Container()
            return expr.TypeCheck(context.TODO(), &semaCtx, types.Any)

    execinfrapb.AggregatorSpec_VARIANCE: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{

        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_FINAL_VARIANCE,
                LocalIdxs: []uint32{0, 1, 2},

    execinfrapb.AggregatorSpec_STDDEV: {
        LocalStage: []execinfrapb.AggregatorSpec_Func{
        FinalStage: []FinalStageInfo{
                Fn:        execinfrapb.AggregatorSpec_FINAL_STDDEV,
                LocalIdxs: []uint32{0, 1, 2},

DistAggregationTable is a DistAggregationInfo look-up table. Functions that don't have an entry in the table are not optimized with a local stage.

func MakeExpression Uses

func MakeExpression(
    expr tree.TypedExpr, ctx ExprContext, indexVarMap []int,
) (execinfrapb.Expression, error)

MakeExpression creates an execinfrapb.Expression.

The execinfrapb.Expression uses the placeholder syntax (@1, @2, @3..) to refer to columns.

The expr uses IndexedVars to refer to columns. The caller can optionally remap these columns by passing an indexVarMap: an IndexedVar with index i becomes column indexVarMap[i].

ctx can be nil in which case a fakeExprCtx will be used.

func MergePlans Uses

func MergePlans(
    mergedPlan *PhysicalPlan,
    left, right *PhysicalPlan,
    leftPlanDistribution, rightPlanDistribution PlanDistribution,
) (leftRouters []ProcessorIdx, rightRouters []ProcessorIdx)

MergePlans merges the processors and streams of two plans into a new plan. The result routers for each side are returned (they point at processors in the merged plan).

func MergeResultTypes Uses

func MergeResultTypes(left, right []*types.T) ([]*types.T, error)

MergeResultTypes reconciles the ResultTypes between two plans. It enforces that each pair of ColumnTypes must either match or be null, in which case the non-null type is used. This logic is necessary for cases like SELECT NULL UNION SELECT 1.

func NewFlowSpec Uses

func NewFlowSpec(flowID execinfrapb.FlowID, gateway roachpb.NodeID) *execinfrapb.FlowSpec

NewFlowSpec returns a new FlowSpec, which may have non-zero capacity in its slice fields.

func NewTableReaderSpec Uses

func NewTableReaderSpec() *execinfrapb.TableReaderSpec

NewTableReaderSpec returns a new TableReaderSpec.

func ReleaseFlowSpec Uses

func ReleaseFlowSpec(spec *execinfrapb.FlowSpec)

ReleaseFlowSpec returns this FlowSpec back to the pool of FlowSpecs. It may not be used again after this call.

func ReleaseSetupFlowRequest Uses

func ReleaseSetupFlowRequest(s *execinfrapb.SetupFlowRequest)

ReleaseSetupFlowRequest releases the resources of this SetupFlowRequest, putting them back into their respective object pools.

func ReleaseTableReaderSpec Uses

func ReleaseTableReaderSpec(s *execinfrapb.TableReaderSpec)

ReleaseTableReaderSpec puts this TableReaderSpec back into its sync pool. It may not be used again after Release returns.

type DistAggregationInfo Uses

type DistAggregationInfo struct {
    // The local stage consists of one or more aggregations. All aggregations have
    // the same input.
    LocalStage []execinfrapb.AggregatorSpec_Func

    // The final stage consists of one or more aggregations that take in an
    // arbitrary number of inputs from the local stages. The inputs are ordered and
    // mapped by the indices of the local aggregations in LocalStage (specified by
    // LocalIdxs).
    FinalStage []FinalStageInfo

    // An optional rendering expression used to obtain the final result; required
    // if there is more than one aggregation in each of the stages.
    // Conceptually this is an expression that has access to the final stage
    // results (via IndexedVars), to be run as the PostProcessing step of the
    // final stage processor.  However, there are some complications:
    //   - this structure is a blueprint for aggregating inputs of different
    //     types, and in some cases the expression may be different depending on
    //     the types (see AVG below).
    //   - we support combining multiple "top level" aggregations into the same
    //     processors, so the correct indexing of the input variables is not
    //     predetermined.
    // Instead of defining a canonical non-typed expression and then tweaking it
    // with visitors, we use a function that directly creates a typed expression
    // on demand. The expression will refer to the final stage results using
    // IndexedVars, with indices specified by varIdxs (1-1 mapping).
    FinalRendering func(h *tree.IndexedVarHelper, varIdxs []int) (tree.TypedExpr, error)

DistAggregationInfo is a blueprint for planning distributed aggregations. It describes two stages - a local stage performs local aggregations wherever data is available and generates partial results, and a final stage aggregates the partial results from all data "partitions".

The simplest example is SUM: the local stage computes the SUM of the items on each node, and a final stage SUMs those partial sums into a final sum. Similar functions are MIN, MAX, BOOL_AND, BOOL_OR.

A less trivial example is COUNT: the local stage counts (COUNT), the final stage adds the counts (SUM_INT).

A more complex example is AVG, for which we have to do *multiple* aggregations in each stage: we need to get a sum and a count, so the local stage does SUM and COUNT, and the final stage does SUM and SUM_INT. We also need an expression that takes these two values and generates the final AVG result.

type ExprContext Uses

type ExprContext interface {
    // EvalContext returns the tree.EvalContext for planning.
    EvalContext() *tree.EvalContext

    // IsLocal returns true if the current plan is local.
    IsLocal() bool

    // EvaluateSubqueries returns true if subqueries should be evaluated before
    // creating the execinfrapb.Expression.
    EvaluateSubqueries() bool

ExprContext is an interface containing objects necessary for creating execinfrapb.Expressions.

type FinalStageInfo Uses

type FinalStageInfo struct {
    Fn  execinfrapb.AggregatorSpec_Func
    // Specifies the ordered slice of outputs from local aggregations to propagate
    // as inputs to Fn. This must be ordered according to the underlying aggregate builtin
    // arguments signature found in aggregate_builtins.go.
    LocalIdxs []uint32

FinalStageInfo is a wrapper around an aggregation function performed in the final stage of distributed aggregations that allows us to specify the corresponding inputs from the local aggregations by their indices in the LocalStage.

type PhysicalPlan Uses

type PhysicalPlan struct {
    // Processors in the plan.
    Processors []Processor

    // LocalProcessors contains all of the planNodeToRowSourceWrappers that were
    // installed in this physical plan to wrap any planNodes that couldn't be
    // properly translated into DistSQL processors. This will be empty if no
    // wrapping had to happen.
    LocalProcessors []execinfra.LocalProcessor

    // LocalProcessorIndexes contains pointers to all of the RowSourceIdx fields
    // of the LocalPlanNodeSpecs that were created. This list is in the same
    // order as LocalProcessors, and is kept up-to-date so that LocalPlanNodeSpecs
    // always have the correct index into the LocalProcessors slice.
    LocalProcessorIndexes []*uint32

    // Streams accumulates the streams in the plan - both local (intra-node) and
    // remote (inter-node); when we have a final plan, the streams are used to
    // generate processor input and output specs (see PopulateEndpoints).
    Streams []Stream

    // ResultRouters identifies the output routers which output the results of the
    // plan. These are the routers to which we have to connect new streams in
    // order to extend the plan.
    // The processors which have these routers are all part of the same "stage":
    // they have the same "schema" and PostProcessSpec.
    // We assume all processors have a single output so we only need the processor
    // index.
    ResultRouters []ProcessorIdx

    // ResultTypes is the schema (column types) of the rows produced by the
    // ResultRouters.
    // This is aliased with InputSyncSpec.ColumnTypes, so it must not be modified
    // in-place during planning.
    ResultTypes []*types.T

    // ResultColumns is the schema (result columns) of the rows produced by the
    // ResultRouters.
    ResultColumns sqlbase.ResultColumns

    // MergeOrdering is the ordering guarantee for the result streams that must be
    // maintained when the streams eventually merge. The column indexes refer to
    // columns for the rows produced by ResultRouters.
    // Empty when there is a single result router. The reason is that maintaining
    // an ordering sometimes requires to add columns to streams for the sole
    // reason of correctly merging the streams later (see AddProjection); we don't
    // want to pay this cost if we don't have multiple streams to merge.
    MergeOrdering execinfrapb.Ordering

    // MaxEstimatedRowCount tracks the maximum estimated row count that a table
    // reader in this plan will output. This information is used to decide
    // whether to use the vectorized execution engine.
    MaxEstimatedRowCount uint64
    // TotalEstimatedScannedRows is the sum of the row count estimate of all the
    // table readers in the plan.
    TotalEstimatedScannedRows uint64

    // GatewayNodeID is the gateway node of the physical plan.
    GatewayNodeID roachpb.NodeID
    // Distribution is the indicator of the distribution of the physical plan.
    Distribution PlanDistribution
    // contains filtered or unexported fields

PhysicalPlan represents a network of processors and streams along with information about the results output by this network. The results come from unconnected output routers of a subset of processors; all these routers output the same kind of data (same schema).

func (*PhysicalPlan) AddDistinctSetOpStage Uses

func (p *PhysicalPlan) AddDistinctSetOpStage(
    nodes []roachpb.NodeID,
    joinCore execinfrapb.ProcessorCoreUnion,
    distinctCores []execinfrapb.ProcessorCoreUnion,
    post execinfrapb.PostProcessSpec,
    eqCols []uint32,
    leftTypes, rightTypes []*types.T,
    leftMergeOrd, rightMergeOrd execinfrapb.Ordering,
    leftRouters, rightRouters []ProcessorIdx,

AddDistinctSetOpStage creates a distinct stage and a join stage to implement INTERSECT and EXCEPT plans.

TODO(yuzefovich): If there's a strong key on the left or right side, we can elide the distinct stage on that side.

func (*PhysicalPlan) AddFilter Uses

func (p *PhysicalPlan) AddFilter(
    expr tree.TypedExpr, exprCtx ExprContext, indexVarMap []int,
) error

AddFilter adds a filter on the output of a plan. The filter is added either as a post-processing step to the last stage or to a new "no-op" stage, as necessary.

See MakeExpression for a description of indexVarMap.

func (*PhysicalPlan) AddJoinStage Uses

func (p *PhysicalPlan) AddJoinStage(
    nodes []roachpb.NodeID,
    core execinfrapb.ProcessorCoreUnion,
    post execinfrapb.PostProcessSpec,
    leftEqCols, rightEqCols []uint32,
    leftTypes, rightTypes []*types.T,
    leftMergeOrd, rightMergeOrd execinfrapb.Ordering,
    leftRouters, rightRouters []ProcessorIdx,

AddJoinStage adds join processors at each of the specified nodes, and wires the left and right-side outputs to these processors.

func (*PhysicalPlan) AddLimit Uses

func (p *PhysicalPlan) AddLimit(count int64, offset int64, exprCtx ExprContext) error

AddLimit adds a limit and/or offset to the results of the current plan. If there are multiple result streams, they are joined into a single processor that is placed on the given node.

For no limit, count should be MaxInt64.

func (*PhysicalPlan) AddNoGroupingStage Uses

func (p *PhysicalPlan) AddNoGroupingStage(
    core execinfrapb.ProcessorCoreUnion,
    post execinfrapb.PostProcessSpec,
    outputTypes []*types.T,
    newOrdering execinfrapb.Ordering,

AddNoGroupingStage adds a processor for each result router, on the same node with the source of the stream; all processors have the same core. This is for stages that correspond to logical blocks that don't require any grouping (e.g. evaluator, sorting, etc).

func (*PhysicalPlan) AddNoGroupingStageWithCoreFunc Uses

func (p *PhysicalPlan) AddNoGroupingStageWithCoreFunc(
    coreFunc func(int, *Processor) execinfrapb.ProcessorCoreUnion,
    post execinfrapb.PostProcessSpec,
    outputTypes []*types.T,
    newOrdering execinfrapb.Ordering,

AddNoGroupingStageWithCoreFunc is like AddNoGroupingStage, but creates a core spec based on the input processor's spec.

func (*PhysicalPlan) AddNoInputStage Uses

func (p *PhysicalPlan) AddNoInputStage(
    corePlacements []ProcessorCorePlacement,
    post execinfrapb.PostProcessSpec,
    outputTypes []*types.T,
    newOrdering execinfrapb.Ordering,

AddNoInputStage creates a stage of processors that don't have any input from the other stages (if such exist). nodes and cores must be a one-to-one mapping so that a particular processor core is planned on the appropriate node.

func (*PhysicalPlan) AddProcessor Uses

func (p *PhysicalPlan) AddProcessor(proc Processor) ProcessorIdx

AddProcessor adds a processor to a PhysicalPlan and returns the index that can be used to refer to that processor.

func (*PhysicalPlan) AddProjection Uses

func (p *PhysicalPlan) AddProjection(columns []uint32)

AddProjection applies a projection to a plan. The new plan outputs the columns of the old plan as listed in the slice. The Ordering is updated; columns in the ordering are added to the projection as needed.

The PostProcessSpec may not be updated if the resulting projection keeps all the columns in their original order.

Note: the columns slice is relinquished to this function, which can modify it or use it directly in specs.

func (*PhysicalPlan) AddRendering Uses

func (p *PhysicalPlan) AddRendering(
    exprs []tree.TypedExpr, exprCtx ExprContext, indexVarMap []int, outTypes []*types.T,
) error

AddRendering adds a rendering (expression evaluation) to the output of a plan. The rendering is achieved either through an adjustment on the last stage post-process spec, or via a new stage.

The Ordering is updated; columns in the ordering are added to the render expressions as necessary.

See MakeExpression for a description of indexVarMap.

func (*PhysicalPlan) AddSingleGroupStage Uses

func (p *PhysicalPlan) AddSingleGroupStage(
    nodeID roachpb.NodeID,
    core execinfrapb.ProcessorCoreUnion,
    post execinfrapb.PostProcessSpec,
    outputTypes []*types.T,

AddSingleGroupStage adds a "single group" stage (one that cannot be parallelized) which consists of a single processor on the specified node. The processors of the previous stage (ResultRouters) are all connected to this processor.

func (*PhysicalPlan) AddStageOnNodes Uses

func (p *PhysicalPlan) AddStageOnNodes(
    nodes []roachpb.NodeID,
    core execinfrapb.ProcessorCoreUnion,
    post execinfrapb.PostProcessSpec,
    hashCols []uint32,
    types []*types.T,
    mergeOrd execinfrapb.Ordering,
    routers []ProcessorIdx,

AddStageOnNodes adds a stage of processors that take in a single input logical stream on the specified nodes and connects them to the previous stage via a hash router.

func (*PhysicalPlan) CheckLastStagePost Uses

func (p *PhysicalPlan) CheckLastStagePost() error

CheckLastStagePost checks that the processors of the last stage of the PhysicalPlan have identical post-processing, returning an error if not.

func (*PhysicalPlan) EnsureSingleStreamOnGateway Uses

func (p *PhysicalPlan) EnsureSingleStreamOnGateway()

EnsureSingleStreamOnGateway ensures that there is only one stream on the gateway node in the plan (meaning it possibly merges multiple streams or brings a single stream from a remote node to the gateway).

func (*PhysicalPlan) EnsureSingleStreamPerNode Uses

func (p *PhysicalPlan) EnsureSingleStreamPerNode()

EnsureSingleStreamPerNode goes over the ResultRouters and merges any group of routers that are on the same node, using a no-op processor.

TODO(radu): a no-op processor is not ideal if the next processor is on the same node. A fix for that is much more complicated, requiring remembering extra state in the PhysicalPlan.

func (*PhysicalPlan) GenerateFlowSpecs Uses

func (p *PhysicalPlan) GenerateFlowSpecs() map[roachpb.NodeID]*execinfrapb.FlowSpec

GenerateFlowSpecs takes a plan (with populated endpoints) and generates the set of FlowSpecs (one per node involved in the plan).

The gateway node is identified by the plan's GatewayNodeID field.

func (*PhysicalPlan) GetLastStageDistribution Uses

func (p *PhysicalPlan) GetLastStageDistribution() PlanDistribution

GetLastStageDistribution returns the distribution *only* of the last stage. Note that if the last stage consists of a single processor planned on a remote node, such stage is considered distributed.

func (*PhysicalPlan) GetLastStagePost Uses

func (p *PhysicalPlan) GetLastStagePost() execinfrapb.PostProcessSpec

GetLastStagePost returns the PostProcessSpec for the processors in the last stage (ResultRouters).

func (*PhysicalPlan) IsLastStageDistributed Uses

func (p *PhysicalPlan) IsLastStageDistributed() bool

IsLastStageDistributed returns whether the last stage of processors is distributed (meaning that it contains at least one remote processor).

func (*PhysicalPlan) MergeResultStreams Uses

func (p *PhysicalPlan) MergeResultStreams(
    resultRouters []ProcessorIdx,
    sourceRouterSlot int,
    ordering execinfrapb.Ordering,
    destProcessor ProcessorIdx,
    destInput int,

MergeResultStreams connects a set of resultRouters to a synchronizer. The synchronizer is configured with the provided ordering.

func (*PhysicalPlan) NewStage Uses

func (p *PhysicalPlan) NewStage(containsRemoteProcessor bool) int32

NewStage updates the distribution of the plan based on whether the new stage contains at least one processor planned on a remote node, and returns a stage identifier of the new stage that can be used in processor specs.

func (*PhysicalPlan) NewStageOnNodes Uses

func (p *PhysicalPlan) NewStageOnNodes(nodes []roachpb.NodeID) int32

NewStageOnNodes is the same as NewStage but takes in the information about the nodes participating in the new stage and the gateway.

func (*PhysicalPlan) PopulateEndpoints Uses

func (p *PhysicalPlan) PopulateEndpoints()

PopulateEndpoints processes p.Streams and adds the corresponding StreamEndpointSpecs to the processors' input and output specs. This should be used when the plan is completed and ready to be executed.

func (*PhysicalPlan) SetLastStagePost Uses

func (p *PhysicalPlan) SetLastStagePost(post execinfrapb.PostProcessSpec, outputTypes []*types.T)

SetLastStagePost changes the PostProcess spec of the processors in the last stage (ResultRouters). The caller must update the ordering via SetOrdering.

func (*PhysicalPlan) SetMergeOrdering Uses

func (p *PhysicalPlan) SetMergeOrdering(o execinfrapb.Ordering)

SetMergeOrdering sets p.MergeOrdering.

type PlanDistribution Uses

type PlanDistribution int

PlanDistribution describes the distribution of the physical plan.

const (
    // LocalPlan indicates that the whole plan is executed on the gateway node.
    LocalPlan PlanDistribution = iota

    // PartiallyDistributedPlan indicates that some parts of the plan are
    // distributed while other parts are not (due to limitations of DistSQL).
    // Note that such plans can only be created by distSQLSpecExecFactory.
    // An example of such plan is the plan with distributed scans that have a
    // filter which operates with an OID type (DistSQL currently doesn't
    // support distributed operations with such type). As a result, we end
    // up planning a noop processor on the gateway node that receives all
    // scanned rows from the remote nodes while performing the filtering
    // locally.

    // FullyDistributedPlan indicates that the whole plan is distributed.

func (PlanDistribution) String Uses

func (a PlanDistribution) String() string

func (PlanDistribution) WillDistribute Uses

func (a PlanDistribution) WillDistribute() bool

WillDistribute is a small helper that returns whether at least a part of the plan is distributed.

type Processor Uses

type Processor struct {
    // Node where the processor must be instantiated.
    Node roachpb.NodeID

    // Spec for the processor; note that the StreamEndpointSpecs in the input
    // synchronizers and output routers are not set until the end of the planning
    // process.
    Spec execinfrapb.ProcessorSpec

Processor contains the information associated with a processor in a plan.

type ProcessorCorePlacement Uses

type ProcessorCorePlacement struct {
    NodeID roachpb.NodeID
    Core   execinfrapb.ProcessorCoreUnion

ProcessorCorePlacement indicates on which node a particular processor core needs to be planned.

type ProcessorIdx Uses

type ProcessorIdx int

ProcessorIdx identifies a processor by its index in PhysicalPlan.Processors.

type SpanResolver Uses

type SpanResolver interface {
    // NewSpanResolverIterator creates a new SpanResolverIterator.
    // Txn is used for testing and for determining if follower reads are possible.
    NewSpanResolverIterator(txn *kv.Txn) SpanResolverIterator

SpanResolver resolves key spans to their respective ranges and lease holders. Used for planning physical execution of distributed SQL queries.

Sample usage for resolving a bunch of spans:

func resolveSpans(

ctx context.Context,
it *execinfra.SpanResolverIterator,
spans ...spanWithDir,

) ([][]kv.ReplicaInfo, error) {

lr := execinfra.NewSpanResolver(
  distSender, nodeDescs, nodeDescriptor,
it := lr.NewSpanResolverIterator(nil)
res := make([][]kv.ReplicaInfo, 0)
for _, span := range spans {
  repls := make([]kv.ReplicaInfo, 0)
  for it.Seek(ctx, span.Span, span.dir); ; it.Next(ctx) {
    if !it.Valid() {
      return nil, it.Error()
    repl, err := it.ReplicaInfo(ctx)
    if err != nil {
      return nil, err
    repls = append(repls, repl)
    if !it.NeedAnother() {
  res = append(res, repls)
return res, nil


func NewFakeSpanResolver Uses

func NewFakeSpanResolver(nodes []*roachpb.NodeDescriptor) SpanResolver

NewFakeSpanResolver creates a fake span resolver.

func NewSpanResolver Uses

func NewSpanResolver(
    st *cluster.Settings,
    distSender *kvcoord.DistSender,
    nodeDescs kvcoord.NodeDescStore,
    nodeDesc roachpb.NodeDescriptor,
    rpcCtx *rpc.Context,
    policy replicaoracle.Policy,
) SpanResolver

NewSpanResolver creates a new spanResolver.

type SpanResolverIterator Uses

type SpanResolverIterator interface {
    // Seek positions the iterator on the start of a span (span.Key or
    // span.EndKey, depending on ScanDir). Note that span.EndKey is exclusive,
    // regardless of scanDir.
    // After calling this, ReplicaInfo() will return information about the range
    // containing the start key of the span (or the end key, if the direction is
    // Descending).
    // NeedAnother() will return true until the iterator is positioned on or after
    // the end of the span.  Possible errors encountered should be checked for
    // with Valid().
    // Seek can be called repeatedly on the same iterator. To make optimal uses of
    // caches, Seek()s should be performed on spans sorted according to the
    // scanDir (if Descending, then the span with the highest keys should be
    // Seek()ed first).
    // scanDir changes the direction in which Next() will advance the iterator.
    Seek(ctx context.Context, span roachpb.Span, scanDir kvcoord.ScanDirection)

    // NeedAnother returns true if the current range is not the last for the span
    // that was last Seek()ed.
    NeedAnother() bool

    // Next advances the iterator to the next range. The next range contains the
    // last range's end key (but it does not necessarily start there, because of
    // asynchronous range splits and caching effects).
    // Possible errors encountered should be checked for with Valid().
    Next(ctx context.Context)

    // Valid returns false if an error was encountered by the last Seek() or Next().
    Valid() bool

    // Error returns any error encountered by the last Seek() or Next().
    Error() error

    // Desc returns the current RangeDescriptor.
    Desc() roachpb.RangeDescriptor

    // ReplicaInfo returns information about the replica that has been picked for
    // the current range.
    // A RangeUnavailableError is returned if there's no information in nodeDescs
    // about any of the replicas.
    ReplicaInfo(ctx context.Context) (roachpb.ReplicaDescriptor, error)

SpanResolverIterator is used to iterate over the ranges composing a key span.

type Stream Uses

type Stream struct {
    // SourceProcessor index (within the same plan).
    SourceProcessor ProcessorIdx

    // SourceRouterSlot identifies the position of this stream among the streams
    // that originate from the same router. This is important when routing by hash
    // where the order of the streams in the OutputRouterSpec matters.
    SourceRouterSlot int

    // DestProcessor index (within the same plan).
    DestProcessor ProcessorIdx

    // DestInput identifies the input of DestProcessor (some processors have
    // multiple inputs).
    DestInput int

Stream connects the output router of one processor to an input synchronizer of another processor.


Directory: replicaoracle — Package replicaoracle provides functionality for physicalplan to choose a replica for a range.

Package physicalplan imports 23 packages (graph) and is imported by 15 packages. Updated 2020-08-12. Refresh now. Tools for package owners.