package analyzer
v0.6.1
Published: Jun 12, 2020 License: Apache-2.0 Imports: 20 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var (
	// ErrColumnTableNotFound is returned when the column does not exist in
	// the table.
	ErrColumnTableNotFound = errors.NewKind("table %q does not have column %q")
	// ErrColumnNotFound is returned when the column does not exist in any
	// table in scope.
	ErrColumnNotFound = errors.NewKind("column %q could not be found in any table in scope")
	// ErrAmbiguousColumnName is returned when there is a column reference that
	// is present in more than one table.
	ErrAmbiguousColumnName = errors.NewKind("ambiguous column name %q, it's present in all these tables: %v")
	// ErrFieldMissing is returned when the field is not on the schema.
	ErrFieldMissing = errors.NewKind("field %q is not on schema")
	// ErrOrderByColumnIndex is returned when an ORDER BY clause references
	// an unknown column index.
	ErrOrderByColumnIndex = errors.NewKind("unknown column %d in order by clause")
	// ErrMisusedAlias is returned when an alias is defined and used in the same projection.
	ErrMisusedAlias = errors.NewKind("column %q does not exist in scope, but there is an alias defined in" +
		" this projection with that name. Aliases cannot be used in the same projection they're defined in")
)
var (
	// ErrValidationResolved is returned when the plan cannot be resolved.
	ErrValidationResolved = errors.NewKind("plan is not resolved because of node '%T'")
	// ErrValidationOrderBy is returned when the order by contains aggregation
	// expressions.
	ErrValidationOrderBy = errors.NewKind("OrderBy does not support aggregation expressions")
	// ErrValidationGroupBy is returned when the aggregation expression does not
	// appear in the grouping columns.
	ErrValidationGroupBy = errors.NewKind("GroupBy aggregate expression '%v' doesn't appear in the grouping columns")
	// ErrValidationSchemaSource is returned when there is any column source
	// that does not match the table name.
	ErrValidationSchemaSource = errors.NewKind("one or more schema sources are empty")
	// ErrProjectTuple is returned when there is a tuple of more than 1 column
	// inside a projection.
	ErrProjectTuple = errors.NewKind("selected field %d should have 1 column, but has %d")
	// ErrUnknownIndexColumns is returned when the expression to index
	// references columns that are unknown in the table.
	ErrUnknownIndexColumns = errors.NewKind("unknown columns to index for table %q: %s")
	// ErrCaseResultType is returned when the branches of a case expression
	// return values of mismatched types.
	ErrCaseResultType = errors.NewKind(
		"expecting all case branches to return values of type %s, " +
			"but found value %q of type %s on %s",
	)
	// ErrIntervalInvalidUse is returned when an interval expression is not
	// correctly used.
	ErrIntervalInvalidUse = errors.NewKind(
		"invalid use of an interval, which can only be used with DATE_ADD, " +
			"DATE_SUB and +/- operators to subtract from or add to a date",
	)
	// ErrExplodeInvalidUse is returned when an EXPLODE function is used
	// outside a Project node.
	ErrExplodeInvalidUse = errors.NewKind(
		"using EXPLODE is not supported outside a Project node",
	)

	// ErrSubqueryColumns is returned when an expression subquery returns
	// more than a single column.
	ErrSubqueryColumns = errors.NewKind(
		"subquery expressions can only return a single column",
	)

	// ErrUnionSchemasMatch is returned when both sides of a UNION do not
	// have the same schema.
	ErrUnionSchemasMatch = errors.NewKind(
		"the schema of the left side of union does not match the right side, expected %s to match %s",
	)
)
var DefaultRules = []Rule{
	{"resolve_natural_joins", resolveNaturalJoins},
	{"resolve_orderby_literals", resolveOrderByLiterals},
	{"resolve_orderby", resolveOrderBy},
	{"resolve_grouping_columns", resolveGroupingColumns},
	{"qualify_columns", qualifyColumns},
	{"resolve_columns", resolveColumns},
	{"resolve_database", resolveDatabase},
	{"resolve_star", resolveStar},
	{"resolve_functions", resolveFunctions},
	{"resolve_having", resolveHaving},
	{"merge_union_schemas", mergeUnionSchemas},
	{"reorder_aggregations", reorderAggregations},
	{"reorder_projection", reorderProjection},
	{"move_join_conds_to_filter", moveJoinConditionsToFilter},
	{"eval_filter", evalFilter},
	{"optimize_distinct", optimizeDistinct},
}

DefaultRules to apply when analyzing nodes.

var DefaultValidationRules = []Rule{
	{validateResolvedRule, validateIsResolved},
	{validateOrderByRule, validateOrderBy},
	{validateGroupByRule, validateGroupBy},
	{validateSchemaSourceRule, validateSchemaSource},
	{validateProjectTuplesRule, validateProjectTuples},
	{validateIndexCreationRule, validateIndexCreation},
	{validateCaseResultTypesRule, validateCaseResultTypes},
	{validateIntervalUsageRule, validateIntervalUsage},
	{validateExplodeUsageRule, validateExplodeUsage},
	{validateSubqueryColumnsRule, validateSubqueryColumns},
	{validateUnionSchemasMatchRule, validateUnionSchemasMatch},
}

DefaultValidationRules to apply while analyzing nodes.

var ErrInAnalysis = errors.NewKind("error in analysis: %s")

ErrInAnalysis is returned for generic analyzer errors.

var ErrInvalidNodeType = errors.NewKind("%s: invalid node of type: %T")

ErrInvalidNodeType is returned when the analyzer cannot handle a particular node type.

var ErrMaxAnalysisIters = errors.NewKind("exceeded max analysis iterations (%d)")

ErrMaxAnalysisIters is returned when the maximum number of analysis iterations is exceeded.

var (
	// ErrUnionSchemasDifferentLength is returned when the two sides of a
	// UNION do not have the same number of columns in their schemas.
	ErrUnionSchemasDifferentLength = errors.NewKind(
		"cannot union two queries whose schemas are different lengths; left has %d column(s) right has %d column(s).",
	)
)
var OnceAfterAll = []Rule{
	{"track_process", trackProcess},
	{"parallelize", parallelize},
	{"clear_warnings", clearWarnings},
}

OnceAfterAll contains the rules to be applied just once after all other rules have been applied.

var OnceAfterDefault = []Rule{
	{"resolve_generators", resolveGenerators},
	{"remove_unnecessary_converts", removeUnnecessaryConverts},
	{"assign_catalog", assignCatalog},
	{"prune_columns", pruneColumns},
	{"convert_dates", convertDates},
	{"pushdown", pushdown},
	{"optimize_joins", optimizeJoins},
	{"erase_projection", eraseProjection},
}

OnceAfterDefault contains the rules to be applied just once after the DefaultRules.

var OnceBeforeDefault = []Rule{
	{"resolve_views", resolveViews},
	{"resolve_subqueries", resolveSubqueries},
	{"resolve_tables", resolveTables},
	{"check_aliases", checkAliases},
}

OnceBeforeDefault contains the rules to be applied just once before the DefaultRules.

var (
	// ParallelQueryCounter describes a metric that monotonically accumulates
	// the number of parallel queries.
	ParallelQueryCounter = discard.NewCounter()
)

Functions

This section is empty.

Types

type Analyzer

type Analyzer struct {
	// Whether to log various debugging messages
	Debug bool
	// Whether to output the query plan at each step of the analyzer
	Verbose bool

	Parallelism int
	// Batches of Rules to apply.
	Batches []*Batch
	// Catalog of databases and registered functions.
	Catalog *sql.Catalog
	// contains filtered or unexported fields
}

Analyzer analyzes nodes of the execution plan and applies rules and validations to them.

func NewDefault

func NewDefault(c *sql.Catalog) *Analyzer

NewDefault creates a default Analyzer instance with all default Rules and configuration. To add custom rules, the easiest way is to use the Builder.

func (*Analyzer) Analyze

func (a *Analyzer) Analyze(ctx *sql.Context, n sql.Node) (sql.Node, error)

Analyze the node and all its children.
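
A minimal usage sketch, assuming sql.NewCatalog and sql.NewEmptyContext from the parent sql package, and assuming that parsed is an unresolved plan node produced elsewhere (for example by the parse package):

	catalog := sql.NewCatalog()
	a := analyzer.NewDefault(catalog)

	ctx := sql.NewEmptyContext()
	// Analyze applies the default rule batches to the plan, resolving
	// tables, columns, and functions along the way.
	analyzed, err := a.Analyze(ctx, parsed)
	if err != nil {
		// handle the analysis error
	}
	_ = analyzed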

func (*Analyzer) Log

func (a *Analyzer) Log(msg string, args ...interface{})

Log prints an INFO message to stdout with the given message and args if the analyzer is in debug mode.

func (*Analyzer) LogNode

func (a *Analyzer) LogNode(n sql.Node)

LogNode prints the node given if Verbose logging is enabled.

func (*Analyzer) PopDebugContext

func (a *Analyzer) PopDebugContext()

PopDebugContext pops a context message off the context stack.

func (*Analyzer) PushDebugContext

func (a *Analyzer) PushDebugContext(msg string)

PushDebugContext pushes the given context string onto the context stack, to use when logging debug messages.
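
A small sketch of how the two calls are typically paired inside a rule body, so debug output emitted while the rule runs is attributed to it (the label and surrounding rule are illustrative only):

	a.PushDebugContext("my_custom_rule")
	defer a.PopDebugContext()
	a.Log("inspecting node of type %T", n)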

type Batch

type Batch struct {
	Desc       string
	Iterations int
	Rules      []Rule
}

Batch executes a set of rules a specific number of times. When that limit is reached, the node as processed so far and ErrMaxAnalysisIters are returned.

func (*Batch) Eval

func (b *Batch) Eval(ctx *sql.Context, a *Analyzer, n sql.Node) (sql.Node, error)

Eval executes the rules of the batch on the given node, iterating up to the specified number of times. If the maximum number of iterations is reached, this method returns the node as processed so far along with an ErrMaxAnalysisIters error.
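
A hedged sketch of evaluating a single batch directly; the analyzer a, context ctx, and input node n are assumed to exist already, and the iteration limit is arbitrary:

	b := &Batch{
		Desc:       "default-rules",
		Iterations: 8, // arbitrary limit chosen for this sketch
		Rules:      DefaultRules,
	}
	node, err := b.Eval(ctx, a, n)
	if err != nil {
		// ErrMaxAnalysisIters is returned here when the batch keeps
		// transforming the node without stabilizing within 8 iterations.
	}
	_ = node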

type Builder

type Builder struct {
	// contains filtered or unexported fields
}

Builder provides an easy way to generate an Analyzer with custom rules and options.

func NewBuilder

func NewBuilder(c *sql.Catalog) *Builder

NewBuilder creates a new Builder from a specific catalog. The builder allows adding custom Rules and modifying some internal properties.

func (*Builder) AddPostAnalyzeRule

func (ab *Builder) AddPostAnalyzeRule(name string, fn RuleFunc) *Builder

AddPostAnalyzeRule adds a new rule to the analyzer after standard analyzer rules.

func (*Builder) AddPostValidationRule

func (ab *Builder) AddPostValidationRule(name string, fn RuleFunc) *Builder

AddPostValidationRule adds a new rule to the analyzer after standard validation rules.

func (*Builder) AddPreAnalyzeRule

func (ab *Builder) AddPreAnalyzeRule(name string, fn RuleFunc) *Builder

AddPreAnalyzeRule adds a new rule to the analyzer before the standard analyzer rules.

func (*Builder) AddPreValidationRule

func (ab *Builder) AddPreValidationRule(name string, fn RuleFunc) *Builder

AddPreValidationRule adds a new rule to the analyzer before standard validation rules.

func (*Builder) Build

func (ab *Builder) Build() *Analyzer

Build creates a new Analyzer using all the data previously set on the Builder.
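
A sketch of the full builder chain, assuming catalog is an existing *sql.Catalog and logPlanType is a custom RuleFunc like the one sketched under the RuleFunc type below:

	a := analyzer.NewBuilder(catalog).
		WithDebug().
		WithParallelism(2).
		AddPostAnalyzeRule("log_plan_type", logPlanType).
		Build()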

func (*Builder) WithDebug

func (ab *Builder) WithDebug() *Builder

WithDebug activates debug on the Analyzer.

func (*Builder) WithParallelism

func (ab *Builder) WithParallelism(parallelism int) *Builder

WithParallelism sets the parallelism level on the analyzer.

type ExprAliases

type ExprAliases map[string]sql.Expression

type NameableNode

type NameableNode interface {
	sql.Nameable
	sql.Node
}

type Releaser

type Releaser struct {
	Child   sql.Node
	Release func()
}

func (*Releaser) Children

func (r *Releaser) Children() []sql.Node

func (*Releaser) Equal

func (r *Releaser) Equal(n sql.Node) bool

func (*Releaser) Resolved

func (r *Releaser) Resolved() bool

func (*Releaser) RowIter

func (r *Releaser) RowIter(ctx *sql.Context) (sql.RowIter, error)

func (*Releaser) Schema

func (r *Releaser) Schema() sql.Schema

func (*Releaser) String

func (r *Releaser) String() string

func (*Releaser) WithChildren

func (r *Releaser) WithChildren(children ...sql.Node) (sql.Node, error)

type Rule

type Rule struct {
	// Name of the rule.
	Name string
	// Apply transforms a node.
	Apply RuleFunc
}

Rule to transform nodes.

type RuleFunc

type RuleFunc func(*sql.Context, *Analyzer, sql.Node) (sql.Node, error)

RuleFunc is the function to be applied in a rule.
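
A minimal, purely illustrative sketch of a conforming RuleFunc: it only logs the node's type and returns the plan unchanged, whereas a real rule would normally return a transformed node:

	func logPlanType(ctx *sql.Context, a *analyzer.Analyzer, n sql.Node) (sql.Node, error) {
		a.Log("analyzing node of type %T", n)
		return n, nil
	}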

type TableAliases

type TableAliases map[string]sql.Nameable
