antlr4: github.com/antlr/antlr4/runtime/Go/antlr

package antlr

import "github.com/antlr/antlr4/runtime/Go/antlr"

Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. Use of this file is governed by the BSD 3-clause license that can be found in the LICENSE.txt file in the project root.

Index

Package Files

atn.go atn_config.go atn_config_set.go atn_deserialization_options.go atn_deserializer.go atn_simulator.go atn_state.go atn_type.go char_stream.go common_token_factory.go common_token_stream.go dfa.go dfa_serializer.go dfa_state.go diagnostic_error_listener.go error_listener.go error_strategy.go errors.go file_stream.go input_stream.go int_stream.go interval_set.go lexer.go lexer_action.go lexer_action_executor.go lexer_atn_simulator.go ll1_analyzer.go parser.go parser_atn_simulator.go parser_rule_context.go prediction_context.go prediction_mode.go recognizer.go rule_context.go semantic_context.go token.go token_source.go token_stream.go tokenstream_rewriter.go trace_listener.go transition.go tree.go trees.go utils.go

Constants

const (
    ATNStateInvalidType    = 0
    ATNStateBasic          = 1
    ATNStateRuleStart      = 2
    ATNStateBlockStart     = 3
    ATNStatePlusBlockStart = 4
    ATNStateStarBlockStart = 5
    ATNStateTokenStart     = 6
    ATNStateRuleStop       = 7
    ATNStateBlockEnd       = 8
    ATNStateStarLoopBack   = 9
    ATNStateStarLoopEntry  = 10
    ATNStatePlusLoopBack   = 11
    ATNStateLoopEnd        = 12

    ATNStateInvalidStateNumber = -1
)

Constants for serialization.

const (
    ATNTypeLexer  = 0
    ATNTypeParser = 1
)

Represent the type of recognizer an ATN applies to.

const (
    LexerDefaultMode = 0
    LexerMore        = -2
    LexerSkip        = -3
)
const (
    LexerDefaultTokenChannel = TokenDefaultChannel
    LexerHidden              = TokenHiddenChannel
    LexerMinCharValue        = 0x0000
    LexerMaxCharValue        = 0x10FFFF
)
const (
    LexerActionTypeChannel  = 0 //The type of a {@link LexerChannelAction} action.
    LexerActionTypeCustom   = 1 //The type of a {@link LexerCustomAction} action.
    LexerActionTypeMode     = 2 //The type of a {@link LexerModeAction} action.
    LexerActionTypeMore     = 3 //The type of a {@link LexerMoreAction} action.
    LexerActionTypePopMode  = 4 //The type of a {@link LexerPopModeAction} action.
    LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
    LexerActionTypeSkip     = 6 //The type of a {@link LexerSkipAction} action.
    LexerActionTypeType     = 7 //The type of a {@link LexerTypeAction} action.
)
const (
    //
    // The SLL(*) prediction mode. This prediction mode ignores the current
    // parser context when making predictions. This is the fastest prediction
    // mode, and provides correct results for many grammars. This prediction
    // mode is more powerful than the prediction mode provided by ANTLR 3, but
    // may result in syntax errors for grammar and input combinations which are
    // not SLL.
    //
    // <p>
    // When using this prediction mode, the parser will either return a correct
    // parse tree (i.e. the same parse tree that would be returned with the
    // {@link //LL} prediction mode), or it will Report a syntax error. If a
    // syntax error is encountered when using the {@link //SLL} prediction mode,
    // it may be due to either an actual syntax error in the input or indicate
    // that the particular combination of grammar and input requires the more
    // powerful {@link //LL} prediction abilities to complete successfully.</p>
    //
    // <p>
    // This prediction mode does not provide any guarantees for prediction
    // behavior for syntactically-incorrect inputs.</p>
    //
    PredictionModeSLL = 0
    //
    // The LL(*) prediction mode. This prediction mode allows the current parser
    // context to be used for resolving SLL conflicts that occur during
    // prediction. This is the fastest prediction mode that guarantees correct
    // parse results for all combinations of grammars with syntactically correct
    // inputs.
    //
    // <p>
    // When using this prediction mode, the parser will make correct decisions
    // for all syntactically-correct grammar and input combinations. However, in
    // cases where the grammar is truly ambiguous this prediction mode might not
    // Report a precise answer for <em>exactly which</em> alternatives are
    // ambiguous.</p>
    //
    // <p>
    // This prediction mode does not provide any guarantees for prediction
    // behavior for syntactically-incorrect inputs.</p>
    //
    PredictionModeLL = 1
    //
    // The LL(*) prediction mode with exact ambiguity detection. In addition to
    // the correctness guarantees provided by the {@link //LL} prediction mode,
    // this prediction mode instructs the prediction algorithm to determine the
    // complete and exact set of ambiguous alternatives for every ambiguous
    // decision encountered while parsing.
    //
    // <p>
    // This prediction mode may be used for diagnosing ambiguities during
    // grammar development. Due to the performance overhead of calculating sets
    // of ambiguous alternatives, this prediction mode should be avoided when
    // the exact results are not necessary.</p>
    //
    // <p>
    // This prediction mode does not provide any guarantees for prediction
    // behavior for syntactically-incorrect inputs.</p>
    //
    PredictionModeLLExactAmbigDetection = 2
)
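
The prediction mode for a parse is selected on the parser's ATN interpreter. Below is a minimal sketch; it assumes that SetPredictionMode is exported on *ParserATNSimulator (as in the other ANTLR 4 runtimes; the method is not shown in this excerpt) and that the parser is any ANTLR-generated parser, which satisfies the Parser interface.

import "github.com/antlr/antlr4/runtime/Go/antlr"

// useSLL switches a parser's ATN interpreter to SLL prediction for speed.
// Any generated parser satisfies antlr.Parser, so it can be passed here.
// SetPredictionMode on *ParserATNSimulator is assumed to be available.
func useSLL(p antlr.Parser) {
    p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
}
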
const (
    TokenInvalidType = 0

    // During lookahead operations, this "token" signifies we hit rule end ATN state
    // and did not follow it despite needing to.
    TokenEpsilon = -2

    TokenMinUserTokenType = 1

    TokenEOF = -1

    TokenDefaultChannel = 0

    TokenHiddenChannel = 1
)
const (
    Default_Program_Name = "default"
    Program_Init_Size    = 100
    Min_Token_Index      = 0
)
const (
    TransitionEPSILON    = 1
    TransitionRANGE      = 2
    TransitionRULE       = 3
    TransitionPREDICATE  = 4 // e.g., {isType(input.LT(1))}?
    TransitionATOM       = 5
    TransitionACTION     = 6
    TransitionSET        = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
    TransitionNOTSET     = 8
    TransitionWILDCARD   = 9
    TransitionPRECEDENCE = 10
)
const (
    BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)

Represents {@code $} in local context prediction, which means wildcard: {@code *+x = *}.

const (
    LL1AnalyzerHitPred = TokenInvalidType
)

Special value added to the lookahead sets to indicate that we hit a predicate during analysis if {@code seeThruPreds==false}.

Variables

var (
    LexerATNSimulatorDebug    = false
    LexerATNSimulatorDFADebug = false

    LexerATNSimulatorMinDFAEdge = 0
    LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN

    LexerATNSimulatorMatchCalls = 0
)
var (
    ParserATNSimulatorDebug            = false
    ParserATNSimulatorListATNDecisions = false
    ParserATNSimulatorDFADebug         = false
    ParserATNSimulatorRetryDebug       = false
)
var (
    BasePredictionContextglobalNodeCount = 1
    BasePredictionContextid              = BasePredictionContextglobalNodeCount
)
var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}
var ATNInvalidAltNumber int
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
var ATNStateInitialNumTransitions = 4
var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"

This is the earliest supported serialized UUID. We stick to the serialized version for now; we don't need a UUID instance.

var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)

CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not explicitly copy token text when constructing tokens.
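
For reference, this factory is used implicitly by the usual lexing pipeline. A minimal sketch, where NewMyLexer stands in for a hypothetical ANTLR-generated lexer constructor:

// buildTokenStream tokenizes src with a generated lexer. The lexer uses
// CommonTokenFactoryDEFAULT unless another factory is installed, so the
// resulting tokens reference the input stream rather than copying text.
func buildTokenStream(src string) *antlr.CommonTokenStream {
    input := antlr.NewInputStream(src)
    lexer := NewMyLexer(input) // hypothetical generated lexer
    tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    tokens.Fill() // force all tokens to load
    return tokens
}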

var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()

Provides a default instance of {@link ConsoleErrorListener}.
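
Generated recognizers install this console listener by default. A hedged sketch of replacing it with a collecting listener follows; errorCollector is an illustrative name, DefaultErrorListener is part of this package but outside this excerpt, the SyntaxError signature is assumed to match the ErrorListener interface, and imports of fmt and this package are assumed.

// errorCollector records syntax errors instead of printing them. Embedding
// *antlr.DefaultErrorListener supplies no-op implementations of the other
// ErrorListener methods.
type errorCollector struct {
    *antlr.DefaultErrorListener
    msgs []string
}

func (c *errorCollector) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
    line, column int, msg string, e antlr.RecognitionException) {
    c.msgs = append(c.msgs, fmt.Sprintf("%d:%d %s", line, column, msg))
}

// install drops the default console listener and registers the collector.
// AddErrorListener and RemoveErrorListeners are documented on BaseRecognizer
// below and are assumed to be part of the Recognizer interface.
func install(r antlr.Recognizer, c *errorCollector) {
    r.RemoveErrorListeners()
    r.AddErrorListener(c)
}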

var ErrEmptyStack = errors.New("Stack is empty")
var LexerMoreActionINSTANCE = NewLexerMoreAction()
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
var LexerSkipActionINSTANCE = NewLexerSkipAction()

Provides a singleton instance of this parameterless lexer action.

var ParseTreeWalkerDefault = NewParseTreeWalker()
var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
var SerializedUUID = AddedUnicodeSMP

This is the current serialized UUID.

var SerializedVersion = 3
var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}

This list contains all of the currently supported UUIDs, ordered by when the feature first appeared in this branch.

var TransitionserializationNames = []string{
    "INVALID",
    "EPSILON",
    "RANGE",
    "RULE",
    "PREDICATE",
    "ATOM",
    "ACTION",
    "SET",
    "NOT_SET",
    "WILDCARD",
    "PRECEDENCE",
}
var TreeInvalidInterval = NewInterval(-1, -2)

func EscapeWhitespace Uses

func EscapeWhitespace(s string, escapeSpaces bool) string

func PredictionModeallConfigsInRuleStopStates Uses

func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool

Checks if all configurations in {@code configs} are in a {@link RuleStopState}. Configurations meeting this condition have reached the end of the decision rule (local context) or end of start rule (full context).

@param configs the configuration set to test @return {@code true} if all configurations in {@code configs} are in a {@link RuleStopState}, otherwise {@code false}

func PredictionModeallSubsetsConflict Uses

func PredictionModeallSubsetsConflict(altsets []*BitSet) bool

Determines if every alternative subset in {@code altsets} contains more than one alternative.

@param altsets a collection of alternative subsets @return {@code true} if every {@link BitSet} in {@code altsets} has {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}

func PredictionModeallSubsetsEqual Uses

func PredictionModeallSubsetsEqual(altsets []*BitSet) bool

Determines if every alternative subset in {@code altsets} is equivalent.

@param altsets a collection of alternative subsets @return {@code true} if every member of {@code altsets} is equal to the others, otherwise {@code false}

func PredictionModegetSingleViableAlt Uses

func PredictionModegetSingleViableAlt(altsets []*BitSet) int

func PredictionModegetUniqueAlt Uses

func PredictionModegetUniqueAlt(altsets []*BitSet) int

Returns the unique alternative predicted by all alternative subsets in {@code altsets}. If no such alternative exists, this method returns {@link ATN//INVALID_ALT_NUMBER}.

@param altsets a collection of alternative subsets

func PredictionModehasConfigInRuleStopState Uses

func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool

Checks if any configuration in {@code configs} is in a {@link RuleStopState}. Configurations meeting this condition have reached the end of the decision rule (local context) or end of start rule (full context).

@param configs the configuration set to test @return {@code true} if any configuration in {@code configs} is in a {@link RuleStopState}, otherwise {@code false}

func PredictionModehasConflictingAltSet Uses

func PredictionModehasConflictingAltSet(altsets []*BitSet) bool

Determines if any single alternative subset in {@code altsets} contains more than one alternative.

@param altsets a collection of alternative subsets @return {@code true} if {@code altsets} contains a {@link BitSet} with {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}

func PredictionModehasNonConflictingAltSet Uses

func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool

Determines if any single alternative subset in {@code altsets} contains exactly one alternative.

@param altsets a collection of alternative subsets @return {@code true} if {@code altsets} contains a {@link BitSet} with {@link BitSet//cardinality cardinality} 1, otherwise {@code false}

func PredictionModehasSLLConflictTerminatingPrediction Uses

func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool

Computes the SLL prediction termination condition.

<p> This method computes the SLL prediction termination condition for both of the following cases.</p>

<ul> <li>The usual SLL+LL fallback upon SLL conflict</li> <li>Pure SLL without LL fallback</li> </ul>

<p><strong>COMBINED SLL+LL PARSING</strong></p>

<p>When LL-fallback is enabled upon SLL conflict, correct predictions are ensured regardless of how the termination condition is computed by this method. Due to the substantially higher cost of LL prediction, the prediction should only fall back to LL when the additional lookahead cannot lead to a unique SLL prediction.</p>

<p>Assuming combined SLL+LL parsing, an SLL configuration set with only conflicting subsets should fall back to full LL, even if the configuration sets don't resolve to the same alternative (e.g. {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting configuration, SLL could continue with the hopes that more lookahead will resolve via one of those non-conflicting configurations.</p>

<p>Here's the prediction termination rule then: SLL (for SLL+LL parsing) stops when it sees only conflicting configuration subsets. In contrast, full LL keeps going when there is uncertainty.</p>

<p><strong>HEURISTIC</strong></p>

<p>As a heuristic, we stop prediction when we see any conflicting subset unless we see a state that only has one alternative associated with it. The single-alt-state thing lets prediction continue upon rules like (otherwise, it would admit defeat too soon):</p>

<p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>

<p>When the ATN simulation reaches the state before {@code ';'}, it has a DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop processing this node because alternative two has another way to continue, via {@code [6|2|[]]}.</p>

<p>It also lets us continue for this rule:</p>

<p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>

<p>After Matching input A, we reach the stop state for rule A, state 1. State 8 is the state right before B. Clearly alternatives 1 and 2 conflict and no amount of further lookahead will separate the two. However, alternative 3 will be able to continue and so we do not stop working on this state. In the previous example, we're concerned with states associated with the conflicting alternatives. Here alt 3 is not associated with the conflicting configs, but since we can continue looking for input reasonably, don't declare the state done.</p>

<p><strong>PURE SLL PARSING</strong></p>

<p>To handle pure SLL parsing, all we have to do is make sure that we combine stack contexts for configurations that differ only by semantic predicate. From there, we can do the usual SLL termination heuristic.</p>

<p><strong>PREDICATES IN SLL+LL PARSING</strong></p>

<p>SLL decisions don't evaluate predicates until after they reach DFA stop states because they need to create the DFA cache that works in all semantic situations. In contrast, full LL evaluates predicates collected during start state computation so it can ignore predicates thereafter. This means that SLL termination detection can totally ignore semantic predicates.</p>

<p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not semantic predicate contexts so we might see two configurations like the following.</p>

<p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>

<p>Before testing these configurations against others, we have to merge {@code x} and {@code x'} (without modifying the existing configurations). For example, we test {@code (x+x')==x''} when looking for conflicts in the following configurations.</p>

<p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>

<p>If the configuration set has predicates (as indicated by {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of the configurations to strip out all of the predicates so that a standard {@link ATNConfigSet} will merge everything ignoring predicates.</p>
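
The usual consumer of this termination condition is two-stage parsing: run the decision-level SLL mode with a bail-out error strategy, and only rerun with full LL if that attempt fails. A hedged sketch follows; MyParser and its start rule Prog are hypothetical generated names, and SetPredictionMode on *ParserATNSimulator and NewDefaultErrorStrategy are assumed to be exported as in the other ANTLR 4 runtimes.

// parseTwoStage tries the fast SLL mode first and falls back to full LL
// only when the bail strategy panics (SLL conflict or syntax error).
func parseTwoStage(p *MyParser, tokens *antlr.CommonTokenStream) (tree antlr.ParserRuleContext) {
    p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
    p.SetErrorHandler(antlr.NewBailErrorStrategy())

    defer func() {
        if recover() != nil {
            // Rewind, reset the parser, and retry with full LL, which either
            // succeeds or reports the true syntax error.
            tokens.Seek(0)
            p.SetInputStream(tokens) // resets the parser
            p.SetErrorHandler(antlr.NewDefaultErrorStrategy())
            p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
            tree = p.Prog()
        }
    }()

    tree = p.Prog()
    return tree
}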

func PredictionModehasStateAssociatedWithOneAlt Uses

func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool

func PredictionModeresolvesToJustOneViableAlt Uses

func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int

Full LL prediction termination.

<p>Can we stop looking ahead during ATN simulation or is there some uncertainty as to which alternative we will ultimately pick, after consuming more input? Even if there are partial conflicts, we might know that everything is going to resolve to the same minimum alternative. That means we can stop since no more lookahead will change that fact. On the other hand, there might be multiple conflicts that resolve to different minimums. That means we need more look ahead to decide which of those alternatives we should predict.</p>

<p>The basic idea is to split the set of configurations {@code C}, into conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with non-conflicting configurations. Two configurations conflict if they have identical {@link ATNConfig//state} and {@link ATNConfig//context} values but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} and {@code (s, j, ctx, _)} for {@code i!=j}.</p>

<p>Reduce these configuration subsets to the set of possible alternatives. You can compute the alternative subsets in one pass as follows:</p>

<p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in {@code C} holding {@code s} and {@code ctx} fixed.</p>

<p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>

<pre> map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not alt and not pred </pre>

<p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>

<p>If {@code |A_s,ctx|=1} then there is no conflict associated with {@code s} and {@code ctx}.</p>

<p>Reduce the subsets to singletons by choosing a minimum of each subset. If the union of these alternative subsets is a singleton, then no amount of more lookahead will help us. We will always pick that alternative. If, however, there is more than one alternative, then we are uncertain which alternative to predict and must continue looking for resolution. We may or may not discover an ambiguity in the future, even if there are no conflicting subsets this round.</p>

<p>The biggest sin is to terminate early because it means we've made a decision but were uncertain as to the eventual outcome. We haven't used enough lookahead. On the other hand, announcing a conflict too late is no big deal; you will still have the conflict. It's just inefficient. It might even look until the end of file.</p>

<p>No special consideration for semantic predicates is required because predicates are evaluated on-the-fly for full LL prediction, ensuring that no configuration contains a semantic context during the termination check.</p>

<p><strong>CONFLICTING CONFIGS</strong></p>

<p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict when {@code i!=j} but {@code x=x'}. Because we merge all {@code (s, i, _)} configurations together, that means that there are at most {@code n} configurations associated with state {@code s} for {@code n} possible alternatives in the decision. The merged stacks complicate the comparison of configuration contexts {@code x} and {@code x'}. Sam checks to see if one is a subset of the other by calling merge and checking to see if the merged result is either {@code x} or {@code x'}. If the {@code x} associated with lowest alternative {@code i} is the superset, then {@code i} is the only possible prediction since the others resolve to {@code min(i)} as well. However, if {@code x} is associated with {@code j>i} then at least one stack configuration for {@code j} is not in conflict with alternative {@code i}. The algorithm should keep going, looking for more lookahead due to the uncertainty.</p>

<p>For simplicity, I'm doing an equality check between {@code x} and {@code x'} that lets the algorithm continue to consume lookahead longer than necessary. The reason I like the equality is of course the simplicity but also because that is the test you need to detect the alternatives that are actually in conflict.</p>

<p><strong>CONTINUE/STOP RULE</strong></p>

<p>Continue if union of resolved alternative sets from non-conflicting and conflicting alternative subsets has more than one alternative. We are uncertain about which alternative to predict.</p>

<p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which alternatives are still in the running for the amount of input we've consumed at this point. The conflicting sets let us strip away configurations that won't lead to more states because we resolve conflicts to the configuration with a minimum alternate for the conflicting set.</p>

<p><strong>CASES</strong></p>

<ul>

<li>no conflicts and more than 1 alternative in set => continue</li>

<li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1,3}} => continue </li>

<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1}} => stop and predict 1</li>

<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U {@code {1}} = {@code {1}} => stop and predict 1, can announce ambiguity {@code {1,2}}</li>

<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U {@code {2}} = {@code {1,2}} => continue</li>

<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U {@code {3}} = {@code {1,3}} => continue</li>

</ul>

<p><strong>EXACT AMBIGUITY DETECTION</strong></p>

<p>If all states Report the same conflicting set of alternatives, then we know we have the exact ambiguity set.</p>

<p><code>|A_<em>i</em>| > 1</code> and <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>

<p>In other words, we continue examining lookahead until all {@code A_i} have more than one alternative and all {@code A_i} are the same. If {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate because the resolved set is {@code {1}}. To determine what the real ambiguity is, we have to know whether the ambiguity is between one and two or one and three so we keep going. We can only stop prediction when we need exact ambiguity detection when the sets look like {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
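
The continue/stop rule can be paraphrased compactly: group alternatives by (state, context), reduce each group to its minimum alternative, and stop only when the union of those minimums is a single alternative. The following is a self-contained illustration using plain Go maps rather than the package's ATNConfigSet and BitSet types.

// altKey identifies one configuration group: alternatives that share an ATN
// state and a (merged) context conflict with each other.
type altKey struct {
    state int
    ctx   string
}

// resolvesToOneAlt mirrors the full-LL stop rule described above: reduce each
// group to its minimum alternative and stop only if one alternative survives.
func resolvesToOneAlt(groups map[altKey]map[int]bool) bool {
    viable := make(map[int]bool)
    for _, alts := range groups {
        min := -1
        for alt := range alts {
            if min < 0 || alt < min {
                min = alt
            }
        }
        viable[min] = true
    }
    return len(viable) == 1 // a single survivor means prediction can stop
}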

func PrintArrayJavaStyle Uses

func PrintArrayJavaStyle(sa []string) string

func TerminalNodeToStringArray Uses

func TerminalNodeToStringArray(sa []TerminalNode) []string

func TreesGetNodeText Uses

func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string

func TreesStringTree Uses

func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string

Print out a whole tree in LISP form. {@link //getNodeText} is used on the node payloads to get the text for the nodes. Detect parse trees and extract data appropriately.
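
For example, a parse tree can be dumped for debugging right after the start rule returns. A sketch; it assumes imports of fmt and this package, and relies on any generated parser satisfying the Parser (and therefore Recognizer) interface.

// dumpTree prints the whole parse tree in LISP form, using the rule names of
// the parser that produced it to label interior nodes.
func dumpTree(tree antlr.Tree, p antlr.Parser) {
    fmt.Println(antlr.TreesStringTree(tree, p.GetRuleNames(), p))
}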

type AND Uses

type AND struct {
    // contains filtered or unexported fields
}

func NewAND Uses

func NewAND(a, b SemanticContext) *AND

func (*AND) String Uses

func (a *AND) String() string

type ATN Uses

type ATN struct {
    // DecisionToState is the decision points for all rules, subrules, optional
    // blocks, ()+, ()*, etc. Used to build DFA predictors for them.
    DecisionToState []DecisionState
    // contains filtered or unexported fields
}

func NewATN Uses

func NewATN(grammarType int, maxTokenType int) *ATN

func (*ATN) NextTokens Uses

func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet

func (*ATN) NextTokensInContext Uses

func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet

NextTokensInContext computes the set of valid tokens that can occur starting in state s. If ctx is nil, the set of tokens will not include what can follow the rule surrounding s. In other words, the set will be restricted to tokens reachable staying within the rule of s.

func (*ATN) NextTokensNoContext Uses

func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet

NextTokensNoContext computes the set of valid tokens that can occur starting in s and staying in same rule. Token.EPSILON is in set if we reach end of rule.

type ATNConfig Uses

type ATNConfig interface {
    GetState() ATNState
    GetAlt() int
    GetSemanticContext() SemanticContext

    GetContext() PredictionContext
    SetContext(PredictionContext)

    GetReachesIntoOuterContext() int
    SetReachesIntoOuterContext(int)

    String() string
    // contains filtered or unexported methods
}

ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic context). The syntactic context is a graph-structured stack node whose path(s) to the root is the rule invocation(s) chain used to arrive at the state. The semantic context is the tree of semantic predicates encountered before reaching an ATN state.

type ATNConfigSet Uses

type ATNConfigSet interface {
    Add(ATNConfig, *DoubleDict) bool
    AddAll([]ATNConfig) bool

    GetStates() *Set
    GetPredicates() []SemanticContext
    GetItems() []ATNConfig

    OptimizeConfigs(interpreter *BaseATNSimulator)

    Equals(other interface{}) bool

    Length() int
    IsEmpty() bool
    Contains(ATNConfig) bool
    ContainsFast(ATNConfig) bool
    Clear()
    String() string

    HasSemanticContext() bool
    SetHasSemanticContext(v bool)

    ReadOnly() bool
    SetReadOnly(bool)

    GetConflictingAlts() *BitSet
    SetConflictingAlts(*BitSet)

    FullContext() bool

    GetUniqueAlt() int
    SetUniqueAlt(int)

    GetDipsIntoOuterContext() bool
    SetDipsIntoOuterContext(bool)
    // contains filtered or unexported methods
}

type ATNConfigSetPair Uses

type ATNConfigSetPair struct {
    // contains filtered or unexported fields
}

type ATNDeserializationOptions Uses

type ATNDeserializationOptions struct {
    // contains filtered or unexported fields
}

func NewATNDeserializationOptions Uses

func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions

type ATNDeserializer Uses

type ATNDeserializer struct {
    // contains filtered or unexported fields
}

func NewATNDeserializer Uses

func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer

func (*ATNDeserializer) DeserializeFromUInt16 Uses

func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN

type ATNState Uses

type ATNState interface {
    GetEpsilonOnlyTransitions() bool

    GetRuleIndex() int
    SetRuleIndex(int)

    GetNextTokenWithinRule() *IntervalSet
    SetNextTokenWithinRule(*IntervalSet)

    GetATN() *ATN
    SetATN(*ATN)

    GetStateType() int

    GetStateNumber() int
    SetStateNumber(int)

    GetTransitions() []Transition
    SetTransitions([]Transition)
    AddTransition(Transition, int)

    String() string
    // contains filtered or unexported methods
}

type AbstractPredicateTransition Uses

type AbstractPredicateTransition interface {
    Transition
    IAbstractPredicateTransitionFoo()
}

type ActionTransition Uses

type ActionTransition struct {
    *BaseTransition
    // contains filtered or unexported fields
}

func NewActionTransition Uses

func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition

func (*ActionTransition) Matches Uses

func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*ActionTransition) String Uses

func (t *ActionTransition) String() string

type AltDict Uses

type AltDict struct {
    // contains filtered or unexported fields
}

func NewAltDict Uses

func NewAltDict() *AltDict

func PredictionModeGetStateToAltMap Uses

func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict

Get a map from state to alt subset from a configuration set. For each configuration {@code c} in {@code configs}:

<pre> map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} </pre>

func (*AltDict) Get Uses

func (a *AltDict) Get(key string) interface{}

type ArrayPredictionContext Uses

type ArrayPredictionContext struct {
    *BasePredictionContext
    // contains filtered or unexported fields
}

func NewArrayPredictionContext Uses

func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext

func (*ArrayPredictionContext) GetParent Uses

func (a *ArrayPredictionContext) GetParent(index int) PredictionContext

func (*ArrayPredictionContext) GetReturnStates Uses

func (a *ArrayPredictionContext) GetReturnStates() []int

func (*ArrayPredictionContext) String Uses

func (a *ArrayPredictionContext) String() string

type AtomTransition Uses

type AtomTransition struct {
    *BaseTransition
}

TODO: make all transitions sets? no, should remove set edges

func NewAtomTransition Uses

func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition

func (*AtomTransition) Matches Uses

func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*AtomTransition) String Uses

func (t *AtomTransition) String() string

type BailErrorStrategy Uses

type BailErrorStrategy struct {
    *DefaultErrorStrategy
}

func NewBailErrorStrategy Uses

func NewBailErrorStrategy() *BailErrorStrategy

func (*BailErrorStrategy) Recover Uses

func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException)

Instead of recovering from exception {@code e}, re-panic it wrapped in a {@link ParseCancellationException} so it is not caught by the rule func catches. Use {@link Exception//getCause()} to get the original {@link RecognitionException}.

func (*BailErrorStrategy) RecoverInline Uses

func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token

Make sure we don't attempt to recover inline; if the parser successfully recovers, it won't panic an exception.

func (*BailErrorStrategy) Sync Uses

func (b *BailErrorStrategy) Sync(recognizer Parser)

Make sure we don't attempt to recover from problems in subrules.
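
Because this strategy panics instead of recovering, callers wrap the parse in a recover. A minimal sketch; MyParser and its start rule Prog are hypothetical generated names.

// syntaxValid reports whether the input parses at all. The bail strategy
// re-panics the first RecognitionException, which the deferred recover turns
// into a boolean result.
func syntaxValid(p *MyParser) (ok bool) {
    p.SetErrorHandler(antlr.NewBailErrorStrategy())
    defer func() {
        if recover() != nil {
            ok = false
        }
    }()
    p.Prog()
    return true
}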

type BaseATNConfig Uses

type BaseATNConfig struct {
    // contains filtered or unexported fields
}

func NewBaseATNConfig Uses

func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig

func NewBaseATNConfig1 Uses

func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig

func NewBaseATNConfig2 Uses

func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig

func NewBaseATNConfig3 Uses

func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig

func NewBaseATNConfig4 Uses

func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig

func NewBaseATNConfig5 Uses

func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig

func NewBaseATNConfig6 Uses

func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig

func NewBaseATNConfig7 Uses

func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig

func (*BaseATNConfig) GetAlt Uses

func (b *BaseATNConfig) GetAlt() int

func (*BaseATNConfig) GetContext Uses

func (b *BaseATNConfig) GetContext() PredictionContext

func (*BaseATNConfig) GetReachesIntoOuterContext Uses

func (b *BaseATNConfig) GetReachesIntoOuterContext() int

func (*BaseATNConfig) GetSemanticContext Uses

func (b *BaseATNConfig) GetSemanticContext() SemanticContext

func (*BaseATNConfig) GetState Uses

func (b *BaseATNConfig) GetState() ATNState

func (*BaseATNConfig) SetContext Uses

func (b *BaseATNConfig) SetContext(v PredictionContext)

func (*BaseATNConfig) SetReachesIntoOuterContext Uses

func (b *BaseATNConfig) SetReachesIntoOuterContext(v int)

func (*BaseATNConfig) String Uses

func (b *BaseATNConfig) String() string

type BaseATNConfigSet Uses

type BaseATNConfigSet struct {
    // contains filtered or unexported fields
}

BaseATNConfigSet is a specialized set of ATNConfig that tracks information about its elements and can combine similar configurations using a graph-structured stack.

func NewBaseATNConfigSet Uses

func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet

func (*BaseATNConfigSet) Add Uses

func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool

Add merges contexts with existing configs for (s, i, pi, _), where s is the ATNConfig.state, i is the ATNConfig.alt, and pi is the ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates dipsIntoOuterContext and hasSemanticContext when necessary.

func (*BaseATNConfigSet) AddAll Uses

func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool

func (*BaseATNConfigSet) Clear Uses

func (b *BaseATNConfigSet) Clear()

func (*BaseATNConfigSet) Contains Uses

func (b *BaseATNConfigSet) Contains(item ATNConfig) bool

func (*BaseATNConfigSet) ContainsFast Uses

func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool

func (*BaseATNConfigSet) Equals Uses

func (b *BaseATNConfigSet) Equals(other interface{}) bool

func (*BaseATNConfigSet) FullContext Uses

func (b *BaseATNConfigSet) FullContext() bool

func (*BaseATNConfigSet) GetConflictingAlts Uses

func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet

func (*BaseATNConfigSet) GetDipsIntoOuterContext Uses

func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool

func (*BaseATNConfigSet) GetItems Uses

func (b *BaseATNConfigSet) GetItems() []ATNConfig

func (*BaseATNConfigSet) GetPredicates Uses

func (b *BaseATNConfigSet) GetPredicates() []SemanticContext

func (*BaseATNConfigSet) GetStates Uses

func (b *BaseATNConfigSet) GetStates() *Set

func (*BaseATNConfigSet) GetUniqueAlt Uses

func (b *BaseATNConfigSet) GetUniqueAlt() int

func (*BaseATNConfigSet) HasSemanticContext Uses

func (b *BaseATNConfigSet) HasSemanticContext() bool

func (*BaseATNConfigSet) IsEmpty Uses

func (b *BaseATNConfigSet) IsEmpty() bool

func (*BaseATNConfigSet) Length Uses

func (b *BaseATNConfigSet) Length() int

func (*BaseATNConfigSet) OptimizeConfigs Uses

func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator)

func (*BaseATNConfigSet) ReadOnly Uses

func (b *BaseATNConfigSet) ReadOnly() bool

func (*BaseATNConfigSet) SetConflictingAlts Uses

func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet)

func (*BaseATNConfigSet) SetDipsIntoOuterContext Uses

func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool)

func (*BaseATNConfigSet) SetHasSemanticContext Uses

func (b *BaseATNConfigSet) SetHasSemanticContext(v bool)

func (*BaseATNConfigSet) SetReadOnly Uses

func (b *BaseATNConfigSet) SetReadOnly(readOnly bool)

func (*BaseATNConfigSet) SetUniqueAlt Uses

func (b *BaseATNConfigSet) SetUniqueAlt(v int)

func (*BaseATNConfigSet) String Uses

func (b *BaseATNConfigSet) String() string

type BaseATNSimulator Uses

type BaseATNSimulator struct {
    // contains filtered or unexported fields
}

func NewBaseATNSimulator Uses

func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator

func (*BaseATNSimulator) ATN Uses

func (b *BaseATNSimulator) ATN() *ATN

func (*BaseATNSimulator) DecisionToDFA Uses

func (b *BaseATNSimulator) DecisionToDFA() []*DFA

func (*BaseATNSimulator) SharedContextCache Uses

func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache

type BaseATNState Uses

type BaseATNState struct {
    // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
    NextTokenWithinRule *IntervalSet
    // contains filtered or unexported fields
}

func NewBaseATNState Uses

func NewBaseATNState() *BaseATNState

func (*BaseATNState) AddTransition Uses

func (as *BaseATNState) AddTransition(trans Transition, index int)

func (*BaseATNState) GetATN Uses

func (as *BaseATNState) GetATN() *ATN

func (*BaseATNState) GetEpsilonOnlyTransitions Uses

func (as *BaseATNState) GetEpsilonOnlyTransitions() bool

func (*BaseATNState) GetNextTokenWithinRule Uses

func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet

func (*BaseATNState) GetRuleIndex Uses

func (as *BaseATNState) GetRuleIndex() int

func (*BaseATNState) GetStateNumber Uses

func (as *BaseATNState) GetStateNumber() int

func (*BaseATNState) GetStateType Uses

func (as *BaseATNState) GetStateType() int

func (*BaseATNState) GetTransitions Uses

func (as *BaseATNState) GetTransitions() []Transition

func (*BaseATNState) SetATN Uses

func (as *BaseATNState) SetATN(atn *ATN)

func (*BaseATNState) SetNextTokenWithinRule Uses

func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet)

func (*BaseATNState) SetRuleIndex Uses

func (as *BaseATNState) SetRuleIndex(v int)

func (*BaseATNState) SetStateNumber Uses

func (as *BaseATNState) SetStateNumber(stateNumber int)

func (*BaseATNState) SetTransitions Uses

func (as *BaseATNState) SetTransitions(t []Transition)

func (*BaseATNState) String Uses

func (as *BaseATNState) String() string

type BaseAbstractPredicateTransition Uses

type BaseAbstractPredicateTransition struct {
    *BaseTransition
}

func NewBasePredicateTransition Uses

func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition

func (*BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo Uses

func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo()

type BaseBlockStartState Uses

type BaseBlockStartState struct {
    *BaseDecisionState
    // contains filtered or unexported fields
}

BaseBlockStartState is the start of a regular (...) block.

func NewBlockStartState Uses

func NewBlockStartState() *BaseBlockStartState

type BaseDecisionState Uses

type BaseDecisionState struct {
    *BaseATNState
    // contains filtered or unexported fields
}

func NewBaseDecisionState Uses

func NewBaseDecisionState() *BaseDecisionState

type BaseInterpreterRuleContext Uses

type BaseInterpreterRuleContext struct {
    *BaseParserRuleContext
}

func NewBaseInterpreterRuleContext Uses

func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext

type BaseLexer Uses

type BaseLexer struct {
    *BaseRecognizer

    Interpreter         ILexerATNSimulator
    TokenStartCharIndex int
    TokenStartLine      int
    TokenStartColumn    int
    ActionType          int
    Virt                Lexer // The most derived lexer implementation. Allows virtual method calls.
    // contains filtered or unexported fields
}

func NewBaseLexer Uses

func NewBaseLexer(input CharStream) *BaseLexer

func (*BaseLexer) Emit Uses

func (b *BaseLexer) Emit() Token

The standard method called to automatically emit a token at the outermost lexical rule. The token object should point into the char buffer start..stop. If there is a text override in 'text', use that to set the token's text. Override this method to emit custom Token objects or provide a new factory.

func (*BaseLexer) EmitEOF Uses

func (b *BaseLexer) EmitEOF() Token

func (*BaseLexer) EmitToken Uses

func (b *BaseLexer) EmitToken(token Token)

By default does not support multiple emits per NextToken invocation for efficiency reasons. Subclass and override this method, NextToken, and GetToken (to push tokens into a list and pull from that list rather than a single variable as this implementation does).

func (*BaseLexer) GetATN Uses

func (b *BaseLexer) GetATN() *ATN

func (*BaseLexer) GetAllTokens Uses

func (b *BaseLexer) GetAllTokens() []Token

Return a list of all Token objects in the input char stream. Forces a load of all tokens. Does not include the EOF token.

func (*BaseLexer) GetCharIndex Uses

func (b *BaseLexer) GetCharIndex() int

What is the index of the current character of lookahead?

func (*BaseLexer) GetCharPositionInLine Uses

func (b *BaseLexer) GetCharPositionInLine() int

func (*BaseLexer) GetInputStream Uses

func (b *BaseLexer) GetInputStream() CharStream

func (*BaseLexer) GetInterpreter Uses

func (b *BaseLexer) GetInterpreter() ILexerATNSimulator

func (*BaseLexer) GetLine Uses

func (b *BaseLexer) GetLine() int

func (*BaseLexer) GetSourceName Uses

func (b *BaseLexer) GetSourceName() string

func (*BaseLexer) GetText Uses

func (b *BaseLexer) GetText() string

Return the text Matched so far for the current token, or any text override. Setting the complete text of the token wipes any previous changes to the text.

func (*BaseLexer) GetTokenFactory Uses

func (b *BaseLexer) GetTokenFactory() TokenFactory

func (*BaseLexer) GetTokenSourceCharStreamPair Uses

func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair

func (*BaseLexer) GetType Uses

func (b *BaseLexer) GetType() int

func (*BaseLexer) More Uses

func (b *BaseLexer) More()

func (*BaseLexer) NextToken Uses

func (b *BaseLexer) NextToken() Token

Return a token from this source; i.e., Match a token on the char stream.
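
A standalone token loop built on NextToken. This is a sketch; NewMyLexer is a hypothetical generated lexer constructor, imports of fmt and this package are assumed, and the Token accessors used here are part of the Token interface (not shown in this excerpt).

// printTokens pulls tokens one at a time until EOF. Rules that finish with a
// Skip or More action never surface here; NextToken keeps looking on its own.
func printTokens(src string) {
    lexer := NewMyLexer(antlr.NewInputStream(src)) // hypothetical generated lexer
    for tok := lexer.NextToken(); tok.GetTokenType() != antlr.TokenEOF; tok = lexer.NextToken() {
        fmt.Printf("type=%d channel=%d text=%q\n", tok.GetTokenType(), tok.GetChannel(), tok.GetText())
    }
}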

func (*BaseLexer) PopMode Uses

func (b *BaseLexer) PopMode() int

func (*BaseLexer) PushMode Uses

func (b *BaseLexer) PushMode(m int)

func (*BaseLexer) Recover Uses

func (b *BaseLexer) Recover(re RecognitionException)

Lexers can normally Match any char in its vocabulary after Matching a token, so do the easy thing and just kill a character and hope it all works out. You can instead use the rule invocation stack to do sophisticated error recovery if you are in a fragment rule.

func (*BaseLexer) SetChannel Uses

func (b *BaseLexer) SetChannel(v int)

func (*BaseLexer) SetMode Uses

func (b *BaseLexer) SetMode(m int)

func (*BaseLexer) SetText Uses

func (b *BaseLexer) SetText(text string)

func (*BaseLexer) SetType Uses

func (b *BaseLexer) SetType(t int)

func (*BaseLexer) Skip Uses

func (b *BaseLexer) Skip()

Instruct the lexer to Skip creating a token for the current lexer rule and look for another token. NextToken() knows to keep looking when a lexer rule finishes with the token set to SKIPTOKEN. Recall that if token==nil at the end of any token rule, it creates one for you and emits it.

type BaseLexerAction Uses

type BaseLexerAction struct {
    // contains filtered or unexported fields
}

func NewBaseLexerAction Uses

func NewBaseLexerAction(action int) *BaseLexerAction

type BaseParseTreeListener Uses

type BaseParseTreeListener struct{}

func (*BaseParseTreeListener) EnterEveryRule Uses

func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext)

func (*BaseParseTreeListener) ExitEveryRule Uses

func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)

func (*BaseParseTreeListener) VisitErrorNode Uses

func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)

func (*BaseParseTreeListener) VisitTerminal Uses

func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)

type BaseParseTreeVisitor Uses

type BaseParseTreeVisitor struct{}

func (*BaseParseTreeVisitor) Visit Uses

func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}

func (*BaseParseTreeVisitor) VisitChildren Uses

func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}

func (*BaseParseTreeVisitor) VisitErrorNode Uses

func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}

func (*BaseParseTreeVisitor) VisitTerminal Uses

func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{}

type BaseParser Uses

type BaseParser struct {
    *BaseRecognizer

    Interpreter     *ParserATNSimulator
    BuildParseTrees bool
    // contains filtered or unexported fields
}

func NewBaseParser Uses

func NewBaseParser(input TokenStream) *BaseParser

This is all the parsing support code; essentially, most of it is error recovery stuff.

func (*BaseParser) AddParseListener Uses

func (p *BaseParser) AddParseListener(listener ParseTreeListener)

Registers {@code listener} to receive events during the parsing process.

<p>To support output-preserving grammar transformations (including but not limited to left-recursion removal, automated left-factoring, and optimized code generation), calls to listener methods during the parse may differ substantially from calls made by {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In particular, rule entry and exit events may occur in a different order during the parse than after the parse. In addition, calls to certain rule entry methods may be omitted.</p>

<p>With the following specific exceptions, calls to listener events are <em>deterministic</em>, i.e. for identical input the calls to listener methods will be the same.</p>

<ul> <li>Alterations to the grammar used to generate code may change the behavior of the listener calls.</li> <li>Alterations to the command line options passed to ANTLR 4 when generating the parser may change the behavior of the listener calls.</li> <li>Changing the version of the ANTLR Tool used to generate the parser may change the behavior of the listener calls.</li> </ul>

@param listener the listener to add

@panics NilPointerException if {@code listener} is {@code nil}
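
A hedged sketch of a parse-time listener: embedding BaseParseTreeListener supplies no-op implementations of the other ParseTreeListener methods, so only the event of interest needs an override. ruleEnterCounter and the generated parser's Prog rule are illustrative names.

// ruleEnterCounter counts rule-entry events as they fire during the parse,
// rather than walking the finished tree afterwards.
type ruleEnterCounter struct {
    *antlr.BaseParseTreeListener
    count int
}

func (r *ruleEnterCounter) EnterEveryRule(ctx antlr.ParserRuleContext) {
    r.count++
}

// Usage, with a hypothetical generated parser p:
//
//     counter := &ruleEnterCounter{BaseParseTreeListener: &antlr.BaseParseTreeListener{}}
//     p.AddParseListener(counter)
//     p.Prog()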

func (*BaseParser) Consume Uses

func (p *BaseParser) Consume() Token

func (*BaseParser) DumpDFA Uses

func (p *BaseParser) DumpDFA()

For debugging and other purposes.

func (*BaseParser) EnterOuterAlt Uses

func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int)

func (*BaseParser) EnterRecursionRule Uses

func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int)

func (*BaseParser) EnterRule Uses

func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int)

func (*BaseParser) ExitRule Uses

func (p *BaseParser) ExitRule()

func (*BaseParser) GetATN Uses

func (p *BaseParser) GetATN() *ATN

func (*BaseParser) GetATNWithBypassAlts Uses

func (p *BaseParser) GetATNWithBypassAlts()

The ATN with bypass alternatives is expensive to create so we create it lazily.

@panics UnsupportedOperationException if the current parser does not implement the {@link //getSerializedATN()} method.

func (*BaseParser) GetCurrentToken Uses

func (p *BaseParser) GetCurrentToken() Token

Match needs to return the current input symbol, which gets put into the label for the associated token ref e.g., x=ID.

func (*BaseParser) GetDFAStrings Uses

func (p *BaseParser) GetDFAStrings() string

For debugging and other purposes.

func (*BaseParser) GetErrorHandler Uses

func (p *BaseParser) GetErrorHandler() ErrorStrategy

func (*BaseParser) GetExpectedTokens Uses

func (p *BaseParser) GetExpectedTokens() *IntervalSet

Computes the set of input symbols which could follow the current parser state and context, as given by {@link //GetState} and {@link //GetContext}, respectively.

@see ATN//getExpectedTokens(int, RuleContext)

func (*BaseParser) GetExpectedTokensWithinCurrentRule Uses

func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet

func (*BaseParser) GetInputStream Uses

func (p *BaseParser) GetInputStream() IntStream

func (*BaseParser) GetInterpreter Uses

func (p *BaseParser) GetInterpreter() *ParserATNSimulator

func (*BaseParser) GetInvokingContext Uses

func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext

func (*BaseParser) GetParseListeners Uses

func (p *BaseParser) GetParseListeners() []ParseTreeListener

func (*BaseParser) GetParserRuleContext Uses

func (p *BaseParser) GetParserRuleContext() ParserRuleContext

func (*BaseParser) GetPrecedence Uses

func (p *BaseParser) GetPrecedence() int

func (*BaseParser) GetRuleIndex Uses

func (p *BaseParser) GetRuleIndex(ruleName string) int

Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.

func (*BaseParser) GetRuleInvocationStack Uses

func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string

func (*BaseParser) GetSourceName Uses

func (p *BaseParser) GetSourceName() string

func (*BaseParser) GetTokenFactory Uses

func (p *BaseParser) GetTokenFactory() TokenFactory

func (*BaseParser) GetTokenStream Uses

func (p *BaseParser) GetTokenStream() TokenStream

func (*BaseParser) IsExpectedToken Uses

func (p *BaseParser) IsExpectedToken(symbol int) bool

func (*BaseParser) Match Uses

func (p *BaseParser) Match(ttype int) Token

func (*BaseParser) MatchWildcard Uses

func (p *BaseParser) MatchWildcard() Token

func (*BaseParser) NotifyErrorListeners Uses

func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException)

func (*BaseParser) Precpred Uses

func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool

func (*BaseParser) PushNewRecursionContext Uses

func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int)

func (*BaseParser) RemoveParseListener Uses

func (p *BaseParser) RemoveParseListener(listener ParseTreeListener)

Remove {@code listener} from the list of parse listeners.

<p>If {@code listener} is {@code nil} or has not been added as a parse listener, this method does nothing.</p> @param listener the listener to remove

func (*BaseParser) SetErrorHandler Uses

func (p *BaseParser) SetErrorHandler(e ErrorStrategy)

func (*BaseParser) SetInputStream Uses

func (p *BaseParser) SetInputStream(input TokenStream)

func (*BaseParser) SetParserRuleContext Uses

func (p *BaseParser) SetParserRuleContext(v ParserRuleContext)

func (*BaseParser) SetTokenStream Uses

func (p *BaseParser) SetTokenStream(input TokenStream)

Set the token stream and reset the parser.

func (*BaseParser) SetTrace Uses

func (p *BaseParser) SetTrace(trace *TraceListener)

During a parse it is sometimes useful to listen in on the rule entry and exit events as well as token Matches. This is for quick-and-dirty debugging.

func (*BaseParser) TriggerEnterRuleEvent Uses

func (p *BaseParser) TriggerEnterRuleEvent()

Notify any parse listeners of an enter rule event.

func (*BaseParser) TriggerExitRuleEvent Uses

func (p *BaseParser) TriggerExitRuleEvent()

Notify any parse listeners of an exit rule event.

@see //addParseListener

func (*BaseParser) UnrollRecursionContexts Uses

func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext)

type BaseParserRuleContext Uses

type BaseParserRuleContext struct {
    *BaseRuleContext
    // contains filtered or unexported fields
}

func NewBaseParserRuleContext Uses

func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext

func (*BaseParserRuleContext) Accept Uses

func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{}

func (*BaseParserRuleContext) AddChild Uses

func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext

func (*BaseParserRuleContext) AddErrorNode Uses

func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl

func (*BaseParserRuleContext) AddTokenNode Uses

func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl

func (*BaseParserRuleContext) CopyFrom Uses

func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext)

func (*BaseParserRuleContext) EnterRule Uses

func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener)

Double dispatch methods for listeners

func (*BaseParserRuleContext) ExitRule Uses

func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener)

func (*BaseParserRuleContext) GetChild Uses

func (prc *BaseParserRuleContext) GetChild(i int) Tree

func (*BaseParserRuleContext) GetChildCount Uses

func (prc *BaseParserRuleContext) GetChildCount() int

func (*BaseParserRuleContext) GetChildOfType Uses

func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext

func (*BaseParserRuleContext) GetChildren Uses

func (prc *BaseParserRuleContext) GetChildren() []Tree

func (*BaseParserRuleContext) GetPayload Uses

func (prc *BaseParserRuleContext) GetPayload() interface{}

func (*BaseParserRuleContext) GetRuleContext Uses

func (prc *BaseParserRuleContext) GetRuleContext() RuleContext

func (*BaseParserRuleContext) GetSourceInterval Uses

func (prc *BaseParserRuleContext) GetSourceInterval() *Interval

func (*BaseParserRuleContext) GetStart Uses

func (prc *BaseParserRuleContext) GetStart() Token

func (*BaseParserRuleContext) GetStop Uses

func (prc *BaseParserRuleContext) GetStop() Token

func (*BaseParserRuleContext) GetText Uses

func (prc *BaseParserRuleContext) GetText() string

func (*BaseParserRuleContext) GetToken Uses

func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode

func (*BaseParserRuleContext) GetTokens Uses

func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode

func (*BaseParserRuleContext) GetTypedRuleContext Uses

func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext

func (*BaseParserRuleContext) GetTypedRuleContexts Uses

func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext

func (*BaseParserRuleContext) RemoveLastChild Uses

func (prc *BaseParserRuleContext) RemoveLastChild()

Used by EnterOuterAlt to toss out a RuleContext previously added as we entered a rule. If we have a # label, we will need to remove the generic ruleContext object.

func (*BaseParserRuleContext) SetException Uses

func (prc *BaseParserRuleContext) SetException(e RecognitionException)

func (*BaseParserRuleContext) SetStart Uses

func (prc *BaseParserRuleContext) SetStart(t Token)

func (*BaseParserRuleContext) SetStop Uses

func (prc *BaseParserRuleContext) SetStop(t Token)

func (*BaseParserRuleContext) String Uses

func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string

func (*BaseParserRuleContext) ToStringTree Uses

func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string

type BasePredictionContext Uses

type BasePredictionContext struct {
    // contains filtered or unexported fields
}

func NewBasePredictionContext Uses

func NewBasePredictionContext(cachedHash int) *BasePredictionContext

type BaseRecognitionException Uses

type BaseRecognitionException struct {
    // contains filtered or unexported fields
}

func NewBaseRecognitionException Uses

func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException

func (*BaseRecognitionException) GetInputStream Uses

func (b *BaseRecognitionException) GetInputStream() IntStream

func (*BaseRecognitionException) GetMessage Uses

func (b *BaseRecognitionException) GetMessage() string

func (*BaseRecognitionException) GetOffendingToken Uses

func (b *BaseRecognitionException) GetOffendingToken() Token

func (*BaseRecognitionException) String Uses

func (b *BaseRecognitionException) String() string

type BaseRecognizer Uses

type BaseRecognizer struct {
    RuleNames       []string
    LiteralNames    []string
    SymbolicNames   []string
    GrammarFileName string
    // contains filtered or unexported fields
}

func NewBaseRecognizer Uses

func NewBaseRecognizer() *BaseRecognizer

func (*BaseRecognizer) Action Uses

func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int)

func (*BaseRecognizer) AddErrorListener Uses

func (b *BaseRecognizer) AddErrorListener(listener ErrorListener)

func (*BaseRecognizer) GetErrorHeader Uses

func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string

What is the error header, normally line/character position information?

func (*BaseRecognizer) GetErrorListenerDispatch Uses

func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener

func (*BaseRecognizer) GetLiteralNames Uses

func (b *BaseRecognizer) GetLiteralNames() []string

func (*BaseRecognizer) GetRuleIndexMap Uses

func (b *BaseRecognizer) GetRuleIndexMap() map[string]int

Get a map from rule names to rule indexes.

<p>Used for XPath and tree pattern compilation.</p>

func (*BaseRecognizer) GetRuleNames Uses

func (b *BaseRecognizer) GetRuleNames() []string

func (*BaseRecognizer) GetState Uses

func (b *BaseRecognizer) GetState() int

func (*BaseRecognizer) GetSymbolicNames Uses

func (b *BaseRecognizer) GetSymbolicNames() []string

func (*BaseRecognizer) GetTokenErrorDisplay Uses

func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string

How should a token be displayed in an error message? The default is to display just the text, but during development you might want to have a lot of information spit out. Override in that case to use t.String() (which, for CommonToken, dumps everything about the token). This is better than forcing you to override a method in your token objects, because you don't have to go modify your lexer so that it creates a new token type.

@deprecated This method is not called by the ANTLR 4 Runtime. Specific implementations of {@link ANTLRErrorStrategy} may provide a similar feature when necessary. For example, see {@link DefaultErrorStrategy//GetTokenErrorDisplay}.

func (*BaseRecognizer) GetTokenNames Uses

func (b *BaseRecognizer) GetTokenNames() []string

func (*BaseRecognizer) GetTokenType Uses

func (b *BaseRecognizer) GetTokenType(tokenName string) int

func (*BaseRecognizer) Precpred Uses

func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool

func (*BaseRecognizer) RemoveErrorListeners Uses

func (b *BaseRecognizer) RemoveErrorListeners()

func (*BaseRecognizer) Sempred Uses

func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool

Subclasses need to override these if there are sempreds or actions that the ATN interpreter needs to execute.

func (*BaseRecognizer) SetState Uses

func (b *BaseRecognizer) SetState(v int)

type BaseRewriteOperation Uses

type BaseRewriteOperation struct {
    // contains filtered or unexported fields
}

func (*BaseRewriteOperation) Execute Uses

func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int

func (*BaseRewriteOperation) GetIndex Uses

func (op *BaseRewriteOperation) GetIndex() int

func (*BaseRewriteOperation) GetInstructionIndex Uses

func (op *BaseRewriteOperation) GetInstructionIndex() int

func (*BaseRewriteOperation) GetOpName Uses

func (op *BaseRewriteOperation) GetOpName() string

func (*BaseRewriteOperation) GetText Uses

func (op *BaseRewriteOperation) GetText() string

func (*BaseRewriteOperation) GetTokens Uses

func (op *BaseRewriteOperation) GetTokens() TokenStream

func (*BaseRewriteOperation) SetIndex Uses

func (op *BaseRewriteOperation) SetIndex(val int)

func (*BaseRewriteOperation) SetInstructionIndex Uses

func (op *BaseRewriteOperation) SetInstructionIndex(val int)

func (*BaseRewriteOperation) SetOpName Uses

func (op *BaseRewriteOperation) SetOpName(val string)

func (*BaseRewriteOperation) SetText Uses

func (op *BaseRewriteOperation) SetText(val string)

func (*BaseRewriteOperation) SetTokens Uses

func (op *BaseRewriteOperation) SetTokens(val TokenStream)

func (*BaseRewriteOperation) String Uses

func (op *BaseRewriteOperation) String() string

type BaseRuleContext Uses

type BaseRuleContext struct {
    RuleIndex int
    // contains filtered or unexported fields
}

func NewBaseRuleContext Uses

func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext

func (*BaseRuleContext) GetAltNumber Uses

func (b *BaseRuleContext) GetAltNumber() int

func (*BaseRuleContext) GetBaseRuleContext Uses

func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext

func (*BaseRuleContext) GetInvokingState Uses

func (b *BaseRuleContext) GetInvokingState() int

func (*BaseRuleContext) GetParent Uses

func (b *BaseRuleContext) GetParent() Tree

func (*BaseRuleContext) GetRuleIndex Uses

func (b *BaseRuleContext) GetRuleIndex() int

func (*BaseRuleContext) IsEmpty Uses

func (b *BaseRuleContext) IsEmpty() bool

A context is empty if there is no invoking state, meaning that no rule invoked the current context.

func (*BaseRuleContext) SetAltNumber Uses

func (b *BaseRuleContext) SetAltNumber(altNumber int)

func (*BaseRuleContext) SetInvokingState Uses

func (b *BaseRuleContext) SetInvokingState(t int)

func (*BaseRuleContext) SetParent Uses

func (b *BaseRuleContext) SetParent(v Tree)

type BaseSingletonPredictionContext Uses

type BaseSingletonPredictionContext struct {
    *BasePredictionContext
    // contains filtered or unexported fields
}

func NewBaseSingletonPredictionContext Uses

func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext

func (*BaseSingletonPredictionContext) GetParent Uses

func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext

func (*BaseSingletonPredictionContext) String Uses

func (b *BaseSingletonPredictionContext) String() string

type BaseToken Uses

type BaseToken struct {
    // contains filtered or unexported fields
}

func (*BaseToken) GetChannel Uses

func (b *BaseToken) GetChannel() int

func (*BaseToken) GetColumn Uses

func (b *BaseToken) GetColumn() int

func (*BaseToken) GetInputStream Uses

func (b *BaseToken) GetInputStream() CharStream

func (*BaseToken) GetLine Uses

func (b *BaseToken) GetLine() int

func (*BaseToken) GetSource Uses

func (b *BaseToken) GetSource() *TokenSourceCharStreamPair

func (*BaseToken) GetStart Uses

func (b *BaseToken) GetStart() int

func (*BaseToken) GetStop Uses

func (b *BaseToken) GetStop() int

func (*BaseToken) GetTokenIndex Uses

func (b *BaseToken) GetTokenIndex() int

func (*BaseToken) GetTokenSource Uses

func (b *BaseToken) GetTokenSource() TokenSource

func (*BaseToken) GetTokenType Uses

func (b *BaseToken) GetTokenType() int

func (*BaseToken) SetTokenIndex Uses

func (b *BaseToken) SetTokenIndex(v int)

type BaseTransition Uses

type BaseTransition struct {
    // contains filtered or unexported fields
}

func NewBaseTransition Uses

func NewBaseTransition(target ATNState) *BaseTransition

func (*BaseTransition) Matches Uses

func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

type BasicBlockStartState Uses

type BasicBlockStartState struct {
    *BaseBlockStartState
}

func NewBasicBlockStartState Uses

func NewBasicBlockStartState() *BasicBlockStartState

type BasicState Uses

type BasicState struct {
    *BaseATNState
}

func NewBasicState Uses

func NewBasicState() *BasicState

type BitSet Uses

type BitSet struct {
    // contains filtered or unexported fields
}

func NewBitSet Uses

func NewBitSet() *BitSet

func PredictionModeGetAlts Uses

func PredictionModeGetAlts(altsets []*BitSet) *BitSet

Gets the complete set of represented alternatives for a collection of alternative subsets. This method returns the union of each {@link BitSet} in {@code altsets}.

@param altsets a collection of alternative subsets @return the set of represented alternatives in {@code altsets}

func PredictionModegetConflictingAltSubsets Uses

func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet

This func gets the conflicting alt subsets from a configuration set. For each configuration {@code c} in {@code configs}:

<pre> map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not alt and not pred </pre>

func (*BitSet) String Uses

func (b *BitSet) String() string

type BlockEndState Uses

type BlockEndState struct {
    *BaseATNState
    // contains filtered or unexported fields
}

BlockEndState is a terminal node of a simple (a|b|c) block.

func NewBlockEndState Uses

func NewBlockEndState() *BlockEndState

type BlockStartState Uses

type BlockStartState interface {
    DecisionState
    // contains filtered or unexported methods
}

type BlockStartStateIntPair Uses

type BlockStartStateIntPair struct {
    // contains filtered or unexported fields
}

type CharStream Uses

type CharStream interface {
    IntStream
    GetText(int, int) string
    GetTextFromTokens(start, end Token) string
    GetTextFromInterval(*Interval) string
}

type CommonToken Uses

type CommonToken struct {
    *BaseToken
}

func NewCommonToken Uses

func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken

func (*CommonToken) GetText Uses

func (c *CommonToken) GetText() string

func (*CommonToken) SetText Uses

func (c *CommonToken) SetText(text string)

func (*CommonToken) String Uses

func (c *CommonToken) String() string

type CommonTokenFactory Uses

type CommonTokenFactory struct {
    // contains filtered or unexported fields
}

CommonTokenFactory is the default TokenFactory implementation.

func NewCommonTokenFactory Uses

func NewCommonTokenFactory(copyText bool) *CommonTokenFactory

func (*CommonTokenFactory) Create Uses

func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token

type CommonTokenStream Uses

type CommonTokenStream struct {
    // contains filtered or unexported fields
}

CommonTokenStream is an implementation of TokenStream that loads tokens from a TokenSource on-demand and places the tokens in a buffer to provide access to any previous token by index. The stream filters tokens to those on a particular channel, such as Token.DEFAULT_CHANNEL or Token.HIDDEN_CHANNEL, as selected by the channel argument to NewCommonTokenStream; tokens on other channels remain in the buffer but are skipped during lookahead and consumption.

func NewCommonTokenStream Uses

func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream

func (*CommonTokenStream) Consume Uses

func (c *CommonTokenStream) Consume()

func (*CommonTokenStream) Fill Uses

func (c *CommonTokenStream) Fill()

Fill gets all tokens from the lexer until EOF.
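
A minimal sketch of typical buffering usage: the Lexer value would come from an ANTLR-generated lexer constructed over an InputStream or FileStream, and everything below uses only the functions listed on this page.

import (
    "fmt"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

// printAllTokens buffers every token produced by lex on the default channel
// and prints its text. lex would typically be a generated lexer built over
// antlr.NewInputStream or antlr.NewFileStream.
func printAllTokens(lex antlr.Lexer) {
    stream := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
    stream.Fill() // drain the lexer to EOF so GetAllTokens sees everything
    for _, tok := range stream.GetAllTokens() {
        fmt.Println(tok.GetText())
    }
}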

func (*CommonTokenStream) Get Uses

func (c *CommonTokenStream) Get(index int) Token

func (*CommonTokenStream) GetAllText Uses

func (c *CommonTokenStream) GetAllText() string

func (*CommonTokenStream) GetAllTokens Uses

func (c *CommonTokenStream) GetAllTokens() []Token

func (*CommonTokenStream) GetHiddenTokensToLeft Uses

func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token

GetHiddenTokensToLeft collects all tokens on channel to the left of the current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is -1, it finds any non-default channel token.

func (*CommonTokenStream) GetHiddenTokensToRight Uses

func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token

GetHiddenTokensToRight collects all tokens on a specified channel to the right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or EOF. If channel is -1, it finds any non-default channel token.
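
A sketch of recovering off-channel tokens with this API, assuming the grammar routes comments or whitespace to the hidden channel and the stream has already buffered the relevant tokens (for example via Fill):

import (
    "fmt"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

// printLeadingHidden prints any hidden-channel tokens immediately to the
// left of the token at tokIndex. Passing -1 instead of TokenHiddenChannel
// would match tokens on any non-default channel.
func printLeadingHidden(stream *antlr.CommonTokenStream, tokIndex int) {
    for _, tok := range stream.GetHiddenTokensToLeft(tokIndex, antlr.TokenHiddenChannel) {
        fmt.Printf("hidden before %d: %q\n", tokIndex, tok.GetText())
    }
}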

func (*CommonTokenStream) GetSourceName Uses

func (c *CommonTokenStream) GetSourceName() string

func (*CommonTokenStream) GetTextFromInterval Uses

func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string

func (*CommonTokenStream) GetTextFromRuleContext Uses

func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string

func (*CommonTokenStream) GetTextFromTokens Uses

func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string

func (*CommonTokenStream) GetTokenSource Uses

func (c *CommonTokenStream) GetTokenSource() TokenSource

func (*CommonTokenStream) GetTokens Uses

func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token

GetTokens gets all tokens from start to stop inclusive.

func (*CommonTokenStream) Index Uses

func (c *CommonTokenStream) Index() int

func (*CommonTokenStream) LA Uses

func (c *CommonTokenStream) LA(i int) int

func (*CommonTokenStream) LB Uses

func (c *CommonTokenStream) LB(k int) Token

func (*CommonTokenStream) LT Uses

func (c *CommonTokenStream) LT(k int) Token

func (*CommonTokenStream) Mark Uses

func (c *CommonTokenStream) Mark() int

func (*CommonTokenStream) NextTokenOnChannel Uses

func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int

NextTokenOnChannel returns the index of the next token on channel given a starting index. Returns i if tokens[i] is on channel. Returns -1 if there are no tokens on channel between i and EOF.

func (*CommonTokenStream) Release Uses

func (c *CommonTokenStream) Release(marker int)

func (*CommonTokenStream) Seek Uses

func (c *CommonTokenStream) Seek(index int)

func (*CommonTokenStream) SetTokenSource Uses

func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource)

SetTokenSource resets the token stream by setting its token source.

func (*CommonTokenStream) Size Uses

func (c *CommonTokenStream) Size() int

func (*CommonTokenStream) Sync Uses

func (c *CommonTokenStream) Sync(i int) bool

Sync makes sure index i in tokens has a token and returns true if a token is located at index i and otherwise false.

type ConsoleErrorListener Uses

type ConsoleErrorListener struct {
    *DefaultErrorListener
}

func NewConsoleErrorListener Uses

func NewConsoleErrorListener() *ConsoleErrorListener

func (*ConsoleErrorListener) SyntaxError Uses

func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)

{@inheritDoc}

<p> This implementation prints messages to {@link System//err} containing the values of {@code line}, {@code charPositionInLine}, and {@code msg} using the following format.</p>

<pre> line <em>line</em>:<em>charPositionInLine</em> <em>msg</em> </pre>
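
The same SyntaxError signature is what a custom listener implements. A sketch that embeds DefaultErrorListener (which supplies no-op Report* methods) and collects messages instead of writing them to stderr the way ConsoleErrorListener does:

import (
    "fmt"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

// collectingErrorListener gathers syntax errors so they can be inspected
// after parsing rather than printed as they occur.
type collectingErrorListener struct {
    *antlr.DefaultErrorListener
    errors []string
}

func newCollectingErrorListener() *collectingErrorListener {
    return &collectingErrorListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
}

func (c *collectingErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
    c.errors = append(c.errors, fmt.Sprintf("line %d:%d %s", line, column, msg))
}

Install it on a lexer or parser with RemoveErrorListeners followed by AddErrorListener, both listed under BaseRecognizer above.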

type DFA Uses

type DFA struct {
    // contains filtered or unexported fields
}

func NewDFA Uses

func NewDFA(atnStartState DecisionState, decision int) *DFA

func (*DFA) String Uses

func (d *DFA) String(literalNames []string, symbolicNames []string) string

func (*DFA) ToLexerString Uses

func (d *DFA) ToLexerString() string

type DFASerializer Uses

type DFASerializer struct {
    // contains filtered or unexported fields
}

DFASerializer is a DFA walker that knows how to dump them to serialized strings.

func NewDFASerializer Uses

func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer

func (*DFASerializer) GetStateString Uses

func (d *DFASerializer) GetStateString(s *DFAState) string

func (*DFASerializer) String Uses

func (d *DFASerializer) String() string

type DFAState Uses

type DFAState struct {
    // contains filtered or unexported fields
}

DFAState represents a set of possible ATN configurations. As Aho, Sethi, Ullman p. 117 says: "The DFA uses its state to keep track of all possible states the ATN can be in after reading each input symbol. That is to say, after reading input a1a2..an, the DFA is in a state that represents the subset T of the states of the ATN that are reachable from the ATN's start state along some path labeled a1a2..an." In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of states the ATN could be in. We need to track the alt predicted by each state as well, however. More importantly, we need to maintain a stack of states, tracking the closure operations as they jump from rule to rule, emulating rule invocations (method calls). I have to add a stack to simulate the proper lookahead sequences for the underlying LL grammar from which the ATN was derived.

I use a set of ATNConfig objects, not simple states. An ATNConfig is both a state (ala normal conversion) and a RuleContext describing the chain of rules (if any) followed to arrive at that state.

A DFAState may have multiple references to a particular state, but with different ATN contexts (with same or different alts) meaning that state was reached via a different set of rule invocations.

func NewDFAState Uses

func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState

func (*DFAState) GetAltSet Uses

func (d *DFAState) GetAltSet() *Set

GetAltSet gets the set of all alts mentioned by all ATN configurations in d.

func (*DFAState) String Uses

func (d *DFAState) String() string

type DecisionState Uses

type DecisionState interface {
    ATNState
    // contains filtered or unexported methods
}

type DefaultErrorListener Uses

type DefaultErrorListener struct {
}

func NewDefaultErrorListener Uses

func NewDefaultErrorListener() *DefaultErrorListener

func (*DefaultErrorListener) ReportAmbiguity Uses

func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)

func (*DefaultErrorListener) ReportAttemptingFullContext Uses

func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)

func (*DefaultErrorListener) ReportContextSensitivity Uses

func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)

func (*DefaultErrorListener) SyntaxError Uses

func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)

type DefaultErrorStrategy Uses

type DefaultErrorStrategy struct {
    // contains filtered or unexported fields
}

This is the default implementation of {@link ANTLRErrorStrategy} used for error Reporting and recovery in ANTLR parsers.

func NewDefaultErrorStrategy Uses

func NewDefaultErrorStrategy() *DefaultErrorStrategy

func (*DefaultErrorStrategy) GetExpectedTokens Uses

func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet

func (*DefaultErrorStrategy) GetMissingSymbol Uses

func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token

Conjure up a missing token during error recovery.

The recognizer attempts to recover from single missing symbols. But, actions might refer to that missing symbol. For example, x=ID {f($x)}. The action clearly assumes that there has been an identifier Matched previously and that $x points at that token. If that token is missing, but the next token in the stream is what we want, we assume that this token is missing and we keep going. Because we have to return some token to replace the missing token, we have to conjure one up. This method gives the user control over the tokens returned for missing tokens. Mostly, you will want to create something special for identifier tokens. For literals such as '{' and ',', the default action in the parser or tree parser works. It simply creates a CommonToken of the appropriate type. The text will be the token. If you change what tokens must be created by the lexer, override this method to create the appropriate tokens.

func (*DefaultErrorStrategy) GetTokenErrorDisplay Uses

func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string

How should a token be displayed in an error message? The default is to display just the text, but during development you might want to have a lot of information spit out. Override in that case to use t.String() (which, for CommonToken, dumps everything about the token). This is better than forcing you to override a method in your token objects because you don't have to go modify your lexer so that it creates a new token object type.

func (*DefaultErrorStrategy) Recover Uses

func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException)

{@inheritDoc}

<p>The default implementation reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set--loosely the set of tokens that can follow the current rule.</p>

func (*DefaultErrorStrategy) RecoverInline Uses

func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token

<p>The default implementation attempts to recover from the mismatched input by using single token insertion and deletion as described below. If the recovery attempt fails, this method panics with an {@link InputMisMatchException}.</p>

<p><strong>EXTRA TOKEN</strong> (single token deletion)</p>

<p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the right token, however, then assume {@code LA(1)} is some extra spurious token and delete it. Then consume and return the next token (which was the {@code LA(2)} token) as the successful result of the Match operation.</p>

<p>This recovery strategy is implemented by {@link //singleTokenDeletion}.</p>

<p><strong>MISSING TOKEN</strong> (single token insertion)</p>

<p>If current token (at {@code LA(1)}) is consistent with what could come after the expected {@code LA(1)} token, then assume the token is missing and use the parser's {@link TokenFactory} to create it on the fly. The "insertion" is performed by returning the created token as the successful result of the Match operation.</p>

<p>This recovery strategy is implemented by {@link //singleTokenInsertion}.</p>

<p><strong>EXAMPLE</strong></p>

<p>For example, input {@code i=(3} is clearly missing the {@code ')'}. When the parser returns from the nested call to {@code expr}, it will have call chain:</p>

<pre> stat &rarr; expr &rarr; atom </pre>

and it will be trying to Match the {@code ')'} at this point in the derivation:

<pre> =&gt; ID '=' '(' INT ')' ('+' atom)* ';' ^ </pre>

The attempt to Match {@code ')'} will fail when it sees {@code ';'} and call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'} is in the set of tokens that can follow the {@code ')'} token reference in rule {@code atom}. It can assume that you forgot the {@code ')'}.

func (*DefaultErrorStrategy) ReportError Uses

func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException)

{@inheritDoc}

<p>The default implementation returns immediately if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} and dispatches the Reporting task based on the runtime type of {@code e} according to the following table.</p>

<ul> <li>{@link NoViableAltException}: Dispatches the call to {@link //ReportNoViableAlternative}</li> <li>{@link InputMisMatchException}: Dispatches the call to {@link //ReportInputMisMatch}</li> <li>{@link FailedPredicateException}: Dispatches the call to {@link //ReportFailedPredicate}</li> <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report the exception</li> </ul>

func (*DefaultErrorStrategy) ReportFailedPredicate Uses

func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException)

This is called by {@link //ReportError} when the exception is a {@link FailedPredicateException}.

@see //ReportError

@param recognizer the parser instance @param e the recognition exception

func (*DefaultErrorStrategy) ReportInputMisMatch Uses

func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException)

This is called by {@link //ReportError} when the exception is an {@link InputMisMatchException}.

@see //ReportError

@param recognizer the parser instance @param e the recognition exception

func (*DefaultErrorStrategy) ReportMatch Uses

func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser)

{@inheritDoc}

<p>The default implementation simply calls {@link //endErrorCondition}.</p>

func (*DefaultErrorStrategy) ReportMissingToken Uses

func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser)

This method is called to Report a syntax error which requires the insertion of a missing token into the input stream. At the time this method is called, the missing token has not yet been inserted. When this method returns, {@code recognizer} is in error recovery mode.

<p>This method is called when {@link //singleTokenInsertion} identifies single-token insertion as a viable recovery strategy for a mismatched input error.</p>

<p>The default implementation simply returns if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to enter error recovery mode, followed by calling {@link Parser//NotifyErrorListeners}.</p>

@param recognizer the parser instance

func (*DefaultErrorStrategy) ReportNoViableAlternative Uses

func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException)

This is called by {@link //ReportError} when the exception is a {@link NoViableAltException}.

@see //ReportError

@param recognizer the parser instance @param e the recognition exception

func (*DefaultErrorStrategy) ReportUnwantedToken Uses

func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser)

This method is called to Report a syntax error which requires the removal of a token from the input stream. At the time this method is called, the erroneous symbol is the current {@code LT(1)} symbol and has not yet been removed from the input stream. When this method returns, {@code recognizer} is in error recovery mode.

<p>This method is called when {@link //singleTokenDeletion} identifies single-token deletion as a viable recovery strategy for a mismatched input error.</p>

<p>The default implementation simply returns if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to enter error recovery mode, followed by calling {@link Parser//NotifyErrorListeners}.</p>

@param recognizer the parser instance

func (*DefaultErrorStrategy) SingleTokenDeletion Uses

func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token

This method implements the single-token deletion inline error recovery strategy. It is called by {@link //recoverInline} to attempt to recover from mismatched input. If this method returns nil, the parser and error handler state will not have changed. If this method returns non-nil, {@code recognizer} will <em>not</em> be in error recovery mode since the returned token was a successful Match.

<p>If the single-token deletion is successful, this method calls {@link //ReportUnwantedToken} to Report the error, followed by {@link Parser//consume} to actually "delete" the extraneous token. Then, before returning, {@link //ReportMatch} is called to signal a successful Match.</p>

@param recognizer the parser instance @return the successfully Matched {@link Token} instance if single-token deletion successfully recovers from the mismatched input, otherwise {@code nil}

func (*DefaultErrorStrategy) SingleTokenInsertion Uses

func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool

This method implements the single-token insertion inline error recovery strategy. It is called by {@link //recoverInline} if the single-token deletion strategy fails to recover from the mismatched input. If this method returns {@code true}, {@code recognizer} will be in error recovery mode.

<p>This method determines whether or not single-token insertion is viable by checking if the {@code LA(1)} input symbol could be successfully Matched if it were instead the {@code LA(2)} symbol. If this method returns {@code true}, the caller is responsible for creating and inserting a token with the correct type to produce this behavior.</p>

@param recognizer the parser instance @return {@code true} if single-token insertion is a viable recovery strategy for the current mismatched input, otherwise {@code false}

func (*DefaultErrorStrategy) Sync Uses

func (d *DefaultErrorStrategy) Sync(recognizer Parser)

The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure that the current lookahead symbol is consistent with what we were expecting at this point in the ATN. You can call this anytime but ANTLR only generates code to check before subrules/loops and each iteration.

<p>Implements Jim Idle's magic Sync mechanism in closures and optional subrules. E.g.,</p>

<pre> a : Sync ( stuff Sync )* Sync : {consume to what can follow Sync} </pre>

At the start of a sub rule upon error, {@link //Sync} performs single token deletion, if possible. If it can't do that, it bails on the current rule and uses the default error recovery, which consumes until the reSynchronization set of the current rule.

<p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block with an empty alternative), then the expected set includes what follows the subrule.</p>

<p>During loop iteration, it consumes until it sees a token that can start a sub rule or what follows loop. Yes, that is pretty aggressive. We opt to stay in the loop as long as possible.</p>

<p><strong>ORIGINS</strong></p>

<p>Previous versions of ANTLR did a poor job of their recovery within loops. A single mismatched token or missing token would force the parser to bail out of the entire rule surrounding the loop. So, for rule</p>

<pre> classfunc : 'class' ID '{' member* '}' </pre>

input with an extra token between members would force the parser to consume until it found the next class definition rather than the next member definition of the current class.

<p>This functionality cost a little bit of effort because the parser has to compare the token set at the start of the loop and at each iteration. If for some reason speed is suffering for you, you can turn off this functionality by simply overriding this method as a blank { }.</p>
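
As the paragraph above suggests, Sync can be turned off by supplying an empty override. A hedged sketch: it assumes the parser invokes Sync through the ErrorStrategy interface, so the embedded default is bypassed for this one method; the strategy is installed with Parser.SetErrorHandler (listed below).

import "github.com/antlr/antlr4/runtime/Go/antlr"

// noSyncErrorStrategy behaves like DefaultErrorStrategy except that the
// Sync check before subrules and loop iterations is disabled, trading some
// recovery quality for speed as described above.
type noSyncErrorStrategy struct {
    *antlr.DefaultErrorStrategy
}

// Sync is intentionally a no-op.
func (s *noSyncErrorStrategy) Sync(recognizer antlr.Parser) {}

// Typical installation, where p is any antlr.Parser (for example a
// generated parser):
//
//     p.SetErrorHandler(&noSyncErrorStrategy{DefaultErrorStrategy: antlr.NewDefaultErrorStrategy()})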

type DiagnosticErrorListener Uses

type DiagnosticErrorListener struct {
    *DefaultErrorListener
    // contains filtered or unexported fields
}

func NewDiagnosticErrorListener Uses

func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener

func (*DiagnosticErrorListener) ReportAmbiguity Uses

func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)

func (*DiagnosticErrorListener) ReportAttemptingFullContext Uses

func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)

func (*DiagnosticErrorListener) ReportContextSensitivity Uses

func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)

type DoubleDict Uses

type DoubleDict struct {
    // contains filtered or unexported fields
}

func NewDoubleDict Uses

func NewDoubleDict() *DoubleDict

func (*DoubleDict) Get Uses

func (d *DoubleDict) Get(a, b int) interface{}

type EmptyPredictionContext Uses

type EmptyPredictionContext struct {
    *BaseSingletonPredictionContext
}

func NewEmptyPredictionContext Uses

func NewEmptyPredictionContext() *EmptyPredictionContext

func (*EmptyPredictionContext) GetParent Uses

func (e *EmptyPredictionContext) GetParent(index int) PredictionContext

func (*EmptyPredictionContext) String Uses

func (e *EmptyPredictionContext) String() string

type EpsilonTransition Uses

type EpsilonTransition struct {
    *BaseTransition
    // contains filtered or unexported fields
}

func NewEpsilonTransition Uses

func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition

func (*EpsilonTransition) Matches Uses

func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*EpsilonTransition) String Uses

func (t *EpsilonTransition) String() string

type ErrorListener Uses

type ErrorListener interface {
    SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
    ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
    ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
    ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}

type ErrorNode Uses

type ErrorNode interface {
    TerminalNode
    // contains filtered or unexported methods
}

type ErrorNodeImpl Uses

type ErrorNodeImpl struct {
    *TerminalNodeImpl
}

func NewErrorNodeImpl Uses

func NewErrorNodeImpl(token Token) *ErrorNodeImpl

func (*ErrorNodeImpl) Accept Uses

func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{}

type ErrorStrategy Uses

type ErrorStrategy interface {
    RecoverInline(Parser) Token
    Recover(Parser, RecognitionException)
    Sync(Parser)

    ReportError(Parser, RecognitionException)
    ReportMatch(Parser)
    // contains filtered or unexported methods
}

type FailedPredicateException Uses

type FailedPredicateException struct {
    *BaseRecognitionException
    // contains filtered or unexported fields
}

func NewFailedPredicateException Uses

func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException

type FileStream Uses

type FileStream struct {
    *InputStream
    // contains filtered or unexported fields
}

func NewFileStream Uses

func NewFileStream(fileName string) (*FileStream, error)

func (*FileStream) GetSourceName Uses

func (f *FileStream) GetSourceName() string

type IATNSimulator Uses

type IATNSimulator interface {
    SharedContextCache() *PredictionContextCache
    ATN() *ATN
    DecisionToDFA() []*DFA
}

type ILexerATNSimulator Uses

type ILexerATNSimulator interface {
    IATNSimulator

    Match(input CharStream, mode int) int
    GetCharPositionInLine() int
    GetLine() int
    GetText(input CharStream) string
    Consume(input CharStream)
    // contains filtered or unexported methods
}

type InputMisMatchException Uses

type InputMisMatchException struct {
    *BaseRecognitionException
}

func NewInputMisMatchException Uses

func NewInputMisMatchException(recognizer Parser) *InputMisMatchException

This signifies any kind of mismatched input exceptions such as when the current input does not Match the expected token.

type InputStream Uses

type InputStream struct {
    // contains filtered or unexported fields
}

func NewInputStream Uses

func NewInputStream(data string) *InputStream

func (*InputStream) Consume Uses

func (is *InputStream) Consume()

func (*InputStream) GetSourceName Uses

func (*InputStream) GetSourceName() string

func (*InputStream) GetText Uses

func (is *InputStream) GetText(start int, stop int) string

func (*InputStream) GetTextFromInterval Uses

func (is *InputStream) GetTextFromInterval(i *Interval) string

func (*InputStream) GetTextFromTokens Uses

func (is *InputStream) GetTextFromTokens(start, stop Token) string

func (*InputStream) Index Uses

func (is *InputStream) Index() int

func (*InputStream) LA Uses

func (is *InputStream) LA(offset int) int

func (*InputStream) LT Uses

func (is *InputStream) LT(offset int) int

func (*InputStream) Mark Uses

func (is *InputStream) Mark() int

Mark and Release do nothing; we have the entire buffer.

func (*InputStream) Release Uses

func (is *InputStream) Release(marker int)

func (*InputStream) Seek Uses

func (is *InputStream) Seek(index int)

func (*InputStream) Size Uses

func (is *InputStream) Size() int

func (*InputStream) String Uses

func (is *InputStream) String() string

type InsertAfterOp Uses

type InsertAfterOp struct {
    BaseRewriteOperation
}

func NewInsertAfterOp Uses

func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp

func (*InsertAfterOp) Execute Uses

func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int

func (*InsertAfterOp) String Uses

func (op *InsertAfterOp) String() string

type InsertBeforeOp Uses

type InsertBeforeOp struct {
    BaseRewriteOperation
}

func NewInsertBeforeOp Uses

func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp

func (*InsertBeforeOp) Execute Uses

func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int

func (*InsertBeforeOp) String Uses

func (op *InsertBeforeOp) String() string

type IntStack Uses

type IntStack []int

func (*IntStack) Pop Uses

func (s *IntStack) Pop() (int, error)

func (*IntStack) Push Uses

func (s *IntStack) Push(e int)

type IntStream Uses

type IntStream interface {
    Consume()
    LA(int) int
    Mark() int
    Release(marker int)
    Index() int
    Seek(index int)
    Size() int
    GetSourceName() string
}

type InterpreterRuleContext Uses

type InterpreterRuleContext interface {
    ParserRuleContext
}

type Interval Uses

type Interval struct {
    Start int
    Stop  int
}

func NewInterval Uses

func NewInterval(start, stop int) *Interval

stop is not included!
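
A quick illustration of the exclusive Stop bound, assuming Contains honors it as the note above states:

import (
    "fmt"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

func demoInterval() {
    iv := antlr.NewInterval(3, 5) // covers 3 and 4; 5 is excluded
    fmt.Println(iv.Contains(4))   // true
    fmt.Println(iv.Contains(5))   // false, since Stop is not included
}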

func (*Interval) Contains Uses

func (i *Interval) Contains(item int) bool

func (*Interval) String Uses

func (i *Interval) String() string

type IntervalSet Uses

type IntervalSet struct {
    // contains filtered or unexported fields
}

func NewIntervalSet Uses

func NewIntervalSet() *IntervalSet

func (*IntervalSet) String Uses

func (i *IntervalSet) String() string

func (*IntervalSet) StringVerbose Uses

func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string

type LL1Analyzer Uses

type LL1Analyzer struct {
    // contains filtered or unexported fields
}

func NewLL1Analyzer Uses

func NewLL1Analyzer(atn *ATN) *LL1Analyzer

func (*LL1Analyzer) Look Uses

func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet

Compute set of tokens that can follow {@code s} in the ATN in the specified {@code ctx}.

<p>If {@code ctx} is {@code nil} and the end of the rule containing {@code s} is reached, {@link Token//EPSILON} is added to the result set. If {@code ctx} is not {@code nil} and the end of the outermost rule is reached, {@link Token//EOF} is added to the result set.</p>

@param s the ATN state @param stopState the ATN state to stop at. This can be a {@link BlockEndState} to detect epsilon paths through a closure. @param ctx the complete parser context, or {@code nil} if the context should be ignored

@return The set of tokens that can follow {@code s} in the ATN in the specified {@code ctx}.

type Lexer Uses

type Lexer interface {
    TokenSource
    Recognizer

    Emit() Token

    SetChannel(int)
    PushMode(int)
    PopMode() int
    SetType(int)
    SetMode(int)
}

type LexerATNConfig Uses

type LexerATNConfig struct {
    *BaseATNConfig
    // contains filtered or unexported fields
}

func NewLexerATNConfig1 Uses

func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig

func NewLexerATNConfig2 Uses

func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig

func NewLexerATNConfig3 Uses

func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig

func NewLexerATNConfig4 Uses

func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig

func NewLexerATNConfig5 Uses

func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig

func NewLexerATNConfig6 Uses

func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig

type LexerATNSimulator Uses

type LexerATNSimulator struct {
    *BaseATNSimulator

    Line               int
    CharPositionInLine int

    MatchCalls int
    // contains filtered or unexported fields
}

func NewLexerATNSimulator Uses

func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator

func (*LexerATNSimulator) Consume Uses

func (l *LexerATNSimulator) Consume(input CharStream)

func (*LexerATNSimulator) GetCharPositionInLine Uses

func (l *LexerATNSimulator) GetCharPositionInLine() int

func (*LexerATNSimulator) GetLine Uses

func (l *LexerATNSimulator) GetLine() int

func (*LexerATNSimulator) GetText Uses

func (l *LexerATNSimulator) GetText(input CharStream) string

Get the text Matched so far for the current token.

func (*LexerATNSimulator) GetTokenName Uses

func (l *LexerATNSimulator) GetTokenName(tt int) string

func (*LexerATNSimulator) Match Uses

func (l *LexerATNSimulator) Match(input CharStream, mode int) int

func (*LexerATNSimulator) MatchATN Uses

func (l *LexerATNSimulator) MatchATN(input CharStream) int

type LexerAction Uses

type LexerAction interface {
    // contains filtered or unexported methods
}

type LexerActionExecutor Uses

type LexerActionExecutor struct {
    // contains filtered or unexported fields
}

func LexerActionExecutorappend Uses

func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor

Creates a {@link LexerActionExecutor} which executes the actions for the input {@code lexerActionExecutor} followed by a specified {@code lexerAction}.

@param lexerActionExecutor The executor for actions already traversed by the lexer while Matching a token within a particular {@link LexerATNConfig}. If this is {@code nil}, the method behaves as though it were an empty executor. @param lexerAction The lexer action to execute after the actions specified in {@code lexerActionExecutor}.

@return A {@link LexerActionExecutor} for executing the combined actions of {@code lexerActionExecutor} and {@code lexerAction}.

func NewLexerActionExecutor Uses

func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor

type LexerChannelAction Uses

type LexerChannelAction struct {
    *BaseLexerAction
    // contains filtered or unexported fields
}

Implements the {@code channel} lexer action by calling {@link Lexer//setChannel} with the assigned channel. Constructs a new {@code channel} action with the specified channel value. @param channel The channel value to pass to {@link Lexer//setChannel}.

func NewLexerChannelAction Uses

func NewLexerChannelAction(channel int) *LexerChannelAction

func (*LexerChannelAction) String Uses

func (l *LexerChannelAction) String() string

type LexerCustomAction Uses

type LexerCustomAction struct {
    *BaseLexerAction
    // contains filtered or unexported fields
}

func NewLexerCustomAction Uses

func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction

type LexerDFASerializer Uses

type LexerDFASerializer struct {
    *DFASerializer
}

func NewLexerDFASerializer Uses

func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer

func (*LexerDFASerializer) String Uses

func (l *LexerDFASerializer) String() string

type LexerIndexedCustomAction Uses

type LexerIndexedCustomAction struct {
    *BaseLexerAction
    // contains filtered or unexported fields
}

Constructs a new indexed custom action by associating a character offset with a {@link LexerAction}.

<p>Note: This class is only required for lexer actions for which {@link LexerAction//isPositionDependent} returns {@code true}.</p>

@param offset The offset into the input {@link CharStream}, relative to the token start index, at which the specified lexer action should be executed. @param action The lexer action to execute at a particular offset in the input {@link CharStream}.

func NewLexerIndexedCustomAction Uses

func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction

type LexerModeAction Uses

type LexerModeAction struct {
    *BaseLexerAction
    // contains filtered or unexported fields
}

Implements the {@code mode} lexer action by calling {@link Lexer//mode} with the assigned mode.

func NewLexerModeAction Uses

func NewLexerModeAction(mode int) *LexerModeAction

func (*LexerModeAction) String Uses

func (l *LexerModeAction) String() string

type LexerMoreAction Uses

type LexerMoreAction struct {
    *BaseLexerAction
}

func NewLexerMoreAction Uses

func NewLexerMoreAction() *LexerMoreAction

func (*LexerMoreAction) String Uses

func (l *LexerMoreAction) String() string

type LexerNoViableAltException Uses

type LexerNoViableAltException struct {
    *BaseRecognitionException
    // contains filtered or unexported fields
}

func NewLexerNoViableAltException Uses

func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException

func (*LexerNoViableAltException) String Uses

func (l *LexerNoViableAltException) String() string

type LexerPopModeAction Uses

type LexerPopModeAction struct {
    *BaseLexerAction
}

Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.

<p>The {@code popMode} command does not have any parameters, so this action is implemented as a singleton instance exposed by {@link //INSTANCE}.</p>

func NewLexerPopModeAction Uses

func NewLexerPopModeAction() *LexerPopModeAction

func (*LexerPopModeAction) String Uses

func (l *LexerPopModeAction) String() string

type LexerPushModeAction Uses

type LexerPushModeAction struct {
    *BaseLexerAction
    // contains filtered or unexported fields
}

Implements the {@code pushMode} lexer action by calling {@link Lexer//pushMode} with the assigned mode.

func NewLexerPushModeAction Uses

func NewLexerPushModeAction(mode int) *LexerPushModeAction

func (*LexerPushModeAction) String Uses

func (l *LexerPushModeAction) String() string

type LexerSkipAction Uses

type LexerSkipAction struct {
    *BaseLexerAction
}

Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.

<p>The {@code Skip} command does not have any parameters, so this action is implemented as a singleton instance exposed by {@link //INSTANCE}.</p>

func NewLexerSkipAction Uses

func NewLexerSkipAction() *LexerSkipAction

func (*LexerSkipAction) String Uses

func (l *LexerSkipAction) String() string

type LexerTypeAction Uses

type LexerTypeAction struct {
    *BaseLexerAction
    // contains filtered or unexported fields
}

Implements the {@code type} lexer action by calling {@link Lexer//setType} with the assigned type.

func NewLexerTypeAction Uses

func NewLexerTypeAction(thetype int) *LexerTypeAction

func (*LexerTypeAction) String Uses

func (l *LexerTypeAction) String() string

type LoopEndState Uses

type LoopEndState struct {
    *BaseATNState
    // contains filtered or unexported fields
}

LoopEndState marks the end of a * or + loop.

func NewLoopEndState Uses

func NewLoopEndState() *LoopEndState

type LoopEndStateIntPair Uses

type LoopEndStateIntPair struct {
    // contains filtered or unexported fields
}

type NoViableAltException Uses

type NoViableAltException struct {
    *BaseRecognitionException
    // contains filtered or unexported fields
}

func NewNoViableAltException Uses

func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException

Indicates that the parser could not decide which of two or more paths to take based upon the remaining input. It tracks the starting token of the offending input and also knows where the parser was in the various paths when the error occurred. Reported by ReportNoViableAlternative().

type NotSetTransition Uses

type NotSetTransition struct {
    *SetTransition
}

func NewNotSetTransition Uses

func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition

func (*NotSetTransition) Matches Uses

func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*NotSetTransition) String Uses

func (t *NotSetTransition) String() string

type OR Uses

type OR struct {
    // contains filtered or unexported fields
}

func NewOR Uses

func NewOR(a, b SemanticContext) *OR

func (*OR) String Uses

func (o *OR) String() string

type OrderedATNConfigSet Uses

type OrderedATNConfigSet struct {
    *BaseATNConfigSet
}

func NewOrderedATNConfigSet Uses

func NewOrderedATNConfigSet() *OrderedATNConfigSet

type ParseCancellationException Uses

type ParseCancellationException struct {
}

func NewParseCancellationException Uses

func NewParseCancellationException() *ParseCancellationException

type ParseTree Uses

type ParseTree interface {
    SyntaxTree

    Accept(Visitor ParseTreeVisitor) interface{}
    GetText() string

    ToStringTree([]string, Recognizer) string
}

func TreesDescendants Uses

func TreesDescendants(t ParseTree) []ParseTree

func TreesFindAllTokenNodes Uses

func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree

func TreesfindAllNodes Uses

func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree

func TreesfindAllRuleNodes Uses

func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree

type ParseTreeListener Uses

type ParseTreeListener interface {
    VisitTerminal(node TerminalNode)
    VisitErrorNode(node ErrorNode)
    EnterEveryRule(ctx ParserRuleContext)
    ExitEveryRule(ctx ParserRuleContext)
}

type ParseTreeVisitor Uses

type ParseTreeVisitor interface {
    Visit(tree ParseTree) interface{}
    VisitChildren(node RuleNode) interface{}
    VisitTerminal(node TerminalNode) interface{}
    VisitErrorNode(node ErrorNode) interface{}
}

type ParseTreeWalker Uses

type ParseTreeWalker struct {
}

func NewParseTreeWalker Uses

func NewParseTreeWalker() *ParseTreeWalker

func (*ParseTreeWalker) EnterRule Uses

func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode)

The discovery of a rule node involves sending two events: the generic {@link ParseTreeListener//EnterEveryRule} and a {@link RuleContext}-specific event. First we trigger the generic and then the rule-specific event. We do them in reverse order upon finishing the node.

func (*ParseTreeWalker) ExitRule Uses

func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode)

func (*ParseTreeWalker) Walk Uses

func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree)
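
A sketch that puts the EnterRule/ExitRule dispatch described above to use: a listener implementing ParseTreeListener directly (generated parsers also ship Base*Listener types you would normally embed) counts how many rule contexts a walk enters. The tree is assumed to come from a generated parser's start rule.

import (
    "fmt"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

// ruleCounter counts rule contexts entered during a walk.
type ruleCounter struct {
    rules int
}

func (r *ruleCounter) VisitTerminal(node antlr.TerminalNode)      {}
func (r *ruleCounter) VisitErrorNode(node antlr.ErrorNode)        {}
func (r *ruleCounter) EnterEveryRule(ctx antlr.ParserRuleContext) { r.rules++ }
func (r *ruleCounter) ExitEveryRule(ctx antlr.ParserRuleContext)  {}

// countRules walks tree and reports how many rule contexts were entered.
func countRules(tree antlr.ParseTree) {
    counter := &ruleCounter{}
    antlr.NewParseTreeWalker().Walk(counter, tree)
    fmt.Println("rules entered:", counter.rules)
}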

type Parser Uses

type Parser interface {
    Recognizer

    GetInterpreter() *ParserATNSimulator

    GetTokenStream() TokenStream
    GetTokenFactory() TokenFactory
    GetParserRuleContext() ParserRuleContext
    SetParserRuleContext(ParserRuleContext)
    Consume() Token
    GetParseListeners() []ParseTreeListener

    GetErrorHandler() ErrorStrategy
    SetErrorHandler(ErrorStrategy)
    GetInputStream() IntStream
    GetCurrentToken() Token
    GetExpectedTokens() *IntervalSet
    NotifyErrorListeners(string, Token, RecognitionException)
    IsExpectedToken(int) bool
    GetPrecedence() int
    GetRuleInvocationStack(ParserRuleContext) []string
}

type ParserATNSimulator Uses

type ParserATNSimulator struct {
    *BaseATNSimulator
    // contains filtered or unexported fields
}

func NewParserATNSimulator Uses

func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator

func (*ParserATNSimulator) AdaptivePredict Uses

func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int

func (*ParserATNSimulator) GetAltThatFinishedDecisionEntryRule Uses

func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int

func (*ParserATNSimulator) GetPredictionMode Uses

func (p *ParserATNSimulator) GetPredictionMode() int

func (*ParserATNSimulator) GetTokenName Uses

func (p *ParserATNSimulator) GetTokenName(t int) string

func (*ParserATNSimulator) ReportAmbiguity Uses

func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
    exact bool, ambigAlts *BitSet, configs ATNConfigSet)

If we are doing context-sensitive parsing, we know it is an ambiguity and not a conflict.

func (*ParserATNSimulator) ReportAttemptingFullContext Uses

func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int)

func (*ParserATNSimulator) ReportContextSensitivity Uses

func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int)

func (*ParserATNSimulator) SetPredictionMode Uses

func (p *ParserATNSimulator) SetPredictionMode(v int)

type ParserRuleContext Uses

type ParserRuleContext interface {
    RuleContext

    SetException(RecognitionException)

    AddTokenNode(token Token) *TerminalNodeImpl
    AddErrorNode(badToken Token) *ErrorNodeImpl

    EnterRule(listener ParseTreeListener)
    ExitRule(listener ParseTreeListener)

    SetStart(Token)
    GetStart() Token

    SetStop(Token)
    GetStop() Token

    AddChild(child RuleContext) RuleContext
    RemoveLastChild()
}

type PlusBlockStartState Uses

type PlusBlockStartState struct {
    *BaseBlockStartState
    // contains filtered or unexported fields
}

PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a decision state; we don't use it for code generation. Somebody might need it, so it is included for completeness. In reality, PlusLoopbackState is the real decision-making node for A+.

func NewPlusBlockStartState Uses

func NewPlusBlockStartState() *PlusBlockStartState

type PlusLoopbackState Uses

type PlusLoopbackState struct {
    *BaseDecisionState
}

PlusLoopbackState is a decision state for A+ and (A|B)+. It has two transitions: one to the loop back to start of the block, and one to exit.

func NewPlusLoopbackState Uses

func NewPlusLoopbackState() *PlusLoopbackState

type PrecedencePredicate Uses

type PrecedencePredicate struct {
    // contains filtered or unexported fields
}

func NewPrecedencePredicate Uses

func NewPrecedencePredicate(precedence int) *PrecedencePredicate

func PrecedencePredicatefilterPrecedencePredicates Uses

func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate

func (*PrecedencePredicate) String Uses

func (p *PrecedencePredicate) String() string

type PrecedencePredicateTransition Uses

type PrecedencePredicateTransition struct {
    *BaseAbstractPredicateTransition
    // contains filtered or unexported fields
}

func NewPrecedencePredicateTransition Uses

func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition

func (*PrecedencePredicateTransition) Matches Uses

func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*PrecedencePredicateTransition) String Uses

func (t *PrecedencePredicateTransition) String() string

type PredPrediction Uses

type PredPrediction struct {
    // contains filtered or unexported fields
}

PredPrediction maps a predicate to a predicted alternative.

func NewPredPrediction Uses

func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction

func (*PredPrediction) String Uses

func (p *PredPrediction) String() string

type Predicate Uses

type Predicate struct {
    // contains filtered or unexported fields
}

func NewPredicate Uses

func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate

func (*Predicate) String Uses

func (p *Predicate) String() string

type PredicateTransition Uses

type PredicateTransition struct {
    *BaseAbstractPredicateTransition
    // contains filtered or unexported fields
}

func NewPredicateTransition Uses

func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition

func (*PredicateTransition) Matches Uses

func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*PredicateTransition) String Uses

func (t *PredicateTransition) String() string

type PredictionContext Uses

type PredictionContext interface {
    GetParent(int) PredictionContext

    String() string
    // contains filtered or unexported methods
}

func SingletonBasePredictionContextCreate Uses

func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext

type PredictionContextCache Uses

type PredictionContextCache struct {
    // contains filtered or unexported fields
}

func NewPredictionContextCache Uses

func NewPredictionContextCache() *PredictionContextCache

func (*PredictionContextCache) Get Uses

func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext

type ProxyErrorListener Uses

type ProxyErrorListener struct {
    *DefaultErrorListener
    // contains filtered or unexported fields
}

func NewProxyErrorListener Uses

func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener

func (*ProxyErrorListener) ReportAmbiguity Uses

func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)

func (*ProxyErrorListener) ReportAttemptingFullContext Uses

func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)

func (*ProxyErrorListener) ReportContextSensitivity Uses

func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)

func (*ProxyErrorListener) SyntaxError Uses

func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)

type RangeTransition Uses

type RangeTransition struct {
    *BaseTransition
    // contains filtered or unexported fields
}

func NewRangeTransition Uses

func NewRangeTransition(target ATNState, start, stop int) *RangeTransition

func (*RangeTransition) Matches Uses

func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*RangeTransition) String Uses

func (t *RangeTransition) String() string

type RecognitionException Uses

type RecognitionException interface {
    GetOffendingToken() Token
    GetMessage() string
    GetInputStream() IntStream
}

type Recognizer Uses

type Recognizer interface {
    GetLiteralNames() []string
    GetSymbolicNames() []string
    GetRuleNames() []string

    Sempred(RuleContext, int, int) bool
    Precpred(RuleContext, int) bool

    GetState() int
    SetState(int)
    Action(RuleContext, int, int)
    AddErrorListener(ErrorListener)
    RemoveErrorListeners()
    GetATN() *ATN
    GetErrorListenerDispatch() ErrorListener
}
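
Generated lexers and parsers implement Recognizer, so the listener-management methods are usually reached through generated types. The sketch below is an illustration only: NewMyParser is a hypothetical generated constructor, and tokens is assumed to be a previously built CommonTokenStream.

    // NewMyParser is a hypothetical generated parser constructor; tokens is a
    // previously built antlr.CommonTokenStream (see the rewriter sketch below).
    p := NewMyParser(tokens)
    p.RemoveErrorListeners()                                   // drop the default listeners
    p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) // report exact ambiguities only
    fmt.Println("rules in the grammar:", len(p.GetRuleNames()))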

type ReplaceOp Uses

type ReplaceOp struct {
    BaseRewriteOperation
    LastIndex int
}

ReplaceOp replaces a range of tokens x..y with new text; internally, such a range replacement may be represented by (y-x)+1 ReplaceOp instructions.

func NewReplaceOp Uses

func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp

func (*ReplaceOp) Execute Uses

func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int

func (*ReplaceOp) String Uses

func (op *ReplaceOp) String() string

type RewriteOperation Uses

type RewriteOperation interface {
    // Execute the rewrite operation by possibly adding to the buffer.
    // Return the index of the next token to operate on.
    Execute(buffer *bytes.Buffer) int
    String() string
    GetInstructionIndex() int
    GetIndex() int
    GetText() string
    GetOpName() string
    GetTokens() TokenStream
    SetInstructionIndex(val int)
    SetIndex(int)
    SetText(string)
    SetOpName(string)
    SetTokens(TokenStream)
}

type RuleContext Uses

type RuleContext interface {
    RuleNode

    GetInvokingState() int
    SetInvokingState(int)

    GetRuleIndex() int
    IsEmpty() bool

    GetAltNumber() int
    SetAltNumber(altNumber int)

    String([]string, RuleContext) string
}

type RuleNode Uses

type RuleNode interface {
    ParseTree

    GetRuleContext() RuleContext
    GetBaseRuleContext() *BaseRuleContext
}

type RuleStartState Uses

type RuleStartState struct {
    *BaseATNState
    // contains filtered or unexported fields
}

func NewRuleStartState Uses

func NewRuleStartState() *RuleStartState

type RuleStopState Uses

type RuleStopState struct {
    *BaseATNState
}

RuleStopState is the last node in the ATN for a rule, unless that rule is the start symbol. In that case, there is one transition to EOF. Later, we might encode references to all calls to this rule to compute FOLLOW sets for error handling.

func NewRuleStopState Uses

func NewRuleStopState() *RuleStopState

type RuleTransition Uses

type RuleTransition struct {
    *BaseTransition
    // contains filtered or unexported fields
}

func NewRuleTransition Uses

func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition

func (*RuleTransition) Matches Uses

func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

type SemanticContext Uses

type SemanticContext interface {
    String() string
    // contains filtered or unexported methods
}

var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)

func SemanticContextandContext Uses

func SemanticContextandContext(a, b SemanticContext) SemanticContext

func SemanticContextorContext Uses

func SemanticContextorContext(a, b SemanticContext) SemanticContext
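
A minimal runnable sketch (illustration only) of combining predicates with the AND/OR helpers; SemanticContextNone represents the absence of a predicate. The rule and predicate indices are arbitrary.

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr"
    )

    func main() {
        // Two predicates with arbitrary rule/predicate indices.
        p1 := antlr.NewPredicate(0, 0, false)
        p2 := antlr.NewPredicate(0, 1, false)

        both := antlr.SemanticContextandContext(p1, p2)  // holds only if p1 and p2 hold
        either := antlr.SemanticContextorContext(p1, p2) // holds if p1 or p2 holds
        fmt.Println(both.String(), either.String())

        // For AND, combining with SemanticContextNone leaves the other operand unchanged.
        fmt.Println(antlr.SemanticContextandContext(p1, antlr.SemanticContextNone).String())
    }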

type Set Uses

type Set struct {
    // contains filtered or unexported fields
}

func NewSet Uses

func NewSet(
    hashcodeFunction func(interface{}) int,
    equalsFunction func(interface{}, interface{}) bool) *Set

func (*Set) String Uses

func (s *Set) String() string
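
A small sketch of constructing the runtime's Set with custom hash and equality functions (the functions below are arbitrary illustrations). Only the exported String method is exercised, since the mutation methods are unexported; an empty set is assumed to print as {}.

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr"
    )

    func main() {
        hash := func(v interface{}) int { return v.(int) }
        equals := func(a, b interface{}) bool { return a.(int) == b.(int) }

        s := antlr.NewSet(hash, equals)
        fmt.Println(s.String()) // prints "{}" for an empty set (assumption)
    }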

type SetTransition Uses

type SetTransition struct {
    *BaseTransition
}

func NewSetTransition Uses

func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition

func (*SetTransition) Matches Uses

func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*SetTransition) String Uses

func (t *SetTransition) String() string

type SimState Uses

type SimState struct {
    // contains filtered or unexported fields
}

func NewSimState Uses

func NewSimState() *SimState

type SingletonPredictionContext Uses

type SingletonPredictionContext interface {
    PredictionContext
}

type StarBlockStartState Uses

type StarBlockStartState struct {
    *BaseBlockStartState
}

StarBlockStartState is the block that begins a closure loop.

func NewStarBlockStartState Uses

func NewStarBlockStartState() *StarBlockStartState

type StarLoopEntryState Uses

type StarLoopEntryState struct {
    *BaseDecisionState
    // contains filtered or unexported fields
}

func NewStarLoopEntryState Uses

func NewStarLoopEntryState() *StarLoopEntryState

type StarLoopbackState Uses

type StarLoopbackState struct {
    *BaseATNState
}

func NewStarLoopbackState Uses

func NewStarLoopbackState() *StarLoopbackState

type SyntaxTree Uses

type SyntaxTree interface {
    Tree

    GetSourceInterval() *Interval
}

type TerminalNode Uses

type TerminalNode interface {
    ParseTree

    GetSymbol() Token
}

type TerminalNodeImpl Uses

type TerminalNodeImpl struct {
    // contains filtered or unexported fields
}

func NewTerminalNodeImpl Uses

func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl

func (*TerminalNodeImpl) Accept Uses

func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{}

func (*TerminalNodeImpl) GetChild Uses

func (t *TerminalNodeImpl) GetChild(i int) Tree

func (*TerminalNodeImpl) GetChildCount Uses

func (t *TerminalNodeImpl) GetChildCount() int

func (*TerminalNodeImpl) GetChildren Uses

func (t *TerminalNodeImpl) GetChildren() []Tree

func (*TerminalNodeImpl) GetParent Uses

func (t *TerminalNodeImpl) GetParent() Tree

func (*TerminalNodeImpl) GetPayload Uses

func (t *TerminalNodeImpl) GetPayload() interface{}

func (*TerminalNodeImpl) GetSourceInterval Uses

func (t *TerminalNodeImpl) GetSourceInterval() *Interval

func (*TerminalNodeImpl) GetSymbol Uses

func (t *TerminalNodeImpl) GetSymbol() Token

func (*TerminalNodeImpl) GetText Uses

func (t *TerminalNodeImpl) GetText() string

func (*TerminalNodeImpl) SetChildren Uses

func (t *TerminalNodeImpl) SetChildren(tree []Tree)

func (*TerminalNodeImpl) SetParent Uses

func (t *TerminalNodeImpl) SetParent(tree Tree)

func (*TerminalNodeImpl) String Uses

func (t *TerminalNodeImpl) String() string

func (*TerminalNodeImpl) ToStringTree Uses

func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string

type Token Uses

type Token interface {
    GetSource() *TokenSourceCharStreamPair
    GetTokenType() int
    GetChannel() int
    GetStart() int
    GetStop() int
    GetLine() int
    GetColumn() int

    GetText() string
    SetText(s string)

    GetTokenIndex() int
    SetTokenIndex(v int)

    GetTokenSource() TokenSource
    GetInputStream() CharStream
}
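
A hedged sketch of reading Token metadata from a lexer. MyLexer stands for a hypothetical ANTLR-generated lexer and is not part of this package; the loop stops at TokenEOF.

    // MyLexer below is a hypothetical generated lexer, not part of this package.
    input := antlr.NewInputStream("a + b")
    lexer := NewMyLexer(input)

    for {
        tok := lexer.NextToken()
        if tok.GetTokenType() == antlr.TokenEOF {
            break
        }
        fmt.Printf("%d:%d type=%d channel=%d text=%q\n",
            tok.GetLine(), tok.GetColumn(), tok.GetTokenType(),
            tok.GetChannel(), tok.GetText())
    }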

type TokenFactory Uses

type TokenFactory interface {
    Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}

TokenFactory creates CommonToken objects.

type TokenSource Uses

type TokenSource interface {
    NextToken() Token
    Skip()
    More()
    GetLine() int
    GetCharPositionInLine() int
    GetInputStream() CharStream
    GetSourceName() string

    GetTokenFactory() TokenFactory
    // contains filtered or unexported methods
}

type TokenSourceCharStreamPair Uses

type TokenSourceCharStreamPair struct {
    // contains filtered or unexported fields
}

type TokenStream Uses

type TokenStream interface {
    IntStream

    LT(k int) Token

    Get(index int) Token
    GetTokenSource() TokenSource
    SetTokenSource(TokenSource)

    GetAllText() string
    GetTextFromInterval(*Interval) string
    GetTextFromRuleContext(RuleContext) string
    GetTextFromTokens(Token, Token) string
}

type TokenStreamRewriter Uses

type TokenStreamRewriter struct {
    // contains filtered or unexported fields
}

func NewTokenStreamRewriter Uses

func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter
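
A hedged sketch of the usual rewriter workflow: lex the input into a CommonTokenStream (a concrete TokenStream), record insert/replace/delete instructions against the default program, then render the edited text with GetTextDefault. MyLexer is a hypothetical generated lexer, and the token indices are illustration values that depend on the grammar.

    // MyLexer is a hypothetical generated lexer; the token indices below are
    // illustration values that depend on the actual grammar.
    input := antlr.NewInputStream("int x = 1;")
    lexer := NewMyLexer(input)
    tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    tokens.Fill()

    rewriter := antlr.NewTokenStreamRewriter(tokens)
    rewriter.InsertBeforeDefault(0, "// generated\n") // insert before the first token
    rewriter.ReplaceDefaultPos(2, "y")                // replace the token at index 2
    rewriter.DeleteDefaultPos(6)                      // delete the token at index 6

    // The original token text with the recorded instructions applied.
    fmt.Println(rewriter.GetTextDefault())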

func (*TokenStreamRewriter) AddToProgram Uses

func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation)

func (*TokenStreamRewriter) Delete Uses

func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int)

func (*TokenStreamRewriter) DeleteDefault Uses

func (tsr *TokenStreamRewriter) DeleteDefault(from, to int)

func (*TokenStreamRewriter) DeleteDefaultPos Uses

func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int)

func (*TokenStreamRewriter) DeleteProgram Uses

func (tsr *TokenStreamRewriter) DeleteProgram(program_name string)

Reset the named program so that no instructions exist.

func (*TokenStreamRewriter) DeleteProgramDefault Uses

func (tsr *TokenStreamRewriter) DeleteProgramDefault()

func (*TokenStreamRewriter) DeleteToken Uses

func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token)

func (*TokenStreamRewriter) DeleteTokenDefault Uses

func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token)

func (*TokenStreamRewriter) GetLastRewriteTokenIndex Uses

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int

func (*TokenStreamRewriter) GetLastRewriteTokenIndexDefault Uses

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int

func (*TokenStreamRewriter) GetProgram Uses

func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation

func (*TokenStreamRewriter) GetText Uses

func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string

Return the text of the original tokens in the given interval, altered per the instructions recorded in the named rewrite program.

func (*TokenStreamRewriter) GetTextDefault Uses

func (tsr *TokenStreamRewriter) GetTextDefault() string

Return the text of the entire original token stream, altered per the instructions recorded in the default rewrite program.

func (*TokenStreamRewriter) GetTokenStream Uses

func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream

func (*TokenStreamRewriter) InitializeProgram Uses

func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation

func (*TokenStreamRewriter) InsertAfter Uses

func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string)

func (*TokenStreamRewriter) InsertAfterDefault Uses

func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string)

func (*TokenStreamRewriter) InsertAfterToken Uses

func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string)

func (*TokenStreamRewriter) InsertBefore Uses

func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string)

func (*TokenStreamRewriter) InsertBeforeDefault Uses

func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string)

func (*TokenStreamRewriter) InsertBeforeToken Uses

func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string)

func (*TokenStreamRewriter) Replace Uses

func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string)

func (*TokenStreamRewriter) ReplaceDefault Uses

func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string)

func (*TokenStreamRewriter) ReplaceDefaultPos Uses

func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string)

func (*TokenStreamRewriter) ReplaceToken Uses

func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string)

func (*TokenStreamRewriter) ReplaceTokenDefault Uses

func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string)

func (*TokenStreamRewriter) ReplaceTokenDefaultPos Uses

func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string)

func (*TokenStreamRewriter) Rollback Uses

func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int)

Roll back the instruction stream for a program so that the instruction at instruction_index is no longer in the stream (that instruction and everything after it are discarded). UNTESTED!

func (*TokenStreamRewriter) RollbackDefault Uses

func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int)

func (*TokenStreamRewriter) SetLastRewriteTokenIndex Uses

func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int)

type TokensStartState Uses

type TokensStartState struct {
    *BaseDecisionState
}

TokensStartState is the Tokens rule start state linking to each lexer rule start state.

func NewTokensStartState Uses

func NewTokensStartState() *TokensStartState

type TraceListener Uses

type TraceListener struct {
    // contains filtered or unexported fields
}

func NewTraceListener Uses

func NewTraceListener(parser *BaseParser) *TraceListener

func (*TraceListener) EnterEveryRule Uses

func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext)

func (*TraceListener) ExitEveryRule Uses

func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext)

func (*TraceListener) VisitErrorNode Uses

func (t *TraceListener) VisitErrorNode(_ ErrorNode)

func (*TraceListener) VisitTerminal Uses

func (t *TraceListener) VisitTerminal(node TerminalNode)

type Transition Uses

type Transition interface {
    Matches(int, int, int) bool
    // contains filtered or unexported methods
}
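
A runnable sketch of Matches on two concrete transitions: a wildcard transition matches any symbol within the vocabulary bounds, while a range transition matches only its own sub-range. The character values are arbitrary.

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr"
    )

    func main() {
        target := antlr.NewBasicState() // transitions need a target ATN state

        wild := antlr.NewWildcardTransition(target)
        rng := antlr.NewRangeTransition(target, 'a', 'z')

        min, max := antlr.LexerMinCharValue, antlr.LexerMaxCharValue
        fmt.Println(wild.Matches('x', min, max)) // true: wildcard matches anything in range
        fmt.Println(rng.Matches('x', min, max))  // true: 'x' is within 'a'..'z'
        fmt.Println(rng.Matches('0', min, max))  // false: '0' is outside 'a'..'z'
    }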

type Tree Uses

type Tree interface {
    GetParent() Tree
    SetParent(Tree)
    GetPayload() interface{}
    GetChild(i int) Tree
    GetChildCount() int
    GetChildren() []Tree
}

func TreesGetChildren Uses

func TreesGetChildren(t Tree) []Tree

Return an ordered list of all children of the given node.
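
A small helper sketch (not part of the package) showing how Tree, TerminalNode, and TreesGetChildren combine to walk a parse tree; it counts the terminal (token) nodes beneath a node.

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr"
    )

    // countTerminals walks a Tree depth-first and counts TerminalNode leaves.
    func countTerminals(t antlr.Tree) int {
        if _, ok := t.(antlr.TerminalNode); ok {
            return 1
        }
        n := 0
        for _, child := range antlr.TreesGetChildren(t) {
            n += countTerminals(child)
        }
        return n
    }

    func main() {
        // A lone terminal node is itself a Tree with no children; the nil
        // symbol is fine for this size-only walk (illustration).
        leaf := antlr.NewTerminalNodeImpl(nil)
        fmt.Println(countTerminals(leaf)) // 1
    }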

func TreesgetAncestors Uses

func TreesgetAncestors(t Tree) []Tree

Return a list of all ancestors of the given node. The first node in the list is the root, and the last is the parent of the given node.

type WildcardTransition Uses

type WildcardTransition struct {
    *BaseTransition
}

func NewWildcardTransition Uses

func NewWildcardTransition(target ATNState) *WildcardTransition

func (*WildcardTransition) Matches Uses

func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool

func (*WildcardTransition) String Uses

func (t *WildcardTransition) String() string

Package antlr imports 12 packages and is imported by 140 packages. Updated 2018-11-18.