import "github.com/antlr/antlr4/runtime/Go/antlr"
Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. Use of this file is governed by the BSD 3-clause license that can be found in the LICENSE.txt file in the project root.
atn.go atn_config.go atn_config_set.go atn_deserialization_options.go atn_deserializer.go atn_simulator.go atn_state.go atn_type.go char_stream.go common_token_factory.go common_token_stream.go dfa.go dfa_serializer.go dfa_state.go diagnostic_error_listener.go error_listener.go error_strategy.go errors.go file_stream.go input_stream.go int_stream.go interval_set.go lexer.go lexer_action.go lexer_action_executor.go lexer_atn_simulator.go ll1_analyzer.go parser.go parser_atn_simulator.go parser_rule_context.go prediction_context.go prediction_mode.go recognizer.go rule_context.go semantic_context.go token.go token_source.go token_stream.go tokenstream_rewriter.go trace_listener.go transition.go tree.go trees.go utils.go
const (
ATNStateInvalidType = 0
ATNStateBasic = 1
ATNStateRuleStart = 2
ATNStateBlockStart = 3
ATNStatePlusBlockStart = 4
ATNStateStarBlockStart = 5
ATNStateTokenStart = 6
ATNStateRuleStop = 7
ATNStateBlockEnd = 8
ATNStateStarLoopBack = 9
ATNStateStarLoopEntry = 10
ATNStatePlusLoopBack = 11
ATNStateLoopEnd = 12
ATNStateInvalidStateNumber = -1
)Constants for serialization.
const (
ATNTypeLexer = 0
ATNTypeParser = 1
)Represent the type of recognizer an ATN applies to.
const (
LexerDefaultMode = 0
LexerMore = -2
LexerSkip = -3
)const (
LexerDefaultTokenChannel = TokenDefaultChannel
LexerHidden = TokenHiddenChannel
LexerMinCharValue = 0x0000
LexerMaxCharValue = 0x10FFFF
)const (
LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
)const (
//
// The SLL(*) prediction mode. This prediction mode ignores the current
// parser context when making predictions. This is the fastest prediction
// mode, and provides correct results for many grammars. This prediction
// mode is more powerful than the prediction mode provided by ANTLR 3, but
// may result in syntax errors for grammar and input combinations which are
// not SLL.
//
// <p>
// When using this prediction mode, the parser will either return a correct
// parse tree (i.e. the same parse tree that would be returned with the
// {@link //LL} prediction mode), or it will Report a syntax error. If a
// syntax error is encountered when using the {@link //SLL} prediction mode,
// it may be due to either an actual syntax error in the input or indicate
// that the particular combination of grammar and input requires the more
// powerful {@link //LL} prediction abilities to complete successfully.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeSLL = 0
//
// The LL(*) prediction mode. This prediction mode allows the current parser
// context to be used for resolving SLL conflicts that occur during
// prediction. This is the fastest prediction mode that guarantees correct
// parse results for all combinations of grammars with syntactically correct
// inputs.
//
// <p>
// When using this prediction mode, the parser will make correct decisions
// for all syntactically-correct grammar and input combinations. However, in
// cases where the grammar is truly ambiguous this prediction mode might not
// Report a precise answer for <em>exactly which</em> alternatives are
// ambiguous.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeLL = 1
//
// The LL(*) prediction mode with exact ambiguity detection. In addition to
// the correctness guarantees provided by the {@link //LL} prediction mode,
// this prediction mode instructs the prediction algorithm to determine the
// complete and exact set of ambiguous alternatives for every ambiguous
// decision encountered while parsing.
//
// <p>
// This prediction mode may be used for diagnosing ambiguities during
// grammar development. Due to the performance overhead of calculating sets
// of ambiguous alternatives, this prediction mode should be avoided when
// the exact results are not necessary.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeLLExactAmbigDetection = 2
)const (
TokenInvalidType = 0
// During lookahead operations, this "token" signifies we hit rule end ATN state
// and did not follow it despite needing to.
TokenEpsilon = -2
TokenMinUserTokenType = 1
TokenEOF = -1
TokenDefaultChannel = 0
TokenHiddenChannel = 1
)const (
Default_Program_Name = "default"
Program_Init_Size = 100
Min_Token_Index = 0
)const (
TransitionEPSILON = 1
TransitionRANGE = 2
TransitionRULE = 3
TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
TransitionATOM = 5
TransitionACTION = 6
TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
TransitionNOTSET = 8
TransitionWILDCARD = 9
TransitionPRECEDENCE = 10
)const (
BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)Represents {@code $} in local context prediction, which means wildcard: {@code #+x = #}.
const (
LL1AnalyzerHitPred = TokenInvalidType
)Special value added to the lookahead sets to indicate that we hit a predicate during analysis if {@code seeThruPreds==false}.
var (
LexerATNSimulatorDebug = false
LexerATNSimulatorDFADebug = false
LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
LexerATNSimulatorMatchCalls = 0
)var (
ParserATNSimulatorDebug = false
ParserATNSimulatorListATNDecisions = false
ParserATNSimulatorDFADebug = false
ParserATNSimulatorRetryDebug = false
)var (
BasePredictionContextglobalNodeCount = 1
BasePredictionContextid = BasePredictionContextglobalNodeCount
)var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
var ATNStateInitialNumTransitions = 4
var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"
This is the earliest supported serialized UUID. Stick to the serialized version for now; we don't need a UUID instance.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not explicitly copy token text when constructing tokens.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
Provides a default instance of {@link ConsoleErrorListener}.
var LexerMoreActionINSTANCE = NewLexerMoreAction()
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
var LexerSkipActionINSTANCE = NewLexerSkipAction()
Provides a singleton instance of this parameterless lexer action.
var ParseTreeWalkerDefault = NewParseTreeWalker()
var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
var SerializedUUID = AddedUnicodeSMP
This is the current serialized UUID.
var SerializedVersion = 3
var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}
This list contains all of the currently supported UUIDs, ordered by when the feature first appeared in this branch.
var TransitionserializationNames = []string{ "INVALID", "EPSILON", "RANGE", "RULE", "PREDICATE", "ATOM", "ACTION", "SET", "NOT_SET", "WILDCARD", "PRECEDENCE", }
var TreeInvalidInterval = NewInterval(-1, -2)
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool
Checks if all configurations in {@code configs} are in a {@link RuleStopState}. Configurations meeting this condition have reached the end of the decision rule (local context) or end of start rule (full context).
@param configs the configuration set to test @return {@code true} if all configurations in {@code configs} are in a {@link RuleStopState}, otherwise {@code false}
Determines if every alternative subset in {@code altsets} contains more than one alternative.
@param altsets a collection of alternative subsets @return {@code true} if every {@link BitSet} in {@code altsets} has {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
Determines if every alternative subset in {@code altsets} is equivalent.
@param altsets a collection of alternative subsets @return {@code true} if every member of {@code altsets} is equal to the others, otherwise {@code false}
Returns the unique alternative predicted by all alternative subsets in {@code altsets}. If no such alternative exists, this method returns {@link ATN//INVALID_ALT_NUMBER}.
@param altsets a collection of alternative subsets
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool
Checks if any configuration in {@code configs} is in a {@link RuleStopState}. Configurations meeting this condition have reached the end of the decision rule (local context) or end of start rule (full context).
@param configs the configuration set to test @return {@code true} if any configuration in {@code configs} is in a {@link RuleStopState}, otherwise {@code false}
Determines if any single alternative subset in {@code altsets} contains more than one alternative.
@param altsets a collection of alternative subsets @return {@code true} if {@code altsets} contains a {@link BitSet} with {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
Determines if any single alternative subset in {@code altsets} contains exactly one alternative.
@param altsets a collection of alternative subsets @return {@code true} if {@code altsets} contains a {@link BitSet} with {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool
Computes the SLL prediction termination condition.
<p> This method computes the SLL prediction termination condition for both of the following cases.</p>
<ul> <li>The usual SLL+LL fallback upon SLL conflict</li> <li>Pure SLL without LL fallback</li> </ul>
<p><strong>COMBINED SLL+LL PARSING</strong></p>
<p>When LL-fallback is enabled upon SLL conflict, correct predictions are ensured regardless of how the termination condition is computed by this method. Due to the substantially higher cost of LL prediction, the prediction should only fall back to LL when the additional lookahead cannot lead to a unique SLL prediction.</p>
<p>Assuming combined SLL+LL parsing, an SLL configuration set with only conflicting subsets should fall back to full LL, even if the configuration sets don't resolve to the same alternative (e.g. {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting configuration, SLL could continue with the hopes that more lookahead will resolve via one of those non-conflicting configurations.</p>
<p>Here's the prediction termination rule then: SLL (for SLL+LL parsing) stops when it sees only conflicting configuration subsets. In contrast, full LL keeps going when there is uncertainty.</p>
<p><strong>HEURISTIC</strong></p>
<p>As a heuristic, we stop prediction when we see any conflicting subset unless we see a state that only has one alternative associated with it. The single-alt-state thing lets prediction continue upon rules like (otherwise, it would admit defeat too soon):</p>
<p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
<p>When the ATN simulation reaches the state before {@code ';'}, it has a DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop processing this node because alternative two has another way to continue, via {@code [6|2|[]]}.</p>
<p>It also lets us continue for this rule:</p>
<p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
<p>After Matching input A, we reach the stop state for rule A, state 1. State 8 is the state right before B. Clearly alternatives 1 and 2 conflict and no amount of further lookahead will separate the two. However, alternative 3 will be able to continue and so we do not stop working on this state. In the previous example, we're concerned with states associated with the conflicting alternatives. Here alt 3 is not associated with the conflicting configs, but since we can continue looking for input reasonably, don't declare the state done.</p>
<p><strong>PURE SLL PARSING</strong></p>
<p>To handle pure SLL parsing, all we have to do is make sure that we combine stack contexts for configurations that differ only by semantic predicate. From there, we can do the usual SLL termination heuristic.</p>
<p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
<p>SLL decisions don't evaluate predicates until after they reach DFA stop states because they need to create the DFA cache that works in all semantic situations. In contrast, full LL evaluates predicates collected during start state computation so it can ignore predicates thereafter. This means that SLL termination detection can totally ignore semantic predicates.</p>
<p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not semantic predicate contexts so we might see two configurations like the following.</p>
<p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
<p>Before testing these configurations against others, we have to merge {@code x} and {@code x'} (without modifying the existing configurations). For example, we test {@code (x+x')==x''} when looking for conflicts in the following configurations.</p>
<p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
<p>If the configuration set has predicates (as indicated by {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of the configurations to strip out all of the predicates so that a standard {@link ATNConfigSet} will merge everything ignoring predicates.</p>
func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool
Full LL prediction termination.
<p>Can we stop looking ahead during ATN simulation or is there some uncertainty as to which alternative we will ultimately pick, after consuming more input? Even if there are partial conflicts, we might know that everything is going to resolve to the same minimum alternative. That means we can stop since no more lookahead will change that fact. On the other hand, there might be multiple conflicts that resolve to different minimums. That means we need more look ahead to decide which of those alternatives we should predict.</p>
<p>The basic idea is to split the set of configurations {@code C}, into conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with non-conflicting configurations. Two configurations conflict if they have identical {@link ATNConfig//state} and {@link ATNConfig//context} values but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
<p>Reduce these configuration subsets to the set of possible alternatives. You can compute the alternative subsets in one pass as follows:</p>
<p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in {@code C} holding {@code s} and {@code ctx} fixed.</p>
<p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
<pre> map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not alt and not pred </pre>
<p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
<p>If {@code |A_s,ctx|=1} then there is no conflict associated with {@code s} and {@code ctx}.</p>
<p>Reduce the subsets to singletons by choosing a minimum of each subset. If the union of these alternative subsets is a singleton, then no amount of more lookahead will help us. We will always pick that alternative. If, however, there is more than one alternative, then we are uncertain which alternative to predict and must continue looking for resolution. We may or may not discover an ambiguity in the future, even if there are no conflicting subsets this round.</p>
<p>The biggest sin is to terminate early because it means we've made a decision but were uncertain as to the eventual outcome. We haven't used enough lookahead. On the other hand, announcing a conflict too late is no big deal; you will still have the conflict. It's just inefficient. It might even look until the end of file.</p>
<p>No special consideration for semantic predicates is required because predicates are evaluated on-the-fly for full LL prediction, ensuring that no configuration contains a semantic context during the termination check.</p>
<p><strong>CONFLICTING CONFIGS</strong></p>
<p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict when {@code i!=j} but {@code x=x'}. Because we merge all {@code (s, i, _)} configurations together, that means that there are at most {@code n} configurations associated with state {@code s} for {@code n} possible alternatives in the decision. The merged stacks complicate the comparison of configuration contexts {@code x} and {@code x'}. Sam checks to see if one is a subset of the other by calling merge and checking to see if the merged result is either {@code x} or {@code x'}. If the {@code x} associated with lowest alternative {@code i} is the superset, then {@code i} is the only possible prediction since the others resolve to {@code min(i)} as well. However, if {@code x} is associated with {@code j>i} then at least one stack configuration for {@code j} is not in conflict with alternative {@code i}. The algorithm should keep going, looking for more lookahead due to the uncertainty.</p>
<p>For simplicity, I'm doing an equality check between {@code x} and {@code x'} that lets the algorithm continue to consume lookahead longer than necessary. The reason I like the equality is of course the simplicity, but also because that is the test you need to detect the alternatives that are actually in conflict.</p>
<p><strong>CONTINUE/STOP RULE</strong></p>
<p>Continue if union of resolved alternative sets from non-conflicting and conflicting alternative subsets has more than one alternative. We are uncertain about which alternative to predict.</p>
<p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which alternatives are still in the running for the amount of input we've consumed at this point. The conflicting sets let us strip away configurations that won't lead to more states because we resolve conflicts to the configuration with a minimum alternate for the conflicting set.</p>
<p><strong>CASES</strong></p>
<ul>
<li>no conflicts and more than 1 alternative in set => continue</li>
<li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1,3}} => continue </li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1}} => stop and predict 1</li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U {@code {1}} = {@code {1}} => stop and predict 1, can announce ambiguity {@code {1,2}}</li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U {@code {2}} = {@code {1,2}} => continue</li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U {@code {3}} = {@code {1,3}} => continue</li>
</ul>
<p><strong>EXACT AMBIGUITY DETECTION</strong></p>
<p>If all states Report the same conflicting set of alternatives, then we know we have the exact ambiguity set.</p>
<p><code>|A_<em>i</em>|>1</code> and <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
<p>In other words, we continue examining lookahead until all {@code A_i} have more than one alternative and all {@code A_i} are the same. If {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate because the resolved set is {@code {1}}. To determine what the real ambiguity is, we have to know whether the ambiguity is between one and two or one and three so we keep going. We can only stop prediction when we need exact ambiguity detection when the sets look like {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
func TerminalNodeToStringArray(sa []TerminalNode) []string
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string
Print out a whole tree in LISP form. {@link //getNodeText} is used on the
node payloads to get the text for the nodes. Detect parse trees and extract data appropriately.
type AND struct {
// contains filtered or unexported fields
}func NewAND(a, b SemanticContext) *AND
type ATN struct {
// DecisionToState is the decision points for all rules, subrules, optional
// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
DecisionToState []DecisionState
// contains filtered or unexported fields
}func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet
NextTokensInContext computes the set of valid tokens that can occur starting in state s. If ctx is nil, the set of tokens will not include what can follow the rule surrounding s. In other words, the set will be restricted to tokens reachable staying within the rule of s.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet
NextTokensNoContext computes the set of valid tokens that can occur starting in s and staying in same rule. Token.EPSILON is in set if we reach end of rule.
type ATNConfig interface {
GetState() ATNState
GetAlt() int
GetSemanticContext() SemanticContext
GetContext() PredictionContext
SetContext(PredictionContext)
GetReachesIntoOuterContext() int
SetReachesIntoOuterContext(int)
String() string
// contains filtered or unexported methods
}ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic context). The syntactic context is a graph-structured stack node whose path(s) to the root is the rule invocation(s) chain used to arrive at the state. The semantic context is the tree of semantic predicates encountered before reaching an ATN state.
type ATNConfigSet interface {
Add(ATNConfig, *DoubleDict) bool
AddAll([]ATNConfig) bool
GetStates() *Set
GetPredicates() []SemanticContext
GetItems() []ATNConfig
OptimizeConfigs(interpreter *BaseATNSimulator)
Equals(other interface{}) bool
Length() int
IsEmpty() bool
Contains(ATNConfig) bool
ContainsFast(ATNConfig) bool
Clear()
String() string
HasSemanticContext() bool
SetHasSemanticContext(v bool)
ReadOnly() bool
SetReadOnly(bool)
GetConflictingAlts() *BitSet
SetConflictingAlts(*BitSet)
FullContext() bool
GetUniqueAlt() int
SetUniqueAlt(int)
GetDipsIntoOuterContext() bool
SetDipsIntoOuterContext(bool)
// contains filtered or unexported methods
}type ATNConfigSetPair struct {
// contains filtered or unexported fields
}type ATNDeserializationOptions struct {
// contains filtered or unexported fields
}func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions
type ATNDeserializer struct {
// contains filtered or unexported fields
}func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer
func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN
type ATNState interface {
GetEpsilonOnlyTransitions() bool
GetRuleIndex() int
SetRuleIndex(int)
GetNextTokenWithinRule() *IntervalSet
SetNextTokenWithinRule(*IntervalSet)
GetATN() *ATN
SetATN(*ATN)
GetStateType() int
GetStateNumber() int
SetStateNumber(int)
GetTransitions() []Transition
SetTransitions([]Transition)
AddTransition(Transition, int)
String() string
// contains filtered or unexported methods
}type AbstractPredicateTransition interface {
Transition
IAbstractPredicateTransitionFoo()
}type ActionTransition struct {
*BaseTransition
// contains filtered or unexported fields
}func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition
func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *ActionTransition) String() string
type AltDict struct {
// contains filtered or unexported fields
}func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict
Get a map from state to alt subset from a configuration set. For each configuration {@code c} in {@code configs}:
<pre> map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} </pre>
type ArrayPredictionContext struct {
*BasePredictionContext
// contains filtered or unexported fields
}func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext
func (a *ArrayPredictionContext) GetParent(index int) PredictionContext
func (a *ArrayPredictionContext) GetReturnStates() []int
func (a *ArrayPredictionContext) String() string
type AtomTransition struct {
*BaseTransition
}TODO: make all transitions sets? no, should remove set edges
func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition
func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *AtomTransition) String() string
type BailErrorStrategy struct {
*DefaultErrorStrategy
}func NewBailErrorStrategy() *BailErrorStrategy
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException)
Instead of recovering from exception {@code e}, re-panic it wrapped in a {@link ParseCancellationException} so it is not caught by the rule func catches. Use {@link Exception//getCause()} to get the original {@link RecognitionException}.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token
Make sure we don't attempt to recover inline if the parser successfully recovers, it won't panic an exception.
func (b *BailErrorStrategy) Sync(recognizer Parser)
Make sure we don't attempt to recover from problems in subrules.
type BaseATNConfig struct {
// contains filtered or unexported fields
}func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig
func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig
func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig
func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig
func (b *BaseATNConfig) GetAlt() int
func (b *BaseATNConfig) GetContext() PredictionContext
func (b *BaseATNConfig) GetReachesIntoOuterContext() int
func (b *BaseATNConfig) GetSemanticContext() SemanticContext
func (b *BaseATNConfig) GetState() ATNState
func (b *BaseATNConfig) SetContext(v PredictionContext)
func (b *BaseATNConfig) SetReachesIntoOuterContext(v int)
func (b *BaseATNConfig) String() string
type BaseATNConfigSet struct {
// contains filtered or unexported fields
}BaseATNConfigSet is a specialized set of ATNConfig that tracks information about its elements and can combine similar configurations using a graph-structured stack.
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool
Add merges contexts with existing configs for (s, i, pi, _), where s is the ATNConfig.state, i is the ATNConfig.alt, and pi is the ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates dipsIntoOuterContext and hasSemanticContext when necessary.
func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool
func (b *BaseATNConfigSet) Clear()
func (b *BaseATNConfigSet) Contains(item ATNConfig) bool
func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool
func (b *BaseATNConfigSet) Equals(other interface{}) bool
func (b *BaseATNConfigSet) FullContext() bool
func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet
func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool
func (b *BaseATNConfigSet) GetItems() []ATNConfig
func (b *BaseATNConfigSet) GetPredicates() []SemanticContext
func (b *BaseATNConfigSet) GetStates() *Set
func (b *BaseATNConfigSet) GetUniqueAlt() int
func (b *BaseATNConfigSet) HasSemanticContext() bool
func (b *BaseATNConfigSet) IsEmpty() bool
func (b *BaseATNConfigSet) Length() int
func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator)
func (b *BaseATNConfigSet) ReadOnly() bool
func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet)
func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool)
func (b *BaseATNConfigSet) SetHasSemanticContext(v bool)
func (b *BaseATNConfigSet) SetReadOnly(readOnly bool)
func (b *BaseATNConfigSet) SetUniqueAlt(v int)
func (b *BaseATNConfigSet) String() string
type BaseATNSimulator struct {
// contains filtered or unexported fields
}func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator
func (b *BaseATNSimulator) ATN() *ATN
func (b *BaseATNSimulator) DecisionToDFA() []*DFA
func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache
type BaseATNState struct {
// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
NextTokenWithinRule *IntervalSet
// contains filtered or unexported fields
}func NewBaseATNState() *BaseATNState
func (as *BaseATNState) AddTransition(trans Transition, index int)
func (as *BaseATNState) GetATN() *ATN
func (as *BaseATNState) GetEpsilonOnlyTransitions() bool
func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet
func (as *BaseATNState) GetRuleIndex() int
func (as *BaseATNState) GetStateNumber() int
func (as *BaseATNState) GetStateType() int
func (as *BaseATNState) GetTransitions() []Transition
func (as *BaseATNState) SetATN(atn *ATN)
func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet)
func (as *BaseATNState) SetRuleIndex(v int)
func (as *BaseATNState) SetStateNumber(stateNumber int)
func (as *BaseATNState) SetTransitions(t []Transition)
func (as *BaseATNState) String() string
type BaseAbstractPredicateTransition struct {
*BaseTransition
}func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo()
type BaseBlockStartState struct {
*BaseDecisionState
// contains filtered or unexported fields
}BaseBlockStartState is the start of a regular (...) block.
func NewBlockStartState() *BaseBlockStartState
type BaseDecisionState struct {
*BaseATNState
// contains filtered or unexported fields
}func NewBaseDecisionState() *BaseDecisionState
type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext
type BaseLexer struct {
*BaseRecognizer
Interpreter ILexerATNSimulator
TokenStartCharIndex int
TokenStartLine int
TokenStartColumn int
ActionType int
Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
// contains filtered or unexported fields
}func NewBaseLexer(input CharStream) *BaseLexer
The standard method called to automatically emit a token at the outermost lexical rule. The token object should point into the char buffer start..stop. If there is a text override in 'text', use that to set the token's text. Override this method to emit custom Token objects or provide a new factory.
By default does not support multiple emits per NextToken invocation for efficiency reasons. Subclass and override this method, NextToken, and GetToken (to push tokens into a list and pull from that list rather than a single variable as this implementation does).
Return a list of all Token objects in the input char stream. Forces a load of all tokens. Does not include the EOF token.
What is the index of the current character of lookahead?
func (b *BaseLexer) GetInputStream() CharStream
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator
Return the text Matched so far for the current token or any text override. Setting the complete text of this token wipes any previous changes to the text.
func (b *BaseLexer) GetTokenFactory() TokenFactory
func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair
Return a token from this source; i.e., Match a token on the char stream.
func (b *BaseLexer) Recover(re RecognitionException)
Lexers can normally Match any char in its vocabulary after Matching a token, so do the easy thing and just kill a character and hope it all works out. You can instead use the rule invocation stack to do sophisticated error recovery if you are in a fragment rule.
Instruct the lexer to Skip creating a token for the current lexer rule and look for another token. NextToken() knows to keep looking when a lexer rule finishes with token set to SKIPTOKEN. Recall that if token==nil at the end of any token rule, it creates one for you and emits it.
type BaseLexerAction struct {
// contains filtered or unexported fields
}func NewBaseLexerAction(action int) *BaseLexerAction
type BaseParseTreeListener struct{}func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext)
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)
func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)
type BaseParseTreeVisitor struct{}func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{}
type BaseParser struct {
*BaseRecognizer
Interpreter *ParserATNSimulator
BuildParseTrees bool
// contains filtered or unexported fields
}func NewBaseParser(input TokenStream) *BaseParser
This is all the parsing support code; essentially most of it is error recovery stuff.
func (p *BaseParser) AddParseListener(listener ParseTreeListener)
Registers {@code listener} to receive events during the parsing process.
<p>To support output-preserving grammar transformations (including but not limited to left-recursion removal, automated left-factoring, and optimized code generation), calls to listener methods during the parse may differ substantially from calls made by {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In particular, rule entry and exit events may occur in a different order during the parse than after the parser. In addition, calls to certain rule entry methods may be omitted.</p>
<p>With the following specific exceptions, calls to listener events are <em>deterministic</em>, i.e. for identical input the calls to listener methods will be the same.</p>
<ul> <li>Alterations to the grammar used to generate code may change the behavior of the listener calls.</li> <li>Alterations to the command line options passed to ANTLR 4 when generating the parser may change the behavior of the listener calls.</li> <li>Changing the version of the ANTLR Tool used to generate the parser may change the behavior of the listener calls.</li> </ul>
@param listener the listener to add
@panics nilPointerException if {@code listener} is {@code nil}
func (p *BaseParser) Consume() Token
func (p *BaseParser) DumpDFA()
For debugging and other purposes.//
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int)
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int)
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int)
func (p *BaseParser) ExitRule()
func (p *BaseParser) GetATN() *ATN
func (p *BaseParser) GetATNWithBypassAlts()
The ATN with bypass alternatives is expensive to create so we create it lazily.
@panics UnsupportedOperationException if the current parser does not implement the {@link //getSerializedATN()} method.
func (p *BaseParser) GetCurrentToken() Token
Match needs to return the current input symbol, which gets put into the label for the associated token ref e.g., x=ID.
func (p *BaseParser) GetDFAStrings() string
For debugging and other purposes.//
func (p *BaseParser) GetErrorHandler() ErrorStrategy
func (p *BaseParser) GetExpectedTokens() *IntervalSet
Computes the set of input symbols which could follow the current parser state and context, as given by {@link //GetState} and {@link //GetContext}, respectively.
@see ATN//getExpectedTokens(int, RuleContext)
func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet
func (p *BaseParser) GetInputStream() IntStream
func (p *BaseParser) GetInterpreter() *ParserATNSimulator
func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext
func (p *BaseParser) GetParseListeners() []ParseTreeListener
func (p *BaseParser) GetParserRuleContext() ParserRuleContext
func (p *BaseParser) GetPrecedence() int
func (p *BaseParser) GetRuleIndex(ruleName string) int
Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string
func (p *BaseParser) GetSourceName() string
func (p *BaseParser) GetTokenFactory() TokenFactory
func (p *BaseParser) GetTokenStream() TokenStream
func (p *BaseParser) IsExpectedToken(symbol int) bool
func (p *BaseParser) Match(ttype int) Token
func (p *BaseParser) MatchWildcard() Token
func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException)
func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int)
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener)
Remove {@code listener} from the list of parse listeners.
<p>If {@code listener} is {@code nil} or has not been added as a parse listener, this method does nothing.</p> @param listener the listener to remove
func (p *BaseParser) SetErrorHandler(e ErrorStrategy)
func (p *BaseParser) SetInputStream(input TokenStream)
func (p *BaseParser) SetParserRuleContext(v ParserRuleContext)
func (p *BaseParser) SetTokenStream(input TokenStream)
Set the token stream and reset the parser.//
func (p *BaseParser) SetTrace(trace *TraceListener)
During a parse it is sometimes useful to listen in on the rule entry and exit events as well as token Matches. This is for quick and dirty debugging.
func (p *BaseParser) TriggerEnterRuleEvent()
Notify any parse listeners of an enter rule event.
func (p *BaseParser) TriggerExitRuleEvent()
Notify any parse listeners of an exit rule event.
@see //addParseListener
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext)
type BaseParserRuleContext struct {
*BaseRuleContext
// contains filtered or unexported fields
}func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext
func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{}
func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext
func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl
func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl
func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext)
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener)
Double dispatch methods for listeners
func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener)
func (prc *BaseParserRuleContext) GetChild(i int) Tree
func (prc *BaseParserRuleContext) GetChildCount() int
func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext
func (prc *BaseParserRuleContext) GetChildren() []Tree
func (prc *BaseParserRuleContext) GetPayload() interface{}
func (prc *BaseParserRuleContext) GetRuleContext() RuleContext
func (prc *BaseParserRuleContext) GetSourceInterval() *Interval
func (prc *BaseParserRuleContext) GetStart() Token
func (prc *BaseParserRuleContext) GetStop() Token
func (prc *BaseParserRuleContext) GetText() string
func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode
func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode
func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext
func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext
func (prc *BaseParserRuleContext) RemoveLastChild()
Used by EnterOuterAlt to toss out a RuleContext previously added as we entered a rule. If we have a label, we will need to remove the generic ruleContext object.
func (prc *BaseParserRuleContext) SetException(e RecognitionException)
func (prc *BaseParserRuleContext) SetStart(t Token)
func (prc *BaseParserRuleContext) SetStop(t Token)
func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string
func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string
type BasePredictionContext struct {
// contains filtered or unexported fields
}func NewBasePredictionContext(cachedHash int) *BasePredictionContext
type BaseRecognitionException struct {
// contains filtered or unexported fields
}func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException
func (b *BaseRecognitionException) GetInputStream() IntStream
func (b *BaseRecognitionException) GetMessage() string
func (b *BaseRecognitionException) GetOffendingToken() Token
func (b *BaseRecognitionException) String() string
type BaseRecognizer struct {
RuleNames []string
LiteralNames []string
SymbolicNames []string
GrammarFileName string
// contains filtered or unexported fields
}func NewBaseRecognizer() *BaseRecognizer
func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int)
func (b *BaseRecognizer) AddErrorListener(listener ErrorListener)
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string
What is the error header, normally line/character position information?//
func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener
func (b *BaseRecognizer) GetLiteralNames() []string
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int
Get a map from rule names to rule indexes.
<p>Used for XPath and tree pattern compilation.</p>
func (b *BaseRecognizer) GetRuleNames() []string
func (b *BaseRecognizer) GetState() int
func (b *BaseRecognizer) GetSymbolicNames() []string
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string
How should a token be displayed in an error message? The default
is to display just the text, but during development you might want to have a lot of information spit out. Override in that case to use t.String() (which, for CommonToken, dumps everything about the token). This is better than forcing you to override a method in your token objects because you don't have to go modify your lexer so that it creates a new Java type.
@deprecated This method is not called by the ANTLR 4 Runtime. Specific implementations of {@link ANTLRErrorStrategy} may provide a similar feature when necessary. For example, see {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
func (b *BaseRecognizer) GetTokenNames() []string
func (b *BaseRecognizer) GetTokenType(tokenName string) int
func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool
func (b *BaseRecognizer) RemoveErrorListeners()
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool
subclass needs to override these if there are sempreds or actions that the ATN interp needs to execute
func (b *BaseRecognizer) SetState(v int)
type BaseRewriteOperation struct {
// contains filtered or unexported fields
}func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int
func (op *BaseRewriteOperation) GetIndex() int
func (op *BaseRewriteOperation) GetInstructionIndex() int
func (op *BaseRewriteOperation) GetOpName() string
func (op *BaseRewriteOperation) GetText() string
func (op *BaseRewriteOperation) GetTokens() TokenStream
func (op *BaseRewriteOperation) SetIndex(val int)
func (op *BaseRewriteOperation) SetInstructionIndex(val int)
func (op *BaseRewriteOperation) SetOpName(val string)
func (op *BaseRewriteOperation) SetText(val string)
func (op *BaseRewriteOperation) SetTokens(val TokenStream)
func (op *BaseRewriteOperation) String() string
func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext
func (b *BaseRuleContext) GetAltNumber() int
func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext
func (b *BaseRuleContext) GetInvokingState() int
func (b *BaseRuleContext) GetParent() Tree
func (b *BaseRuleContext) GetRuleIndex() int
func (b *BaseRuleContext) IsEmpty() bool
A context is empty if there is no invoking state, meaning nobody called the current context.
func (b *BaseRuleContext) SetAltNumber(altNumber int)
func (b *BaseRuleContext) SetInvokingState(t int)
func (b *BaseRuleContext) SetParent(v Tree)
type BaseSingletonPredictionContext struct {
*BasePredictionContext
// contains filtered or unexported fields
}func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext
func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext
func (b *BaseSingletonPredictionContext) String() string
type BaseToken struct {
// contains filtered or unexported fields
}func (b *BaseToken) GetInputStream() CharStream
func (b *BaseToken) GetSource() *TokenSourceCharStreamPair
func (b *BaseToken) GetTokenSource() TokenSource
type BaseTransition struct {
// contains filtered or unexported fields
}func NewBaseTransition(target ATNState) *BaseTransition
func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
type BasicBlockStartState struct {
*BaseBlockStartState
}func NewBasicBlockStartState() *BasicBlockStartState
type BasicState struct {
*BaseATNState
}func NewBasicState() *BasicState
type BitSet struct {
// contains filtered or unexported fields
}Gets the complete set of represented alternatives for a collection of alternative subsets. This method returns the union of each {@link BitSet} in {@code altsets}.
@param altsets a collection of alternative subsets @return the set of represented alternatives in {@code altsets}
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet
This func gets the conflicting alt subsets from a configuration set. For each configuration {@code c} in {@code configs}:
<pre> map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not alt and not pred </pre>
type BlockEndState struct {
*BaseATNState
// contains filtered or unexported fields
}BlockEndState is a terminal node of a simple (a|b|c) block.
func NewBlockEndState() *BlockEndState
type BlockStartState interface {
DecisionState
// contains filtered or unexported methods
}type BlockStartStateIntPair struct {
// contains filtered or unexported fields
}type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
GetTextFromInterval(*Interval) string
}func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken
func (c *CommonToken) GetText() string
func (c *CommonToken) SetText(text string)
func (c *CommonToken) String() string
type CommonTokenFactory struct {
// contains filtered or unexported fields
}CommonTokenFactory is the default TokenFactory implementation.
func NewCommonTokenFactory(copyText bool) *CommonTokenFactory
func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
type CommonTokenStream struct {
// contains filtered or unexported fields
}CommonTokenStream is an implementation of TokenStream that loads tokens from a TokenSource on-demand and places the tokens in a buffer to provide access to any previous token by index. This token stream ignores the value of Token.getChannel. If your parser requires the token stream to filter tokens to only those on a particular channel, such as Token.DEFAULT_CHANNEL or Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream
func (c *CommonTokenStream) Consume()
func (c *CommonTokenStream) Fill()
Fill gets all tokens from the lexer until EOF.
func (c *CommonTokenStream) Get(index int) Token
func (c *CommonTokenStream) GetAllText() string
func (c *CommonTokenStream) GetAllTokens() []Token
func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token
GetHiddenTokensToLeft collects all tokens on channel to the left of the current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is -1, it finds any non default channel token.
func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token
GetHiddenTokensToRight collects all tokens on a specified channel to the right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or EOF. If channel is -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetSourceName() string
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string
func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string
func (c *CommonTokenStream) GetTokenSource() TokenSource
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token
GetTokens gets all tokens from start to stop inclusive.
func (c *CommonTokenStream) Index() int
func (c *CommonTokenStream) LA(i int) int
func (c *CommonTokenStream) LB(k int) Token
func (c *CommonTokenStream) LT(k int) Token
func (c *CommonTokenStream) Mark() int
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int
NextTokenOnChannel returns the index of the next token on channel given a starting index. Returns i if tokens[i] is on channel. Returns -1 if there are no tokens on channel between i and EOF.
func (c *CommonTokenStream) Release(marker int)
func (c *CommonTokenStream) Seek(index int)
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource)
SetTokenSource resets this token stream by setting its token source.
func (c *CommonTokenStream) Size() int
func (c *CommonTokenStream) Sync(i int) bool
Sync makes sure index i in tokens has a token and returns true if a token is located at index i and otherwise false.
type ConsoleErrorListener struct {
*DefaultErrorListener
}func NewConsoleErrorListener() *ConsoleErrorListener
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
{@inheritDoc}
<p> This implementation prints messages to {@link System//err} containing the values of {@code line}, {@code charPositionInLine}, and {@code msg} using the following format.</p>
<pre> line <em>line</em>:<em>charPositionInLine</em> <em>msg</em> </pre>
type DFA struct {
// contains filtered or unexported fields
}func NewDFA(atnStartState DecisionState, decision int) *DFA
type DFASerializer struct {
// contains filtered or unexported fields
}DFASerializer is a DFA walker that knows how to dump them to serialized strings.
func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer
func (d *DFASerializer) GetStateString(s *DFAState) string
func (d *DFASerializer) String() string
type DFAState struct {
// contains filtered or unexported fields
}DFAState represents a set of possible ATN configurations. As Aho, Sethi, Ullman p. 117 says: "The DFA uses its state to keep track of all possible states the ATN can be in after reading each input symbol. That is to say, after reading input a1a2..an, the DFA is in a state that represents the subset T of the states of the ATN that are reachable from the ATN's start state along some path labeled a1a2..an." In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of states the ATN could be in. We need to track the alt predicted by each state as well, however. More importantly, we need to maintain a stack of states, tracking the closure operations as they jump from rule to rule, emulating rule invocations (method calls). I have to add a stack to simulate the proper lookahead sequences for the underlying LL grammar from which the ATN was derived.
I use a set of ATNConfig objects, not simple states. An ATNConfig is both a state (ala normal conversion) and a RuleContext describing the chain of rules (if any) followed to arrive at that state.
A DFAState may have multiple references to a particular state, but with different ATN contexts (with same or different alts) meaning that state was reached via a different set of rule invocations.
func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState
GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
type DefaultErrorListener struct {
}func NewDefaultErrorListener() *DefaultErrorListener
func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
type DefaultErrorStrategy struct {
// contains filtered or unexported fields
}This is the default implementation of {@link ANTLRErrorStrategy} used for error Reporting and recovery in ANTLR parsers.
func NewDefaultErrorStrategy() *DefaultErrorStrategy
func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token
Conjure up a missing token during error recovery.
The recognizer attempts to recover from single missing symbols. But, actions might refer to that missing symbol. For example, x=ID {f($x)}. The action clearly assumes that there has been an identifier Matched previously and that $x points at that token. If that token is missing, but the next token in the stream is what we want, we assume that this token is missing and we keep going. Because we have to return some token to replace the missing token, we have to conjure one up. This method gives the user control over the tokens returned for missing tokens. Mostly, you will want to create something special for identifier tokens. For literals such as '{' and ',', the default action in the parser or tree parser works. It simply creates a CommonToken of the appropriate type. The text will be the token. If you change what tokens must be created by the lexer, override this method to create the appropriate tokens.
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string
How should a token be displayed in an error message? The default is to display just the text, but during development you might want to have a lot of information spit out. Override in that case to use t.String() (which, for CommonToken, dumps everything about the token). This is better than forcing you to override a method in your token objects because you don't have to go modify your lexer so that it creates a new Java type.
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException)
{@inheritDoc}
<p>The default implementation reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set--loosely the set of tokens that can follow the current rule.</p>
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token
<p>The default implementation attempts to recover from the mismatched input by using single token insertion and deletion as described below. If the recovery attempt fails, this method panics with an {@link InputMisMatchException}.</p>
<p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
<p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the right token, however, then assume {@code LA(1)} is some extra spurious token and delete it. Then consume and return the next token (which was the {@code LA(2)} token) as the successful result of the Match operation.</p>
<p>This recovery strategy is implemented by {@link //singleTokenDeletion}.</p>
<p><strong>MISSING TOKEN</strong> (single token insertion)</p>
<p>If current token (at {@code LA(1)}) is consistent with what could come after the expected {@code LA(1)} token, then assume the token is missing and use the parser's {@link TokenFactory} to create it on the fly. The "insertion" is performed by returning the created token as the successful result of the Match operation.</p>
<p>This recovery strategy is implemented by {@link //singleTokenInsertion}.</p>
<p><strong>EXAMPLE</strong></p>
<p>For example, Input {@code i=(3} is clearly missing the {@code ')'}. When the parser returns from the nested call to {@code expr}, it will have call chain:</p>
<pre> stat &rarr expr &rarr atom </pre>
and it will be trying to Match the {@code ')'} at this point in the derivation:
<pre> => ID '=' '(' INT ')' ('+' atom)* ” ^ </pre>
The attempt to Match {@code ')'} will fail when it sees {@code ”} and call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} is in the set of tokens that can follow the {@code ')'} token reference in rule {@code atom}. It can assume that you forgot the {@code ')'}.
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException)
{@inheritDoc}
<p>The default implementation returns immediately if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} and dispatches the Reporting task based on the runtime type of {@code e} according to the following table.</p>
<ul> <li>{@link NoViableAltException}: Dispatches the call to {@link //ReportNoViableAlternative}</li> <li>{@link InputMisMatchException}: Dispatches the call to {@link //ReportInputMisMatch}</li> <li>{@link FailedPredicateException}: Dispatches the call to {@link //ReportFailedPredicate}</li> <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report the exception</li> </ul>
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException)
This is called by {@link //ReportError} when the exception is a {@link FailedPredicateException}.
@see //ReportError
@param recognizer the parser instance @param e the recognition exception
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException)
This is called by {@link //ReportError} when the exception is an {@link InputMisMatchException}.
@see //ReportError
@param recognizer the parser instance @param e the recognition exception
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser)
{@inheritDoc}
<p>The default implementation simply calls {@link //endErrorCondition}.</p>
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser)
This method is called to Report a syntax error which requires the insertion of a missing token into the input stream. At the time this method is called, the missing token has not yet been inserted. When this method returns, {@code recognizer} is in error recovery mode.
<p>This method is called when {@link //singleTokenInsertion} identifies single-token insertion as a viable recovery strategy for a mismatched input error.</p>
<p>The default implementation simply returns if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to enter error recovery mode, followed by calling {@link Parser//NotifyErrorListeners}.</p>
@param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException)
This is called by {@link //ReportError} when the exception is a {@link NoViableAltException}.
@see //ReportError
@param recognizer the parser instance @param e the recognition exception
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser)
This method is called to Report a syntax error which requires the removal of a token from the input stream. At the time this method is called, the erroneous symbol is the current {@code LT(1)} symbol and has not yet been removed from the input stream. When this method returns, {@code recognizer} is in error recovery mode.
<p>This method is called when {@link //singleTokenDeletion} identifies single-token deletion as a viable recovery strategy for a mismatched input error.</p>
<p>The default implementation simply returns if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to enter error recovery mode, followed by calling {@link Parser//NotifyErrorListeners}.</p>
@param recognizer the parser instance
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token
This method implements the single-token deletion inline error recovery strategy. It is called by {@link //recoverInline} to attempt to recover from mismatched input. If this method returns nil, the parser and error handler state will not have changed. If this method returns non-nil, {@code recognizer} will <em>not</em> be in error recovery mode since the returned token was a successful Match.
<p>If the single-token deletion is successful, this method calls {@link //ReportUnwantedToken} to Report the error, followed by {@link Parser//consume} to actually "delete" the extraneous token. Then, before returning, {@link //ReportMatch} is called to signal a successful Match.</p>
@param recognizer the parser instance @return the successfully Matched {@link Token} instance if single-token deletion successfully recovers from the mismatched input, otherwise {@code nil}
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool
This method implements the single-token insertion inline error recovery strategy. It is called by {@link //recoverInline} if the single-token deletion strategy fails to recover from the mismatched input. If this method returns {@code true}, {@code recognizer} will be in error recovery mode.
<p>This method determines whether or not single-token insertion is viable by checking if the {@code LA(1)} input symbol could be successfully Matched if it were instead the {@code LA(2)} symbol. If this method returns {@code true}, the caller is responsible for creating and inserting a token with the correct type to produce this behavior.</p>
@param recognizer the parser instance @return {@code true} if single-token insertion is a viable recovery strategy for the current mismatched input, otherwise {@code false}
func (d *DefaultErrorStrategy) Sync(recognizer Parser)
The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure that the current lookahead symbol is consistent with what we were expecting at this point in the ATN. You can call this anytime but ANTLR only generates code to check before subrules/loops and each iteration.
<p>Implements Jim Idle's magic Sync mechanism in closures and optional subrules. E.g.,</p>
<pre> a : Sync ( stuff Sync )* Sync : {consume to what can follow Sync} </pre>
At the start of a sub rule upon error, {@link //Sync} performs single token deletion, if possible. If it can't do that, it bails on the current rule and uses the default error recovery, which consumes until the reSynchronization set of the current rule.
<p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block with an empty alternative), then the expected set includes what follows the subrule.</p>
<p>During loop iteration, it consumes until it sees a token that can start a sub rule or what follows loop. Yes, that is pretty aggressive. We opt to stay in the loop as long as possible.</p>
<p><strong>ORIGINS</strong></p>
<p>Previous versions of ANTLR did a poor job of their recovery within loops. A single mismatch token or missing token would force the parser to bail out of the entire rules surrounding the loop. So, for rule</p>
<pre> classfunc : 'class' ID '{' member* '}' </pre>
input with an extra token between members would force the parser to consume until it found the next class definition rather than the next member definition of the current class.
<p>This functionality costs a little bit of effort because the parser has to compare the token set at the start of the loop and at each iteration. If for some reason speed is suffering for you, you can turn off this functionality by simply overriding this method as a blank { }.</p>
type DiagnosticErrorListener struct {
*DefaultErrorListener
// contains filtered or unexported fields
}func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
type DoubleDict struct {
// contains filtered or unexported fields
}func NewDoubleDict() *DoubleDict
func (d *DoubleDict) Get(a, b int) interface{}
type EmptyPredictionContext struct {
*BaseSingletonPredictionContext
}func NewEmptyPredictionContext() *EmptyPredictionContext
func (e *EmptyPredictionContext) GetParent(index int) PredictionContext
func (e *EmptyPredictionContext) String() string
type EpsilonTransition struct {
*BaseTransition
// contains filtered or unexported fields
}func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition
func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *EpsilonTransition) String() string
type ErrorListener interface {
SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}type ErrorNode interface {
TerminalNode
// contains filtered or unexported methods
}type ErrorNodeImpl struct {
*TerminalNodeImpl
}func NewErrorNodeImpl(token Token) *ErrorNodeImpl
func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{}
type ErrorStrategy interface {
RecoverInline(Parser) Token
Recover(Parser, RecognitionException)
Sync(Parser)
ReportError(Parser, RecognitionException)
ReportMatch(Parser)
// contains filtered or unexported methods
}type FailedPredicateException struct {
*BaseRecognitionException
// contains filtered or unexported fields
}func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException
type FileStream struct {
*InputStream
// contains filtered or unexported fields
}func NewFileStream(fileName string) (*FileStream, error)
func (f *FileStream) GetSourceName() string
type IATNSimulator interface {
SharedContextCache() *PredictionContextCache
ATN() *ATN
DecisionToDFA() []*DFA
}type ILexerATNSimulator interface {
IATNSimulator
Match(input CharStream, mode int) int
GetCharPositionInLine() int
GetLine() int
GetText(input CharStream) string
Consume(input CharStream)
// contains filtered or unexported methods
}type InputMisMatchException struct {
*BaseRecognitionException
}func NewInputMisMatchException(recognizer Parser) *InputMisMatchException
This signifies any kind of mismatched input exceptions such as when the current input does not Match the expected token.
type InputStream struct {
// contains filtered or unexported fields
}func NewInputStream(data string) *InputStream
func (is *InputStream) Consume()
func (*InputStream) GetSourceName() string
func (is *InputStream) GetText(start int, stop int) string
func (is *InputStream) GetTextFromInterval(i *Interval) string
func (is *InputStream) GetTextFromTokens(start, stop Token) string
func (is *InputStream) Index() int
func (is *InputStream) LA(offset int) int
func (is *InputStream) LT(offset int) int
func (is *InputStream) Mark() int
mark/release do nothing; we have the entire buffer
func (is *InputStream) Release(marker int)
func (is *InputStream) Seek(index int)
func (is *InputStream) Size() int
func (is *InputStream) String() string
type InsertAfterOp struct {
BaseRewriteOperation
}func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int
func (op *InsertAfterOp) String() string
type InsertBeforeOp struct {
BaseRewriteOperation
}func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp
func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int
func (op *InsertBeforeOp) String() string
type IntStream interface {
Consume()
LA(int) int
Mark() int
Release(marker int)
Index() int
Seek(index int)
Size() int
GetSourceName() string
}type InterpreterRuleContext interface {
ParserRuleContext
}stop is not included!
type IntervalSet struct {
// contains filtered or unexported fields
}func NewIntervalSet() *IntervalSet
func (i *IntervalSet) String() string
func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string
type LL1Analyzer struct {
// contains filtered or unexported fields
}func NewLL1Analyzer(atn *ATN) *LL1Analyzer
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
* Compute set of tokens that can follow {@code s} in the ATN in the specified {@code ctx}.
<p>If {@code ctx} is {@code nil} and the end of the rule containing {@code s} is reached, {@link Token//EPSILON} is added to the result set. If {@code ctx} is not {@code nil} and the end of the outermost rule is reached, {@link Token//EOF} is added to the result set.</p>
@param s the ATN state @param stopState the ATN state to stop at. This can be a {@link BlockEndState} to detect epsilon paths through a closure. @param ctx the complete parser context, or {@code nil} if the context should be ignored
@return The set of tokens that can follow {@code s} in the ATN in the specified {@code ctx}.
type Lexer interface {
TokenSource
Recognizer
Emit() Token
SetChannel(int)
PushMode(int)
PopMode() int
SetType(int)
SetMode(int)
}type LexerATNConfig struct {
*BaseATNConfig
// contains filtered or unexported fields
}func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig
func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig
func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig
func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig
func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig
func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig
type LexerATNSimulator struct {
*BaseATNSimulator
Line int
CharPositionInLine int
MatchCalls int
// contains filtered or unexported fields
}func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator
func (l *LexerATNSimulator) Consume(input CharStream)
func (l *LexerATNSimulator) GetCharPositionInLine() int
func (l *LexerATNSimulator) GetLine() int
func (l *LexerATNSimulator) GetText(input CharStream) string
Get the text Matched so far for the current token.
func (l *LexerATNSimulator) GetTokenName(tt int) string
func (l *LexerATNSimulator) Match(input CharStream, mode int) int
func (l *LexerATNSimulator) MatchATN(input CharStream) int
type LexerAction interface {
// contains filtered or unexported methods
}type LexerActionExecutor struct {
// contains filtered or unexported fields
}func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor
Creates a {@link LexerActionExecutor} which executes the actions for the input {@code lexerActionExecutor} followed by a specified {@code lexerAction}.
@param lexerActionExecutor The executor for actions already traversed by the lexer while Matching a token within a particular {@link LexerATNConfig}. If this is {@code nil}, the method behaves as though it were an empty executor. @param lexerAction The lexer action to execute after the actions specified in {@code lexerActionExecutor}.
@return A {@link LexerActionExecutor} for executing the combine actions of {@code lexerActionExecutor} and {@code lexerAction}.
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor
type LexerChannelAction struct {
*BaseLexerAction
// contains filtered or unexported fields
}Implements the {@code channel} lexer action by calling {@link Lexer//setChannel} with the assigned channel. Constructs a New{@code channel} action with the specified channel value. @param channel The channel value to pass to {@link Lexer//setChannel}.
func NewLexerChannelAction(channel int) *LexerChannelAction
func (l *LexerChannelAction) String() string
type LexerCustomAction struct {
*BaseLexerAction
// contains filtered or unexported fields
}func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction
type LexerDFASerializer struct {
*DFASerializer
}func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer
func (l *LexerDFASerializer) String() string
type LexerIndexedCustomAction struct {
*BaseLexerAction
// contains filtered or unexported fields
}Constructs a Newindexed custom action by associating a character offset with a {@link LexerAction}.
<p>Note: This class is only required for lexer actions for which {@link LexerAction//isPositionDependent} returns {@code true}.</p>
@param offset The offset into the input {@link CharStream}, relative to the token start index, at which the specified lexer action should be executed. @param action The lexer action to execute at a particular offset in the input {@link CharStream}.
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction
type LexerModeAction struct {
*BaseLexerAction
// contains filtered or unexported fields
}Implements the {@code mode} lexer action by calling {@link Lexer//mode} with the assigned mode.
func NewLexerModeAction(mode int) *LexerModeAction
func (l *LexerModeAction) String() string
type LexerMoreAction struct {
*BaseLexerAction
}func NewLexerMoreAction() *LexerMoreAction
func (l *LexerMoreAction) String() string
type LexerNoViableAltException struct {
*BaseRecognitionException
// contains filtered or unexported fields
}func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException
func (l *LexerNoViableAltException) String() string
type LexerPopModeAction struct {
*BaseLexerAction
}Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
<p>The {@code popMode} command does not have any parameters, so this action is implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
func NewLexerPopModeAction() *LexerPopModeAction
func (l *LexerPopModeAction) String() string
type LexerPushModeAction struct {
*BaseLexerAction
// contains filtered or unexported fields
}Implements the {@code pushMode} lexer action by calling {@link Lexer//pushMode} with the assigned mode.
func NewLexerPushModeAction(mode int) *LexerPushModeAction
func (l *LexerPushModeAction) String() string
type LexerSkipAction struct {
*BaseLexerAction
}Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
<p>The {@code Skip} command does not have any parameters, so this action is implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
func NewLexerSkipAction() *LexerSkipAction
func (l *LexerSkipAction) String() string
type LexerTypeAction struct {
*BaseLexerAction
// contains filtered or unexported fields
}Implements the {@code type} lexer action by calling {@link Lexer//setType}
with the assigned type.
func NewLexerTypeAction(thetype int) *LexerTypeAction
func (l *LexerTypeAction) String() string
type LoopEndState struct {
*BaseATNState
// contains filtered or unexported fields
}LoopEndState marks the end of a * or + loop.
func NewLoopEndState() *LoopEndState
type LoopEndStateIntPair struct {
// contains filtered or unexported fields
}type NoViableAltException struct {
*BaseRecognitionException
// contains filtered or unexported fields
}func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException
Indicates that the parser could not decide which of two or more paths to take based upon the remaining input. It tracks the starting token of the offending input and also knows where the parser was in the various paths when the error occurred. Reported by ReportNoViableAlternative()
type NotSetTransition struct {
*SetTransition
}func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition
func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *NotSetTransition) String() string
type OR struct {
// contains filtered or unexported fields
}func NewOR(a, b SemanticContext) *OR
type OrderedATNConfigSet struct {
*BaseATNConfigSet
}func NewOrderedATNConfigSet() *OrderedATNConfigSet
type ParseCancellationException struct {
}func NewParseCancellationException() *ParseCancellationException
type ParseTree interface {
SyntaxTree
Accept(Visitor ParseTreeVisitor) interface{}
GetText() string
ToStringTree([]string, Recognizer) string
}type ParseTreeListener interface {
VisitTerminal(node TerminalNode)
VisitErrorNode(node ErrorNode)
EnterEveryRule(ctx ParserRuleContext)
ExitEveryRule(ctx ParserRuleContext)
}type ParseTreeVisitor interface {
Visit(tree ParseTree) interface{}
VisitChildren(node RuleNode) interface{}
VisitTerminal(node TerminalNode) interface{}
VisitErrorNode(node ErrorNode) interface{}
}type ParseTreeWalker struct {
}func NewParseTreeWalker() *ParseTreeWalker
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode)
The discovery of a rule node involves sending two events: the generic {@link ParseTreeListener//EnterEveryRule} and a {@link RuleContext}-specific event. First we trigger the generic and then the rule specific. We do them in reverse order upon finishing the node.
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode)
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree)
type Parser interface {
Recognizer
GetInterpreter() *ParserATNSimulator
GetTokenStream() TokenStream
GetTokenFactory() TokenFactory
GetParserRuleContext() ParserRuleContext
SetParserRuleContext(ParserRuleContext)
Consume() Token
GetParseListeners() []ParseTreeListener
GetErrorHandler() ErrorStrategy
SetErrorHandler(ErrorStrategy)
GetInputStream() IntStream
GetCurrentToken() Token
GetExpectedTokens() *IntervalSet
NotifyErrorListeners(string, Token, RecognitionException)
IsExpectedToken(int) bool
GetPrecedence() int
GetRuleInvocationStack(ParserRuleContext) []string
}type ParserATNSimulator struct {
*BaseATNSimulator
// contains filtered or unexported fields
}func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator
func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int
func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int
func (p *ParserATNSimulator) GetPredictionMode() int
func (p *ParserATNSimulator) GetTokenName(t int) string
func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
If context sensitive parsing, we know it's ambiguity not conflict//
func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int)
func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int)
func (p *ParserATNSimulator) SetPredictionMode(v int)
type ParserRuleContext interface {
RuleContext
SetException(RecognitionException)
AddTokenNode(token Token) *TerminalNodeImpl
AddErrorNode(badToken Token) *ErrorNodeImpl
EnterRule(listener ParseTreeListener)
ExitRule(listener ParseTreeListener)
SetStart(Token)
GetStart() Token
SetStop(Token)
GetStop() Token
AddChild(child RuleContext) RuleContext
RemoveLastChild()
}type PlusBlockStartState struct {
*BaseBlockStartState
// contains filtered or unexported fields
}PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a decision state; we don't use it for code generation. Somebody might need it, it is included for completeness. In reality, PlusLoopbackState is the real decision-making node for A+.
func NewPlusBlockStartState() *PlusBlockStartState
type PlusLoopbackState struct {
*BaseDecisionState
}PlusLoopbackState is a decision state for A+ and (A|B)+. It has two transitions: one to the loop back to start of the block, and one to exit.
func NewPlusLoopbackState() *PlusLoopbackState
type PrecedencePredicate struct {
// contains filtered or unexported fields
}func NewPrecedencePredicate(precedence int) *PrecedencePredicate
func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate
func (p *PrecedencePredicate) String() string
type PrecedencePredicateTransition struct {
*BaseAbstractPredicateTransition
// contains filtered or unexported fields
}func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition
func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *PrecedencePredicateTransition) String() string
type PredPrediction struct {
// contains filtered or unexported fields
}PredPrediction maps a predicate to a predicted alternative.
func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction
func (p *PredPrediction) String() string
type Predicate struct {
// contains filtered or unexported fields
}type PredicateTransition struct {
*BaseAbstractPredicateTransition
// contains filtered or unexported fields
}func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition
func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *PredicateTransition) String() string
type PredictionContext interface {
GetParent(int) PredictionContext
String() string
// contains filtered or unexported methods
}func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext
type PredictionContextCache struct {
// contains filtered or unexported fields
}func NewPredictionContextCache() *PredictionContextCache
func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext
type ProxyErrorListener struct {
*DefaultErrorListener
// contains filtered or unexported fields
}func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
type RangeTransition struct {
*BaseTransition
// contains filtered or unexported fields
}func NewRangeTransition(target ATNState, start, stop int) *RangeTransition
func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *RangeTransition) String() string
type RecognitionException interface {
GetOffendingToken() Token
GetMessage() string
GetInputStream() IntStream
}type Recognizer interface {
GetLiteralNames() []string
GetSymbolicNames() []string
GetRuleNames() []string
Sempred(RuleContext, int, int) bool
Precpred(RuleContext, int) bool
GetState() int
SetState(int)
Action(RuleContext, int, int)
AddErrorListener(ErrorListener)
RemoveErrorListeners()
GetATN() *ATN
GetErrorListenerDispatch() ErrorListener
}type ReplaceOp struct {
BaseRewriteOperation
LastIndex int
}I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp instructions.
func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp
type RewriteOperation interface {
// Execute the rewrite operation by possibly adding to the buffer.
// Return the index of the next token to operate on.
Execute(buffer *bytes.Buffer) int
String() string
GetInstructionIndex() int
GetIndex() int
GetText() string
GetOpName() string
GetTokens() TokenStream
SetInstructionIndex(val int)
SetIndex(int)
SetText(string)
SetOpName(string)
SetTokens(TokenStream)
}type RuleContext interface {
RuleNode
GetInvokingState() int
SetInvokingState(int)
GetRuleIndex() int
IsEmpty() bool
GetAltNumber() int
SetAltNumber(altNumber int)
String([]string, RuleContext) string
}type RuleNode interface {
ParseTree
GetRuleContext() RuleContext
GetBaseRuleContext() *BaseRuleContext
}type RuleStartState struct {
*BaseATNState
// contains filtered or unexported fields
}func NewRuleStartState() *RuleStartState
type RuleStopState struct {
*BaseATNState
}RuleStopState is the last node in the ATN for a rule, unless that rule is the start symbol. In that case, there is one transition to EOF. Later, we might encode references to all calls to this rule to compute FOLLOW sets for error handling.
func NewRuleStopState() *RuleStopState
type RuleTransition struct {
*BaseTransition
// contains filtered or unexported fields
}func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition
func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
func SemanticContextandContext(a, b SemanticContext) SemanticContext
func SemanticContextorContext(a, b SemanticContext) SemanticContext
type Set struct {
// contains filtered or unexported fields
}func NewSet(
hashcodeFunction func(interface{}) int,
equalsFunction func(interface{}, interface{}) bool) *Settype SetTransition struct {
*BaseTransition
}func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition
func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *SetTransition) String() string
type SimState struct {
// contains filtered or unexported fields
}type SingletonPredictionContext interface {
PredictionContext
}type StarBlockStartState struct {
*BaseBlockStartState
}StarBlockStartState is the block that begins a closure loop.
func NewStarBlockStartState() *StarBlockStartState
type StarLoopEntryState struct {
*BaseDecisionState
// contains filtered or unexported fields
}func NewStarLoopEntryState() *StarLoopEntryState
type StarLoopbackState struct {
*BaseATNState
}func NewStarLoopbackState() *StarLoopbackState
type TerminalNodeImpl struct {
// contains filtered or unexported fields
}func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl
func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{}
func (t *TerminalNodeImpl) GetChild(i int) Tree
func (t *TerminalNodeImpl) GetChildCount() int
func (t *TerminalNodeImpl) GetChildren() []Tree
func (t *TerminalNodeImpl) GetParent() Tree
func (t *TerminalNodeImpl) GetPayload() interface{}
func (t *TerminalNodeImpl) GetSourceInterval() *Interval
func (t *TerminalNodeImpl) GetSymbol() Token
func (t *TerminalNodeImpl) GetText() string
func (t *TerminalNodeImpl) SetChildren(tree []Tree)
func (t *TerminalNodeImpl) SetParent(tree Tree)
func (t *TerminalNodeImpl) String() string
func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string
type Token interface {
GetSource() *TokenSourceCharStreamPair
GetTokenType() int
GetChannel() int
GetStart() int
GetStop() int
GetLine() int
GetColumn() int
GetText() string
SetText(s string)
GetTokenIndex() int
SetTokenIndex(v int)
GetTokenSource() TokenSource
GetInputStream() CharStream
}type TokenFactory interface {
Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}TokenFactory creates CommonToken objects.
type TokenSource interface {
NextToken() Token
Skip()
More()
GetLine() int
GetCharPositionInLine() int
GetInputStream() CharStream
GetSourceName() string
GetTokenFactory() TokenFactory
// contains filtered or unexported methods
}type TokenSourceCharStreamPair struct {
// contains filtered or unexported fields
}type TokenStream interface {
IntStream
LT(k int) Token
Get(index int) Token
GetTokenSource() TokenSource
SetTokenSource(TokenSource)
GetAllText() string
GetTextFromInterval(*Interval) string
GetTextFromRuleContext(RuleContext) string
GetTextFromTokens(Token, Token) string
}type TokenStreamRewriter struct {
// contains filtered or unexported fields
}func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter
func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation)
func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int)
func (tsr *TokenStreamRewriter) DeleteDefault(from, to int)
func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int)
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string)
Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgramDefault()
func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token)
func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token)
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int
func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation
func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string
Return the text from the original tokens altered per the instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTextDefault() string
Return the text from the original tokens altered per the instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream
func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation
func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string)
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string)
func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string)
func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string)
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string)
func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string)
func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string)
func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string)
func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string)
func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string)
func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string)
func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string)
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int)
Rollback the instruction stream for a program so that the indicated instruction (via instructionIndex) is no longer in the stream. UNTESTED!
func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int)
func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int)
type TokensStartState struct {
*BaseDecisionState
}TokensStartState is the Tokens rule start state linking to each lexer rule start state.
func NewTokensStartState() *TokensStartState
type TraceListener struct {
// contains filtered or unexported fields
}func NewTraceListener(parser *BaseParser) *TraceListener
func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext)
func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext)
func (t *TraceListener) VisitErrorNode(_ ErrorNode)
func (t *TraceListener) VisitTerminal(node TerminalNode)
type Transition interface {
Matches(int, int, int) bool
// contains filtered or unexported methods
}type Tree interface {
GetParent() Tree
SetParent(Tree)
GetPayload() interface{}
GetChild(i int) Tree
GetChildCount() int
GetChildren() []Tree
}Return ordered list of all children of this node
Return a list of all ancestors of this node. The first node of
list is the root and the last is the parent of this node.
type WildcardTransition struct {
*BaseTransition
}func NewWildcardTransition(target ATNState) *WildcardTransition
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (t *WildcardTransition) String() string
Package antlr imports 12 packages (graph) and is imported by 143 packages. Updated 2018-08-04. Refresh now. Tools for package owners.