From 8d4393fe815926bd8fe4e82a0bfba63b36a934e9 Mon Sep 17 00:00:00 2001
From: Joakker
-// This implementation prints messages to {@link System//err} containing the
-// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
-// the following format.
-// The default implementation simply calls {@link //endErrorCondition} to
-// ensure that the handler is not in error recovery mode.
-// The default implementation simply calls {@link //endErrorCondition}.
-// The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.
-// The default implementation reSynchronizes the parser by consuming tokens
+// Recover reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.
-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g., If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// If the sub rule is optional ((...)?, (...)*, or a block
// with an empty alternative), then the expected set includes what follows
-// the subrule.
-// During loop iteration, it consumes until it sees a token that can start a
+// During loop iteration, it consumes until it sees a token that can start a
// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
-// ORIGINS
-// Previous versions of ANTLR did a poor job of their recovery within loops.
+// Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
-// This functionality cost a little bit of effort because the parser has to
+// This functionality costs a little bit of effort because the parser has to
// compare token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off this
-// functionality by simply overriding d method as a blank { }.
-// This method is called when {@link //singleTokenDeletion} identifies
+// This method is called when //singleTokenDeletion identifies
// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls //beginErrorCondition to
// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-// This method is called when {@link //singleTokenInsertion} identifies
+// This method is called when //singleTokenInsertion identifies
// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls //beginErrorCondition to
// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-// The default implementation attempts to recover from the mismatched input
+// RecoverInline attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
// recovery attempt fails, this method panics with an
-// {@link InputMisMatchException}.
-// EXTRA TOKEN (single token deletion)
-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.
-// This recovery strategy is implemented by {@link //singleTokenDeletion}.
-// MISSING TOKEN (single token insertion)
-// If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
+// If the current token (at LA(1)) is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's TokenFactory to create it on the fly. The
// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.
-// This recovery strategy is implemented by {@link //singleTokenInsertion}.
-// EXAMPLE
-// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// call chain:
-// This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If d method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce d behavior.
-// If the single-token deletion is successful, d method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //ReportMatch} is called to signal a successful
-// Match.
-// This error strategy is useful in the following scenarios.
-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
-// If the set of expected tokens is not known and could not be computed,
-// b method returns {@code nil}.
-//
-
+// DiagnosticErrorListener can be used to identify potential correctness and
+// performance problems in grammars. "Reports" are made by calling
+// Parser//NotifyErrorListeners with the appropriate message whenever the
+// situation occurs.
type DiagnosticErrorListener struct {
*DefaultErrorListener
exactOnly bool
}
+// NewDiagnosticErrorListener returns a new instance of DiagnosticErrorListener.
+// Whether all ambiguities or only exact ambiguities are Reported is
+// controlled by exactOnly.
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
-
- n := new(DiagnosticErrorListener)
-
- // whether all ambiguities or only exact ambiguities are Reported.
- n.exactOnly = exactOnly
- return n
+ return &DiagnosticErrorListener{exactOnly: exactOnly}
}
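
+// A minimal usage sketch, assuming a generated parser p:
+//
+//	p.AddErrorListener(NewDiagnosticErrorListener(true))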
-func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+// ReportAmbiguity reports a parsing ambiguity.
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *bitSet, configs ATNConfigSet) {
if d.exactOnly && !exact {
return
}
@@ -55,7 +54,8 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+// ReportAttemptingFullContext reports attempting full context.
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *bitSet, configs ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
@@ -64,6 +64,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
recognizer.NotifyErrorListeners(msg, nil, nil)
}
+// ReportContextSensitivity reports context sensitivity.
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
@@ -95,15 +96,15 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
// Reported by the parser.
// @param configs The conflicting or ambiguous configuration set.
-// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
-// returns the set of alternatives represented in {@code configs}.
+// @return Returns ReportedAlts if it is not nil, otherwise
+// returns the set of alternatives represented in configs.
//
-func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *bitSet, s ATNConfigSet) *bitSet {
if ReportedAlts != nil {
return ReportedAlts
}
- result := NewBitSet()
- for _, c := range set.GetItems() {
+ result := newBitSet()
+ for _, c := range s.GetItems() {
result.add(c.GetAlt())
}
diff --git a/runtime/Go/antlr/doc.go b/runtime/Go/antlr/doc.go
new file mode 100644
index 0000000000..b3bdf71a84
--- /dev/null
+++ b/runtime/Go/antlr/doc.go
@@ -0,0 +1,45 @@
+// Package antlr provides the runtime implementation for recognizers generated
+// by the antlr4 tool.
+//
+// Creating Go recognizers
+//
+// You can easily generate a Go recognizer by passing the -Dlanguage=Go option
+// to the command-line tool:
+//
+// $ ls
+// JSON5.g4
+// $ antlr4 -Dlanguage=Go JSON5.g4
+// $ ls
+// JSON5.g4 JSON5.tokens json5_lexer.go json5_parser.go
+// JSON5Lexer.tokens JSON5.interp json5_base_listener.go json5_listener.go
+// JSON5Lexer.interp
+//
+// Usage
+//
+// To print the parse tree using the JSON5 parser generated above:
+//
+// package main
+//
+// import (
+// "fmt"
+//
+// // Import the antlr runtime using this path
+// "github.com/antlr/antlr4/runtime/Go/antlr"
+//
+// // Suppose your parser is in this package
+// "github.com/user/my-project/parser"
+// )
+//
+// func main() {
+// is := antlr.NewInputStream(`{"hello": "world"}`)
+// lx := parser.NewJSON5Lexer(is)
+// ts := antlr.NewCommonTokenStream(lx, antlr.TokenDefaultChannel)
+// pr := parser.NewJSON5Parser(ts)
+//
+// fmt.Println(pr.Json5().ToStringTree(pr.RuleNames, pr))
+// }
+//
+// This will print:
+//
+// (json5 (value (obj { (pair (key "hello") : (value "world")) })))
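+//
+// To walk the tree with a listener instead, a sketch (MyListener is a
+// hypothetical type embedding parser.BaseJSON5Listener):
+//
+// tree := pr.Json5()
+// antlr.ParseTreeWalkerDefault.Walk(&MyListener{}, tree)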
-// line line:charPositionInLine msg
-//
+// line line:charPositionInLine msg
//
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
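
+// A minimal usage sketch, assuming a generated parser p (a console listener
+// is already attached to every recognizer by default):
+//
+//	p.RemoveErrorListeners()
+//	p.AddErrorListener(NewConsoleErrorListener())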
+// ProxyErrorListener delegates its calls to other error listeners.
type ProxyErrorListener struct {
*DefaultErrorListener
delegates []ErrorListener
}
+// NewProxyErrorListener returns a new instance of ProxyErrorListener.
func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
if delegates == nil {
panic("delegates is not provided")
}
- l := new(ProxyErrorListener)
- l.delegates = delegates
- return l
+ return &ProxyErrorListener{delegates: delegates}
}
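
+// A minimal sketch, assuming two existing ErrorListener values a and b; the
+// proxy forwards each callback to both of them:
+//
+//	proxy := NewProxyErrorListener([]ErrorListener{a, b})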
+// SyntaxError reports a syntax error in the source.
func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
for _, d := range p.delegates {
d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
}
}
-func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+// ReportAmbiguity reports a parsing ambiguity.
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *bitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
-func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+// ReportAttemptingFullContext reports attempting a full context.
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *bitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
+// ReportContextSensitivity reports context sensitivity.
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
diff --git a/runtime/Go/antlr/error_strategy.go b/runtime/Go/antlr/error_strategy.go
index 977a6e4549..50ffb61d9b 100644
--- a/runtime/Go/antlr/error_strategy.go
+++ b/runtime/Go/antlr/error_strategy.go
@@ -11,6 +11,7 @@ import (
"strings"
)
+// ErrorStrategy is used for error reporting and recovery.
type ErrorStrategy interface {
reset(Parser)
RecoverInline(Parser) Token
@@ -21,42 +22,38 @@ type ErrorStrategy interface {
ReportMatch(Parser)
}
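
+// A sketch of a custom strategy that embeds the default one and overrides a
+// single method (myStrategy is a hypothetical type, shown for illustration):
+//
+//	type myStrategy struct{ *DefaultErrorStrategy }
+//
+//	func (s *myStrategy) ReportError(p Parser, e RecognitionException) {
+//		// custom logging could go here, then defer to the default behavior
+//		s.DefaultErrorStrategy.ReportError(p, e)
+//	}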
-// This is the default implementation of {@link ANTLRErrorStrategy} used for
-// error Reporting and recovery in ANTLR parsers.
-//
+// DefaultErrorStrategy is the default implementation of ErrorStrategy.
type DefaultErrorStrategy struct {
- errorRecoveryMode bool
- lastErrorIndex int
- lastErrorStates *IntervalSet
-}
-
-var _ ErrorStrategy = &DefaultErrorStrategy{}
-
-func NewDefaultErrorStrategy() *DefaultErrorStrategy {
-
- d := new(DefaultErrorStrategy)
-
// Indicates whether the error strategy is currently "recovering from an
// error". This is used to suppress Reporting multiple error messages while
// attempting to recover from a detected syntax error.
//
// @see //inErrorRecoveryMode
//
- d.errorRecoveryMode = false
-
+ errorRecoveryMode bool
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
// ad nauseum. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
- d.lastErrorIndex = -1
- d.lastErrorStates = nil
- return d
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+// NewDefaultErrorStrategy returns a new DefaultErrorStrategy.
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+ return &DefaultErrorStrategy{
+ errorRecoveryMode: false,
+ lastErrorIndex: -1,
+ lastErrorStates: nil,
+ }
}
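
+// This strategy is installed on parsers by default; a sketch of restoring it
+// after swapping in another strategy, assuming a generated parser p:
+//
+//	p.SetErrorHandler(NewDefaultErrorStrategy())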
-//
-//
+// ReportError reports the given error, dispatching on the runtime type of e:
+//
+// · NoViableAltException: dispatches the call to //ReportNoViableAlternative
+// · InputMisMatchException: dispatches the call to //ReportInputMisMatch
+// · FailedPredicateException: dispatches the call to //ReportFailedPredicate
+// · All other types: calls Parser//NotifyErrorListeners to Report
+// the exception
//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
@@ -137,11 +130,9 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
}
}
-// {@inheritDoc}
-//
-//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
//
-// At the start of a sub rule upon error, {@link //Sync} performs single
+// At the start of a sub rule upon error, //Sync performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
-//
-// classfunc : 'class' ID '{' member* '}'
-//
+// classfunc : 'class' ID '{' member* '}'
//
-// input with an extra token between members would force the parser to
+// Input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
-//
-// stat &rarr expr &rarr atom
-//
+// stat -> expr -> atom
//
-// and it will be trying to Match the {@code ')'} at d point in the
+// and it will be trying to Match the ')' at this point in the
// derivation:
//
-//
-// => ID '=' '(' INT ')' ('+' atom)* ''
-// ^
-//
+// => ID '=' '(' INT ')' ('+' atom)* <EOF>
+// ^
//
-// The attempt to Match {@code ')'} will fail when it sees {@code ''} and
-// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
-// is in the set of tokens that can follow the {@code ')'} token reference
-// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+// The attempt to Match ')' will fail when it sees <EOF> and
+// calls //recoverInline. To recover, it sees that LA(1)==<EOF>
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
//
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
@@ -418,22 +401,21 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
panic(NewInputMisMatchException(recognizer))
}
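
+// For illustration, Match relies on this method roughly as follows (a
+// simplified sketch of the generated-code path, not the exact source):
+//
+//	t := p.GetCurrentToken()
+//	if t.GetTokenType() != ttype {
+//		t = p.errHandler.RecoverInline(p)
+//	}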
-//
-// This method implements the single-token insertion inline error recovery
-// strategy. It is called by {@link //recoverInline} if the single-token
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by //recoverInline if the single-token
// deletion strategy fails to recover from the mismatched input. If this
-// method returns {@code true}, {@code recognizer} will be in error recovery
+// method returns true, recognizer will be in error recovery
// mode.
//
-//
-//
+// When syntax errors are not being Reported or logged and the parse result
+// is simply ignored if errors occur, the BailErrorStrategy avoids wasting
+// work on recovering from errors when the result will be ignored either way.
+//
//
-//
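+//
+// A minimal usage sketch, assuming a generated parser p:
+//
+//	p.SetErrorHandler(NewBailErrorStrategy())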
-// The {@code Skip} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The Skip command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by //INSTANCE.
 type LexerSkipAction struct {
 	*BaseLexerAction
 }

+// NewLexerSkipAction returns a new instance of LexerSkipAction.
 func NewLexerSkipAction() *LexerSkipAction {
-	la := new(LexerSkipAction)
-	la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
-	return la
+	return &LexerSkipAction{
+		BaseLexerAction: NewBaseLexerAction(LexerActionTypeSkip),
+	}
 }

-// Provides a singleton instance of l parameterless lexer action.
+// LexerSkipActionINSTANCE provides a singleton instance of this parameterless
+// lexer action.
 var LexerSkipActionINSTANCE = NewLexerSkipAction()

 func (l *LexerSkipAction) execute(lexer Lexer) {
@@ -85,7 +87,7 @@ func (l *LexerSkipAction) String() string {
 	return "skip"
 }

-// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+// LexerTypeAction implements the type lexer action by calling Lexer//setType
 // with the assigned type.
 type LexerTypeAction struct {
 	*BaseLexerAction
@@ -93,11 +95,12 @@ type LexerTypeAction struct {
 	thetype int
 }

+// NewLexerTypeAction returns a new instance of LexerTypeAction.
 func NewLexerTypeAction(thetype int) *LexerTypeAction {
-	l := new(LexerTypeAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
-	l.thetype = thetype
-	return l
+	return &LexerTypeAction{
+		BaseLexerAction: NewBaseLexerAction(LexerActionTypeType),
+		thetype:         thetype,
+	}
 }

 func (l *LexerTypeAction) execute(lexer Lexer) {
@@ -125,25 +128,24 @@ func (l *LexerTypeAction) String() string {
 	return "actionType(" + strconv.Itoa(l.thetype) + ")"
 }

-// Implements the {@code pushMode} lexer action by calling
-// {@link Lexer//pushMode} with the assigned mode.
+// LexerPushModeAction implements the pushMode lexer action by calling
+// Lexer//pushMode with the assigned mode.
 type LexerPushModeAction struct {
 	*BaseLexerAction
 	mode int
 }

+// NewLexerPushModeAction returns a new instance of LexerPushModeAction.
 func NewLexerPushModeAction(mode int) *LexerPushModeAction {
-
-	l := new(LexerPushModeAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
-
-	l.mode = mode
-	return l
+	return &LexerPushModeAction{
+		BaseLexerAction: NewBaseLexerAction(LexerActionTypePushMode),
+		mode:            mode,
+	}
 }

-//This action is implemented by calling {@link Lexer//pushMode} with the
-// value provided by {@link //getMode}.
+// This action is implemented by calling Lexer//pushMode with the
+// value provided by //getMode.
 func (l *LexerPushModeAction) execute(lexer Lexer) {
 	lexer.PushMode(l.mode)
 }
@@ -169,26 +171,25 @@ func (l *LexerPushModeAction) String() string {
 	return "pushMode(" + strconv.Itoa(l.mode) + ")"
 }

-// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
+// LexerPopModeAction implements the popMode lexer action by calling Lexer//popMode.
 //
-//The {@code popMode} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The popMode command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by //INSTANCE.
 type LexerPopModeAction struct {
 	*BaseLexerAction
 }

+// NewLexerPopModeAction returns a new instance of LexerPopModeAction.
 func NewLexerPopModeAction() *LexerPopModeAction {
-
-	l := new(LexerPopModeAction)
-
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
-
-	return l
+	return &LexerPopModeAction{
+		BaseLexerAction: NewBaseLexerAction(LexerActionTypePopMode),
+	}
 }

+// LexerPopModeActionINSTANCE provides a singleton instance of this
+// parameterless lexer action.
 var LexerPopModeActionINSTANCE = NewLexerPopModeAction()

-//This action is implemented by calling {@link Lexer//popMode}.
+// This action is implemented by calling Lexer//popMode.
 func (l *LexerPopModeAction) execute(lexer Lexer) {
 	lexer.PopMode()
 }
@@ -197,25 +198,25 @@ func (l *LexerPopModeAction) String() string {
 	return "popMode"
 }

-// Implements the {@code more} lexer action by calling {@link Lexer//more}.
+// LexerMoreAction implements the more lexer action by calling Lexer//more.
 //
-//The {@code more} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
-
+// The more command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by //INSTANCE.
 type LexerMoreAction struct {
 	*BaseLexerAction
 }

+// NewLexerMoreAction returns a new instance of LexerMoreAction.
 func NewLexerMoreAction() *LexerMoreAction {
-	l := new(LexerMoreAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
-
-	return l
+	return &LexerMoreAction{
+		BaseLexerAction: NewBaseLexerAction(LexerActionTypeMore),
+	}
 }

+// LexerMoreActionINSTANCE provides a singleton instance of this parameterless
+// lexer action.
 var LexerMoreActionINSTANCE = NewLexerMoreAction()

-//This action is implemented by calling {@link Lexer//popMode}.
+// This action is implemented by calling Lexer//More.
 func (l *LexerMoreAction) execute(lexer Lexer) {
 	lexer.More()
 }
@@ -224,7 +225,7 @@ func (l *LexerMoreAction) String() string {
 	return "more"
 }

-// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
+// LexerModeAction implements the mode lexer action by calling Lexer//mode with
 // the assigned mode.
 type LexerModeAction struct {
 	*BaseLexerAction
@@ -232,15 +233,16 @@ type LexerModeAction struct {
 	mode int
 }

+// NewLexerModeAction returns a new instance of LexerModeAction.
 func NewLexerModeAction(mode int) *LexerModeAction {
-	l := new(LexerModeAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
-	l.mode = mode
-	return l
+	return &LexerModeAction{
+		BaseLexerAction: NewBaseLexerAction(LexerActionTypeMode),
+		mode:            mode,
+	}
 }

-//This action is implemented by calling {@link Lexer//mode} with the
-// value provided by {@link //getMode}.
+// This action is implemented by calling Lexer//mode with the
+// value provided by //getMode.
 func (l *LexerModeAction) execute(lexer Lexer) {
 	lexer.SetMode(l.mode)
 }
@@ -266,39 +268,41 @@ func (l *LexerModeAction) String() string {
 	return "mode(" + strconv.Itoa(l.mode) + ")"
 }

-// Executes a custom lexer action by calling {@link Recognizer//action} with the
-// rule and action indexes assigned to the custom action. The implementation of
-// a custom action is added to the generated code for the lexer in an override
-// of {@link Recognizer//action} when the grammar is compiled.
+// LexerCustomAction executes a custom lexer action by calling
+// Recognizer//action with the rule and action indexes assigned to the custom
+// action. The implementation of a custom action is added to the generated code
+// for the lexer in an override of Recognizer//action when the grammar is
+// compiled.
 //
-//This class may represent embedded actions created with the {...}
+// This class may represent embedded actions created with the {...}
// syntax in ANTLR 4, as well as actions created for lexer commands where the
-// command argument could not be evaluated when the grammar was compiled.
-// Custom actions are implemented by calling {@link Lexer//action} with the
-// appropriate rule and action indexes.
+// Custom actions are implemented by calling Lexer//action with the
+// appropriate rule and action indexes.
 func (l *LexerCustomAction) execute(lexer Lexer) {
 	lexer.Action(nil, l.ruleIndex, l.actionIndex)
 }
@@ -321,25 +325,26 @@ func (l *LexerCustomAction) equals(other LexerAction) bool {
 	}
 }

-// Implements the {@code channel} lexer action by calling
-// {@link Lexer//setChannel} with the assigned channel.
-// Constructs a New{@code channel} action with the specified channel value.
-// @param channel The channel value to pass to {@link Lexer//setChannel}.
+// LexerChannelAction implements the channel lexer action by calling
+// Lexer//setChannel with the assigned channel.
+// It constructs a new channel action with the specified channel value.
+// @param channel The channel value to pass to Lexer//setChannel.
 type LexerChannelAction struct {
 	*BaseLexerAction
 	channel int
 }

+// NewLexerChannelAction returns a new instance of LexerChannelAction.
 func NewLexerChannelAction(channel int) *LexerChannelAction {
-	l := new(LexerChannelAction)
-	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
-	l.channel = channel
-	return l
+	return &LexerChannelAction{
+		BaseLexerAction: NewBaseLexerAction(LexerActionTypeChannel),
+		channel:         channel,
+	}
 }

-//This action is implemented by calling {@link Lexer//setChannel} with the
-// value provided by {@link //getChannel}.
+// This action is implemented by calling Lexer//setChannel with the
+// value provided by //getChannel.
 func (l *LexerChannelAction) execute(lexer Lexer) {
 	lexer.SetChannel(l.channel)
 }
@@ -365,26 +370,26 @@ func (l *LexerChannelAction) String() string {
 	return "channel(" + strconv.Itoa(l.channel) + ")"
 }

-// This implementation of {@link LexerAction} is used for tracking input offsets
-// for position-dependent actions within a {@link LexerActionExecutor}.
+// LexerIndexedCustomAction is used for tracking input offsets
+// for position-dependent actions within a LexerActionExecutor.
 //
-//This action is not serialized as part of the ATN, and is only required for
+// This action is not serialized as part of the ATN, and is only required for
 // position-dependent lexer actions which appear at a location other than the
 // end of a rule. For more information about DFA optimizations employed for
-// lexer actions, see {@link LexerActionExecutor//append} and
-// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+// lexer actions, see LexerActionExecutor//append and
+// LexerActionExecutor//fixOffsetBeforeMatch.

-// Constructs a Newindexed custom action by associating a character offset
-// with a {@link LexerAction}.
+// LexerIndexedCustomAction constructs a new indexed custom action by
+// associating a character offset with a LexerAction.
 //
-//Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}.
+// Note: This class is only required for lexer actions for which
+// LexerAction//isPositionDependent returns true.
 //
-// @param offset The offset into the input {@link CharStream}, relative to
+// @param offset The offset into the input CharStream, relative to
 // the token start index, at which the specified lexer action should be
 // executed.
 // @param action The lexer action to execute at a particular offset in the
-// input {@link CharStream}.
+// input CharStream.
 type LexerIndexedCustomAction struct {
 	*BaseLexerAction
@@ -393,20 +398,19 @@ type LexerIndexedCustomAction struct {
 	isPositionDependent bool
 }

+// NewLexerIndexedCustomAction returns a new instance of
+// LexerIndexedCustomAction.
 func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
-
-	l := new(LexerIndexedCustomAction)
-	l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
-
-	l.offset = offset
-	l.lexerAction = lexerAction
-	l.isPositionDependent = true
-
-	return l
+	return &LexerIndexedCustomAction{
+		BaseLexerAction:     NewBaseLexerAction(lexerAction.getActionType()),
+		offset:              offset,
+		lexerAction:         lexerAction,
+		isPositionDependent: true,
+	}
 }

-//This method calls {@link //execute} on the result of {@link //getAction}
-// using the provided {@code lexer}.
+// This method calls //execute on the result of //getAction
+// using the provided lexer.
-
+// not cause bloating of the DFA created for the lexer.
 type LexerActionExecutor struct {
-	lexerActions []LexerAction
-	cachedHash   int
+	lexerActions []LexerAction
+
+	// Caches the result of //hashCode since the hash code is an element
+	// of the performance-critical LexerATNConfig//hashCode operation.
+	cachedHash int
 }

+// NewLexerActionExecutor returns a new instance of LexerActionExecutor.
 func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
 	if lexerActions == nil {
 		lexerActions = make([]LexerAction, 0)
 	}

-	l := new(LexerActionExecutor)
-
-	l.lexerActions = lexerActions
+	l := &LexerActionExecutor{
+		lexerActions: lexerActions,
+		cachedHash:   murmurInit(57),
+	}

-	// Caches the result of {@link //hashCode} since the hash code is an element
-	// of the performance-critical {@link LexerATNConfig//hashCode} operation.
-	l.cachedHash = murmurInit(57)
 	for _, a := range lexerActions {
 		l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
 	}
@@ -36,19 +37,19 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
 	return l
 }

-// Creates a {@link LexerActionExecutor} which executes the actions for
-// the input {@code lexerActionExecutor} followed by a specified
-// {@code lexerAction}.
+// LexerActionExecutorappend creates a LexerActionExecutor which executes the
+// actions for the input lexerActionExecutor followed by a specified
+// lexerAction.
 //
 // @param lexerActionExecutor The executor for actions already traversed by
 // the lexer while Matching a token within a particular
-// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
+// LexerATNConfig. If this is nil, the method behaves as
 // though it were an empty executor.
 // @param lexerAction The lexer action to execute after the actions
-// specified in {@code lexerActionExecutor}.
+// specified in lexerActionExecutor.
 //
-// @return A {@link LexerActionExecutor} for executing the combine actions
-// of {@code lexerActionExecutor} and {@code lexerAction}.
+// @return A LexerActionExecutor for executing the combined actions
+// of lexerActionExecutor and lexerAction.
 func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
 	if lexerActionExecutor == nil {
 		return NewLexerActionExecutor([]LexerAction{lexerAction})
@@ -57,32 +58,32 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc
 	return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
 }

-// Creates a {@link LexerActionExecutor} which encodes the current offset
+// Creates a LexerActionExecutor which encodes the current offset
 // for position-dependent lexer actions.
 //
-//Normally, when the executor encounters lexer actions where
-// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
-// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// Normally, when the executor encounters lexer actions where
+// LexerAction//isPositionDependent returns true, it calls
+// IntStream//seek on the input CharStream to set the input
 // position to the end of the current token. This behavior provides
 // for efficient DFA representation of lexer actions which appear at the end
 // of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
+// characters.
 //
-//Prior to traversing a Match transition in the ATN, the current offset
+// Prior to traversing a Match transition in the ATN, the current offset
 // from the token start index is assigned to all position-dependent lexer
 // actions which have not already been assigned a fixed offset. By storing
 // the offsets relative to the token start index, the DFA representation of
 // lexer actions which appear in the middle of tokens remains efficient due
 // to sharing among tokens of the same length, regardless of their absolute
-// position in the input stream.
+// position in the input stream.
 //
-//If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns {@code this}.
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns this.
 //
 // @param offset The current offset to assign to all position-dependent
 // lexer actions which do not already have offsets assigned.
 //
-// @return A {@link LexerActionExecutor} which stores input stream offsets
+// @return A LexerActionExecutor which stores input stream offsets
 // for all position-dependent lexer actions.
 // /
 func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
@@ -109,23 +110,22 @@ func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecu
 }

 // Execute the actions encapsulated by this executor within the context of a
-// particular {@link Lexer}.
+// particular Lexer.
 //
-//This method calls {@link IntStream//seek} to set the position of the
-// {@code input} {@link CharStream} prior to calling
-// {@link LexerAction//execute} on a position-dependent action. Before the
+// This method calls IntStream//seek to set the position of the
+// input CharStream prior to calling
+// LexerAction//execute on a position-dependent action. Before the
 // method returns, the input position will be restored to the same position
-// it was in when the method was invoked.
+// it was in when the method was invoked.
 //
 // @param lexer The lexer instance.
 // @param input The input stream which is the source for the current token.
-// When l method is called, the current {@link IntStream//index} for
-// {@code input} should be the start of the following token, i.e. 1
+// When this method is called, the current IntStream//index for
+// input should be the start of the following token, i.e. 1
 // character past the end of the current token.
 // @param startIndex The token start index. This value may be passed to
-// {@link IntStream//seek} to set the {@code input} position to the beginning
+// IntStream//seek to set the input position to the beginning
 // of the token.
-// /
 func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
 	requiresSeek := false
 	stopIndex := input.Index()
diff --git a/runtime/Go/antlr/lexer_atn_simulator.go b/runtime/Go/antlr/lexer_atn_simulator.go
index 131364f75c..93d8f54218 100644
--- a/runtime/Go/antlr/lexer_atn_simulator.go
+++ b/runtime/Go/antlr/lexer_atn_simulator.go
@@ -9,6 +9,7 @@ import (
 	"strconv"
 )

+// Utility flags and counters for the lexer ATN simulator.
 var (
 	LexerATNSimulatorDebug    = false
 	LexerATNSimulatorDFADebug = false
@@ -19,6 +20,7 @@ var (
 	LexerATNSimulatorMatchCalls = 0
 )

+// ILexerATNSimulator is the interface implemented by LexerATNSimulator.
 type ILexerATNSimulator interface {
 	IATNSimulator
@@ -30,43 +32,44 @@ type ILexerATNSimulator interface {
 	Consume(input CharStream)
 }

+// LexerATNSimulator is the ATN simulator used by lexers to Match tokens.
 type LexerATNSimulator struct {
 	*BaseATNSimulator

-	recog              Lexer
-	predictionMode     int
-	mergeCache         DoubleDict
-	startIndex         int
-	Line               int
-	CharPositionInLine int
-	mode               int
-	prevAccept         *SimState
-	MatchCalls         int
-}
-
-func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
-	l := new(LexerATNSimulator)
-
-	l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
-
-	l.decisionToDFA = decisionToDFA
-	l.recog = recog
+	recog          Lexer
+	predictionMode int
+	mergeCache     DoubleDict
 	// The current token's starting index into the character stream.
 	// Shared across DFA to ATN simulation in case the ATN fails and the
 	// DFA did not have a previous accept state. In this case, we use the
 	// ATN-generated exception object.
-	l.startIndex = -1
-	// line number 1..n within the input///
-	l.Line = 1
+	startIndex int
+	// line number 1..n within the input
+	Line int
 	// The index of the character relative to the beginning of the line
-	// 0..n-1///
-	l.CharPositionInLine = 0
-	l.mode = LexerDefaultMode
+	// 0..n-1
+	CharPositionInLine int
+	mode               int
	// Used during DFA/ATN exec to record the most recent accept configuration
	// info
-	l.prevAccept = NewSimState()
+	prevAccept *SimState
+	MatchCalls int
+}
+
+// NewLexerATNSimulator returns a new instance of LexerATNSimulator.
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+	base := NewBaseATNSimulator(atn, sharedContextCache)
+	base.decisionToDFA = decisionToDFA

-	// done
-	return l
+	return &LexerATNSimulator{
+		BaseATNSimulator:   base,
+		recog:              recog,
+		startIndex:         -1,
+		Line:               1,
+		CharPositionInLine: 0,
+		mode:               LexerDefaultMode,
+		prevAccept:         NewSimState(),
+	}
 }

 func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
@@ -76,6 +79,7 @@ func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
 	l.startIndex = simulator.startIndex
 }

+// Match runs the lexer DFA/ATN Matching loop for the given mode and returns
+// the Matched token type.
 func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
 	l.MatchCalls++
 	l.mode = mode
@@ -105,6 +109,7 @@ func (l *LexerATNSimulator) reset() {
 	l.mode = LexerDefaultMode
 }

+// MatchATN Matches a token using the full ATN, starting from the start state
+// of the current mode.
 func (l *LexerATNSimulator) MatchATN(input CharStream) int {
 	startState := l.atn.modeToStartState[l.mode]
@@ -194,13 +199,12 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {

 // Get an existing target state for an edge in the DFA. If the target state
 // for the edge has not yet been computed or is otherwise not available,
-// l method returns {@code nil}.
+// this method returns nil.
 //
 // @param s The current DFA state
 // @param t The next input symbol
 // @return The existing target DFA state for the given input symbol
-// {@code t}, or {@code nil} if the target state for l edge is not
-// already cached
+// t, or nil if the target state for this edge is not already cached
 func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
 	if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
 		return nil
@@ -221,8 +225,8 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState
 // @param t The next input symbol
 //
 // @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, l method
-// returns {@link //ERROR}.
+// t. If t does not lead to a valid DFA state, this method
+// returns //ERROR.
 func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
 	reach := NewOrderedATNConfigSet()
@@ -259,7 +263,7 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream,
 }

 // Given a starting configuration set, figure out all ATN configurations
-// we can reach upon input {@code t}. Parameter {@code reach} is a return
+// we can reach upon input t. Parameter reach is a return
 // parameter.
 func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
 	// this is used to Skip processing for configs which have a lower priority
@@ -299,7 +303,7 @@ func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
 	if LexerATNSimulatorDebug {
-		fmt.Printf("ACTION %s\n", lexerActionExecutor)
+		fmt.Printf("ACTION %v\n", lexerActionExecutor)
 	}
 	// seek to after last char in token
 	input.Seek(index)
@@ -322,7 +326,7 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord
 	configs := NewOrderedATNConfigSet()
 	for i := 0; i < len(p.GetTransitions()); i++ {
 		target := p.GetTransitions()[i].getTarget()
-		cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+		cfg := NewLexerATNConfig(target, i+1, BasePredictionContextEMPTY)
 		l.closure(input, cfg, configs, false, false, false)
 	}
@@ -332,11 +336,11 @@
 // Since the alternatives within any lexer decision are ordered by
 // preference, this method stops pursuing the closure as soon as an accept
 // state is reached. After the first accept state is reached by depth-first
-// search from {@code config}, all other (potentially reachable) states for
+// search from config, all other (potentially reachable) states for
 // this rule would have a lower priority.
 //
-// @return {@code true} if an accept state is reached, otherwise
-// {@code false}.
+// @return true if an accept state is reached, otherwise
+// false.
 func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
@@ -471,24 +475,23 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC

 // Evaluate a predicate specified in the lexer.
 //
-//If {@code speculative} is {@code true}, l method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
+// If speculative is true, this method was called before
+// //consume for the Matched character. This method should call
+// //consume before evaluating the predicate to ensure position
+// sensitive values, including Lexer//GetText, Lexer//GetLine,
+// and Lexer//getcolumn, properly reflect the current
+// lexer state. This method should restore input and the simulator
 // to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.
+// call to //consume.
 //
 // @param input The input stream.
 // @param ruleIndex The rule containing the predicate.
 // @param predIndex The index of the predicate within the rule.
-// @param speculative {@code true} if the current index in {@code input} is
+// @param speculative true if the current index in input is
 // one character before the predicate's location.
 //
-// @return {@code true} if the specified predicate evaluates to
-// {@code true}.
-// /
+// @return true if the specified predicate evaluates to
+// true.
 func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
 	// assume true if no recognizer was provided
 	if l.recog == nil {
@@ -600,12 +603,13 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA {
 	return l.decisionToDFA[mode]
 }

-// Get the text Matched so far for the current token.
+// GetText returns the text Matched so far for the current token.
 func (l *LexerATNSimulator) GetText(input CharStream) string {
 	// index is first lookahead char, don't include.
 	return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
 }

+// Consume the current character, updating the line and column counters.
 func (l *LexerATNSimulator) Consume(input CharStream) {
 	curChar := input.LA(1)
 	if curChar == int('\n') {
@@ -617,20 +621,23 @@ func (l *LexerATNSimulator) Consume(input CharStream) {
 	input.Consume()
 }

+// GetCharPositionInLine returns the current position in the line.
 func (l *LexerATNSimulator) GetCharPositionInLine() int {
 	return l.CharPositionInLine
 }

+// GetLine returns the current line number.
 func (l *LexerATNSimulator) GetLine() int {
 	return l.Line
 }

+// GetTokenName returns the name of the given token type.
 func (l *LexerATNSimulator) GetTokenName(tt int) string {
 	if tt == -1 {
 		return "EOF"
 	}

-	return "'" + string(tt) + "'"
+	return fmt.Sprintf("'%c'", tt)
 }

 func resetSimState(sim *SimState) {
@@ -640,6 +647,7 @@ func resetSimState(sim *SimState) {
 	sim.dfaState = nil
 }

+// SimState records the simulator state at the most recent accept state, so
+// that the Match loop can fall back to it.
 type SimState struct {
 	index int
 	line  int
@@ -647,9 +655,10 @@ type SimState struct {
 	dfaState *DFAState
 }

+// NewSimState returns a new instance of SimState.
 func NewSimState() *SimState {
-	s := new(SimState)
-	resetSimState(s)
+	s := &SimState{}
+	s.reset()
 	return s
 }
diff --git a/runtime/Go/antlr/ll1_analyzer.go b/runtime/Go/antlr/ll1_analyzer.go
index 3ebc40a76b..fc55502c49 100644
--- a/runtime/Go/antlr/ll1_analyzer.go
+++ b/runtime/Go/antlr/ll1_analyzer.go
@@ -4,32 +4,29 @@
 package antlr

+// LL1Analyzer computes lookahead sets over an ATN.
 type LL1Analyzer struct {
 	atn *ATN
 }

+// NewLL1Analyzer returns a new instance of LL1Analyzer.
 func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
-	la := new(LL1Analyzer)
-	la.atn = atn
-	return la
+	return &LL1Analyzer{atn: atn}
 }

-//* Special value added to the lookahead sets to indicate that we hit
-// a predicate during analysis if {@code seeThruPreds==false}.
-///
-const (
-	LL1AnalyzerHitPred = TokenInvalidType
-)
+// LL1AnalyzerHitPred is a special value added to the lookahead sets to
+// indicate that we hit a predicate during analysis if seeThruPreds==false.
+const LL1AnalyzerHitPred = TokenInvalidType

 //*
 // Calculates the SLL(1) expected lookahead set for each outgoing transition
-// of an {@link ATNState}. The returned array has one element for each
-// outgoing transition in {@code s}. If the closure from transition
+// of an ATNState. The returned array has one element for each
+// outgoing transition in s. If the closure from transition
 // i leads to a semantic predicate before Matching a symbol, the
-// element at index i of the result will be {@code nil}.
+// element at index i of the result will be nil.
 //
 // @param s the ATN state
-// @return the expected symbols for each outgoing transition of {@code s}.
+// @return the expected symbols for each outgoing transition of s.
 func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
 	if s == nil {
 		return nil
@@ -38,9 +35,9 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
 	look := make([]*IntervalSet, count)
 	for alt := 0; alt < count; alt++ {
 		look[alt] = NewIntervalSet()
-		lookBusy := NewSet(nil, nil)
+		lookBusy := newSet(nil, nil)
 		seeThruPreds := false // fail to get lookahead upon pred
-		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, newBitSet(), seeThruPreds, false)
 		// Wipe out lookahead for la alternative if we found nothing
 		// or we had a predicate when we !seeThruPreds
 		if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
@@ -50,24 +47,22 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
 	return look
 }

-//*
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
+// Look computes the set of tokens that can follow s in the ATN in the
+// specified ctx.
 //
-//If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
+// If ctx is nil and the end of the rule containing
+// s is reached, Token//EPSILON is added to the result set.
+// If ctx is not nil and the end of the outermost rule is
+// reached, Token//EOF is added to the result set.
 //
 // @param s the ATN state
 // @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx the complete parser context, or {@code nil} if the context
+// BlockEndState to detect epsilon paths through a closure.
+// @param ctx the complete parser context, or nil if the context
 // should be ignored
 //
-// @return The set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-///
+// @return The set of tokens that can follow s in the ATN in the
+// specified ctx.
 func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
 	r := NewIntervalSet()
 	seeThruPreds := true // ignore preds get all lookahead
@@ -75,50 +70,49 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
 	if ctx != nil {
 		lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
 	}
-	la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
+	la.look1(s, stopState, lookContext, r, newSet(nil, nil), newBitSet(), seeThruPreds, true)
 	return r
 }

-//*
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
+// Compute set of tokens that can follow s in the ATN in the
+// specified ctx.
 //
-//If {@code ctx} is {@code nil} and {@code stopState} or the end of the
-// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
-// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
-// {@code true} and {@code stopState} or the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
+// If ctx is nil and stopState or the end of the
+// rule containing s is reached, Token//EPSILON is added to
+// the result set. If ctx is not nil and addEOF is
+// true and stopState or the end of the outermost rule is
+// reached, Token//EOF is added to the result set.
 //
 // @param s the ATN state.
 // @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx The outer context, or {@code nil} if the outer context should
+// BlockEndState to detect epsilon paths through a closure.
+// @param ctx The outer context, or nil if the outer context should
 // not be used.
 // @param look The result lookahead set.
 // @param lookBusy A set used for preventing epsilon closures in the ATN
 // from causing a stack overflow. Outside code should pass
-// {@code NewSet
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// If the symbol type does not Match, +// ANTLRErrorStrategy//recoverInline is called on the current error +// strategy to attempt recovery. If //getBuildParseTree is +// true and the token index of the symbol returned by +// ANTLRErrorStrategy//recoverInline is -1, the symbol is added to +// the parse tree by calling ParserRuleContext//addErrorNode. // // @param ttype the token type to Match // @return the Matched symbol // @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the +// ttype and the error strategy could not recover from the // mismatched symbol - func (p *BaseParser) Match(ttype int) Token { t := p.GetCurrentToken() @@ -155,22 +155,21 @@ func (p *BaseParser) Match(ttype int) Token { return t } -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. +// MatchWildcard matches the current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), ANTLRErrorStrategy//ReportMatch +// and //consume are called to complete the Match process. // -//If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// If the symbol type does not Match, +// ANTLRErrorStrategy//recoverInline is called on the current error +// strategy to attempt recovery. If //getBuildParseTree is +// true and the token index of the symbol returned by +// ANTLRErrorStrategy//recoverInline is -1, the symbol is added to +// the parse tree by calling ParserRuleContext//addErrorNode. // // @return the Matched symbol // @panics RecognitionException if the current input symbol did not Match // a wildcard and the error strategy could not recover from the mismatched // symbol - func (p *BaseParser) MatchWildcard() Token { t := p.GetCurrentToken() if t.GetTokenType() > 0 { @@ -188,14 +187,17 @@ func (p *BaseParser) MatchWildcard() Token { return t } +// GetParserRuleContext returns the current rule context for this parser. func (p *BaseParser) GetParserRuleContext() ParserRuleContext { return p.ctx } +// SetParserRuleContext sets the current rule context for this parser. func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { p.ctx = v } +// GetParseListeners returns the listeners attuned to this parser. func (p *BaseParser) GetParseListeners() []ParseTreeListener { if p.parseListeners == nil { return make([]ParseTreeListener, 0) @@ -203,34 +205,33 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener { return p.parseListeners } -// Registers {@code listener} to receive events during the parsing process. +// AddParseListener Registers listener to receive events during the parsing process. // -//To support output-preserving grammar transformations (including but not +// To support output-preserving grammar transformations (including but not // limited to left-recursion removal, automated left-factoring, and // optimized code generation), calls to listener methods during the parse // may differ substantially from calls made by -// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In +// ParseTreeWalker//DEFAULT used after the parse is complete. In // particular, rule entry and exit events may occur in a different order // during the parse than after the parser. In addition, calls to certain -// rule entry methods may be omitted.
+// rule entry methods may be omitted. // -//With the following specific exceptions, calls to listener events are +// With the following specific exceptions, calls to listener events are // deterministic, i.e. for identical input the calls to listener -// methods will be the same.
+// methods will be the same. // -//If {@code listener} is {@code nil} or has not been added as a parse -// listener, p.method does nothing.
+// If listener is nil or has not been added as a parse +// listener, p.method does nothing. // @param listener the listener to remove -// func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { if p.parseListeners != nil { @@ -278,7 +277,7 @@ func (p *BaseParser) removeParseListeners() { p.parseListeners = nil } -// Notify any parse listeners of an enter rule event. +// TriggerEnterRuleEvent Notify any parse listeners of an enter rule event. func (p *BaseParser) TriggerEnterRuleEvent() { if p.parseListeners != nil { ctx := p.ctx @@ -289,8 +288,7 @@ func (p *BaseParser) TriggerEnterRuleEvent() { } } -// -// Notify any parse listeners of an exit rule event. +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. // // @see //addParseListener // @@ -308,14 +306,17 @@ func (p *BaseParser) TriggerExitRuleEvent() { } } +// GetInterpreter returns the current ATN simulator this parser is employing. func (p *BaseParser) GetInterpreter() *ParserATNSimulator { return p.Interpreter } +// GetATN returns the atnof this parser's interpreter. func (p *BaseParser) GetATN() *ATN { return p.Interpreter.atn } +// GetTokenFactory returns the token factory this parser's input employs. func (p *BaseParser) GetTokenFactory() TokenFactory { return p.input.GetTokenSource().GetTokenFactory() } @@ -325,12 +326,7 @@ func (p *BaseParser) setTokenFactory(factory TokenFactory) { p.input.GetTokenSource().setTokenFactory(factory) } -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. -// +// GetATNWithBypassAlts is not currently implemented. func (p *BaseParser) GetATNWithBypassAlts() { // TODO @@ -353,13 +349,11 @@ func (p *BaseParser) GetATNWithBypassAlts() { // The preferred method of getting a tree pattern. For example, here's a // sample use: // -//-// ParseTree t = parser.expr() -// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", -// MyParser.RULE_expr) -// ParseTreeMatch m = p.Match(t) -// String id = m.Get("ID") -//+// ParseTree t = parser.expr() +// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", +// MyParser.RULE_expr) +// ParseTreeMatch m = p.Match(t) +// String id = m.Get("ID") func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { @@ -381,32 +375,34 @@ func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Le // return m.compile(pattern, patternRuleIndex) } +// GetInputStream returns the input to this parser. func (p *BaseParser) GetInputStream() IntStream { return p.GetTokenStream() } +// SetInputStream sets the token stream and resets the parser. func (p *BaseParser) SetInputStream(input TokenStream) { p.SetTokenStream(input) } +// GetTokenStream returns the input to this parser. func (p *BaseParser) GetTokenStream() TokenStream { return p.input } -// Set the token stream and reset the parser.// +// SetTokenStream sets the token stream and resets the parser. func (p *BaseParser) SetTokenStream(input TokenStream) { p.input = nil p.reset() p.input = input } -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref e.g., x=ID. -// +// GetCurrentToken returns the current input symbol being processed. func (p *BaseParser) GetCurrentToken() Token { return p.input.LT(1) } +// NotifyErrorListeners informs the error listeners that an error has occurred. 
func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { if offendingToken == nil { offendingToken = p.GetCurrentToken() @@ -418,6 +414,7 @@ func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err listener.SyntaxError(p, offendingToken, line, column, msg, err) } +// Consume the current token generated by the lexer and return it. func (p *BaseParser) Consume() Token { o := p.GetCurrentToken() if o.GetTokenType() != TokenEOF { @@ -454,6 +451,7 @@ func (p *BaseParser) addContextToParseTree() { } } +// EnterRule executes when entering a new state. func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { p.SetState(state) p.ctx = localctx @@ -466,6 +464,7 @@ func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) } } +// ExitRule executes when exiting a state. func (p *BaseParser) ExitRule() { p.ctx.SetStop(p.input.LT(-1)) // trigger event on ctx, before it reverts to parent @@ -480,6 +479,7 @@ func (p *BaseParser) ExitRule() { } } +// EnterOuterAlt executes when entering the outer context. func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { localctx.SetAltNumber(altNum) // if we have Newlocalctx, make sure we replace existing ctx @@ -493,11 +493,8 @@ func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { p.ctx = localctx } -// Get the precedence level for the top-most precedence rule. -// -// @return The precedence level for the top-most precedence rule, or -1 if +// GetPrecedence returns the precedence level for the top-most precedence rule, or -1 if // the parser context is not nested within a precedence rule. - func (p *BaseParser) GetPrecedence() int { if len(p.precedenceStack) == 0 { return -1 @@ -506,6 +503,7 @@ func (p *BaseParser) GetPrecedence() int { return p.precedenceStack[len(p.precedenceStack)-1] } +// EnterRecursionRule executes when entering a recursive rule. func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { p.SetState(state) p.precedenceStack.Push(precedence) @@ -517,9 +515,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleI } } -// -// Like {@link //EnterRule} but for recursive rules. - +// PushNewRecursionContext like EnterRule but for recursive rules. func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { previous := p.ctx previous.SetParent(localctx) @@ -537,6 +533,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, } } +// UnrollRecursionContexts triggers exiting a recursive context. func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { p.precedenceStack.Pop() p.ctx.SetStop(p.input.LT(-1)) @@ -558,6 +555,7 @@ func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { } } +// GetInvokingContext returns the parent of the context at the given index func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { ctx := p.ctx for ctx != nil { @@ -569,6 +567,8 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { return nil } +// Precpred returns true if the given context's precedence is greater than or +// equal to the precedence at the top of the precedence stack. 
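That rule is easiest to see on a bare stack of levels: GetPrecedence, EnterRecursionRule, and Precpred cooperate through a plain slice of integers, and the test is simply level >= top. A minimal, self-contained sketch (names are illustrative, not the runtime's):

package main

import "fmt"

type precStack []int

func (s *precStack) push(v int) { *s = append(*s, v) }
func (s *precStack) pop()       { *s = (*s)[:len(*s)-1] }

// precedence mirrors GetPrecedence: -1 when not inside a precedence rule.
func (s precStack) precedence() int {
	if len(s) == 0 {
		return -1
	}
	return s[len(s)-1]
}

// precpred mirrors Precpred: an operator may extend the left-recursive
// loop only if its level is at least the level the rule was entered with.
func (s precStack) precpred(level int) bool {
	return level >= s.precedence()
}

func main() {
	var s precStack
	fmt.Println(s.precedence()) // -1: not in a precedence rule
	s.push(2)                   // e.g. entering expr at precedence 2
	fmt.Println(s.precpred(3))  // true: tighter operator may continue
	fmt.Println(s.precpred(1))  // false: looser operator must not
	s.pop()
}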
func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { return precedence >= p.precedenceStack[len(p.precedenceStack)-1] } @@ -578,20 +578,16 @@ func (p *BaseParser) inContext(context ParserRuleContext) bool { return false } -// -// Checks whether or not {@code symbol} can follow the current state in the +// IsExpectedToken checks whether or not symbol can follow the current state in the // ATN. The behavior of p.method is equivalent to the following, but is // implemented such that the complete context-sensitive follow set does not // need to be explicitly constructed. // -//
-// return getExpectedTokens().contains(symbol) -//+// return getExpectedTokens().contains(symbol) // // @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - +// @return true if symbol can follow the current state in +// the ATN, otherwise false. func (p *BaseParser) IsExpectedToken(symbol int) bool { atn := p.Interpreter.atn ctx := p.ctx @@ -619,8 +615,8 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool { return false } -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, +// GetExpectedTokens computes the set of input symbols which could follow the current parser +// state and context, as given by //GetState and //GetContext, // respectively. // // @see ATN//getExpectedTokens(int, RuleContext) @@ -629,13 +625,15 @@ func (p *BaseParser) GetExpectedTokens() *IntervalSet { return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) } +// GetExpectedTokensWithinCurrentRule returns the valid tokens expected in the +// current rule being processed. func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { atn := p.Interpreter.atn s := atn.states[p.state] return atn.NextTokens(s, nil) } -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +// GetRuleIndex returns a rule's index (i.e., RULE_ruleName field) or -1 if not found.// func (p *BaseParser) GetRuleIndex(ruleName string) int { var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] if ok { @@ -645,13 +643,12 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int { return -1 } -// Return List<String> of the rule names in your parser instance +// GetRuleInvocationStack returns List
-// When {@code lookToEndOfRule} is true, p method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// When lookToEndOfRule is true, p method uses
+// ATN//NextTokens for each configuration in configs which is
// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
+// from the configuration via epsilon-only transitions. // // @param configs the configuration set to update // @param lookToEndOfRule when true, p method checks for rule stop states // reachable by epsilon-only transitions from each configuration in -// {@code configs}. +// configs. // -// @return {@code configs} if all configurations in {@code configs} are in a +// @return configs if all configurations in configs are in a // rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state +// the configurations from configs which are in a rule stop state // func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { if PredictionModeallConfigsInRuleStopStates(configs) { @@ -651,7 +650,7 @@ func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfi NextTokens := p.atn.NextTokens(config.GetState(), nil) if NextTokens.contains(TokenEpsilon) { endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) + result.Add(BaseATNConfigState(config, endOfRuleState), p.mergeCache) } } } @@ -664,8 +663,8 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full configs := NewBaseATNConfigSet(fullCtx) for i := 0; i < len(a.GetTransitions()); i++ { target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewSet(nil, nil) + c := BaseAtnConfigDefaultContext(target, i+1, initialContext) + closureBusy := newSet(nil, nil) p.closure(c, configs, closureBusy, true, fullCtx, false) } return configs @@ -673,59 +672,56 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full // // This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a +// //computeStartState to the special start state used by a // precedence DFA for a particular precedence value. The transformation // process applies the following changes to the start state's configuration // set. // -//// The prediction context must be considered by p filter to address // situations like the following. -//
-//
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-//
-// +// +// +// grammar TA +// prog: statement* EOF +// statement: letterA | statement letterA 'b' +// letterA: 'a' +// +// // If the above grammar, the ATN state immediately before the token -// reference {@code 'a'} in {@code letterA} is reachable from the left edge +// reference 'a' in letterA is reachable from the left edge // of both the primary and closure blocks of the left-recursive rule -// {@code statement}. The prediction context associated with each of these +// statement. The prediction context associated with each of these // configurations distinguishes between them, and prevents the alternative -// which stepped out to {@code prog} (and then back in to {@code statement} +// which stepped out to prog (and then back in to statement // from being eliminated by the filter. -//
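A toy model of the transformation just described, under the simplifying assumption that contexts compare as strings (the real filter operates on PredictionContext graphs and also has to respect precedence predicates):

package main

import "fmt"

type config struct {
	state, alt int
	ctx        string // stand-in for a prediction context graph
}

// applyPrecedenceFilter keeps every alt-1 configuration, remembering its
// context per ATN state, and then drops any alt>1 configuration whose
// (state, context) pair alt 1 already covers. An alt>1 configuration
// whose context differs survives, which is how the path that stepped out
// to prog and back into statement survives in the grammar above.
func applyPrecedenceFilter(configs []config) []config {
	fromAlt1 := make(map[int]string)
	out := make([]config, 0, len(configs))
	for _, c := range configs {
		if c.alt == 1 {
			fromAlt1[c.state] = c.ctx
			out = append(out, c)
		}
	}
	for _, c := range configs {
		if c.alt == 1 {
			continue
		}
		if ctx, ok := fromAlt1[c.state]; ok && ctx == c.ctx {
			continue // alt 1 already reaches this state with this stack
		}
		out = append(out, c)
	}
	return out
}

func main() {
	in := []config{
		{state: 7, alt: 1, ctx: "[$]"},
		{state: 7, alt: 2, ctx: "[$]"},    // suppressed by alt 1
		{state: 9, alt: 2, ctx: "[12 $]"}, // different context: kept
	}
	fmt.Println(applyPrecedenceFilter(in))
}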
+// // // @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. +// //computeStartState as the start state for the DFA. // @return The transformed configuration set representing the start state // for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). +// calling Parser//getPrecedence). // func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { @@ -744,7 +740,7 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf } statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) + configSet.Add(ATNConfigWithContext(config, updatedContext), p.mergeCache) } else { configSet.Add(config, p.mergeCache) } @@ -778,7 +774,7 @@ func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATN return nil } -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *bitSet, configs ATNConfigSet, nalts int) []SemanticContext { altToPred := make([]SemanticContext, nalts+1) for _, c := range configs.GetItems() { @@ -805,7 +801,7 @@ func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATN return altToPred } -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *bitSet, altToPred []SemanticContext) []*PredPrediction { pairs := make([]*PredPrediction, 0) containsPredicate := false for i := 1; i < len(altToPred); i++ { @@ -827,48 +823,47 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre // // This method is used to improve the localization of error messages by // choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. +// NoViableAltException in particular prediction scenarios where the +// //ERROR state was reached during ATN simulation. +// // -//// The default implementation of p method uses the following // algorithm to identify an ATN configuration which successfully parsed the // decision entry rule. Choosing such an alternative ensures that the -// {@link ParserRuleContext} returned by the calling rule will be complete +// ParserRuleContext returned by the calling rule will be complete // and valid, and the syntax error will be Reported later at a more -// localized location.
+// localized location. +// +// · If a syntactically valid path or paths reach the end of the decision rule and +// they are semantically valid if predicated, return the min associated alt. // -//+// · Otherwise, return ATN//INVALID_ALT_NUMBER +// +// // In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in +// alternative which will result in a FailedPredicateException in // the parser. Specifically, p could occur if the only configuration // capable of successfully parsing to the end of the decision rule is // blocked by a semantic predicate. By choosing p alternative within -// {@link //AdaptivePredict} instead of panicing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific +// //AdaptivePredict instead of panicing a +// NoViableAltException, the resulting +// FailedPredicateException in the parser will identify the specific // predicate which is preventing the parser from successfully parsing the // decision rule, which helps developers identify and correct logic errors // in semantic predicates. -//
+// // // @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached +// the //ERROR state was reached // @param outerContext The is the \gamma_0 initial parser context from the paper // or the parser stack at the instant before prediction commences. // -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. +// @return The value to return from //AdaptivePredict, or +// ATN//INVALID_ALT_NUMBER if a suitable alternative was not +// identified and //AdaptivePredict should Report an error instead. // func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) @@ -888,6 +883,7 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry return ATNInvalidAltNumber } +// GetAltThatFinishedDecisionEntryRule TODO: docs. func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { alts := NewIntervalSet() @@ -906,14 +902,15 @@ func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConf } // Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. // -// Create a NewSet so as not to alter the incoming parameter. +// Create a NewSet so as not to alter the incoming parameter. // -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. +// ATNConfigSetPair TODO: delete. type ATNConfigSetPair struct { item0, item1 ATNConfigSet } @@ -938,13 +935,13 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS } // Look through a list of predicate/alt pairs, returning alts for the -// pairs that win. A {@code NONE} predicate indicates an alt containing an +// pairs that win. A NONE predicate indicates an alt containing an // unpredicated config which behaves as "always true." If !complete // then we stop at the first predicate that evaluates to true. This // includes pairs with nil predicates. 
// -func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() +func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *bitSet { + predictions := newBitSet() for i := 0; i < len(predPredictions); i++ { pair := predPredictions[i] if pair.pred == SemanticContextNone { @@ -972,13 +969,13 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti return predictions } -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { initialDepth := 0 p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, fullCtx, initialDepth, treatEOFAsEpsilon) } -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { if ParserATNSimulatorDebug { fmt.Println("closure(" + config.String() + ")") @@ -996,7 +993,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs for i := 0; i < config.GetContext().length(); i++ { if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) + configs.Add(ATNConfigWithStateContext(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) continue } else { // we have no context info, just chase follow links (if greedy) @@ -1010,7 +1007,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs returnState := p.atn.states[config.GetContext().getReturnState(i)] newContext := config.GetContext().GetParent(i) // "pop" return state - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + c := BaseATNConfigContext(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) // While we have context to pop back from, we may have // gotten that context AFTER having falling off a rule. // Make sure we track that we are now out of context. 
@@ -1033,7 +1030,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs } // Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { state := config.GetState() // optimization if !state.GetEpsilonOnlyTransitions() { @@ -1109,13 +1106,13 @@ func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, co case TransitionACTION: return p.actionTransition(config, t.(*ActionTransition)) case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) + return BaseATNConfigState(config, t.getTarget()) case TransitionATOM: // EOF transitions act like epsilon transitions after the first EOF // transition is traversed if treatEOFAsEpsilon { if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) + return BaseATNConfigState(config, t.getTarget()) } } return nil @@ -1124,7 +1121,7 @@ func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, co // transition is traversed if treatEOFAsEpsilon { if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) + return BaseATNConfigState(config, t.getTarget()) } } return nil @@ -1133,7 +1130,7 @@ func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, co // transition is traversed if treatEOFAsEpsilon { if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) + return BaseATNConfigState(config, t.getTarget()) } } return nil @@ -1146,7 +1143,7 @@ func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransit if ParserATNSimulatorDebug { fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) } - return NewBaseATNConfig4(config, t.getTarget()) + return BaseATNConfigState(config, t.getTarget()) } func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, @@ -1171,14 +1168,14 @@ func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + c = BaseATNConfigState(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + c = ATNConfigStateContext(config, pt.getTarget(), newSemCtx) } } else { - c = NewBaseATNConfig4(config, pt.getTarget()) + c = BaseATNConfigState(config, pt.getTarget()) } if ParserATNSimulatorDebug { fmt.Println("config from pred transition=" + c.String()) @@ -1207,14 +1204,14 @@ func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTrans predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + c = BaseATNConfigState(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + c = ATNConfigStateContext(config, pt.getTarget(), newSemCtx) } } else { - c = NewBaseATNConfig4(config, 
pt.getTarget()) + c = BaseATNConfigState(config, pt.getTarget()) } if ParserATNSimulatorDebug { fmt.Println("config from pred transition=" + c.String()) @@ -1228,12 +1225,12 @@ func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) } returnState := t.followState newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) + return ATNConfigWithStateContext(config, t.getTarget(), newContext) } -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModeGetAlts(altsets) +func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *bitSet { + altsets := predictionModeGetConflictingAltSubsets(configs) + return predictionModeGetAlts(altsets) } // Sam pointed out a problem with the previous definition, v3, of @@ -1272,17 +1269,18 @@ func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { // that we still need to pursue. // -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { - var conflictingAlts *BitSet +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *bitSet { + var conflictingAlts *bitSet if configs.GetUniqueAlt() != ATNInvalidAltNumber { - conflictingAlts = NewBitSet() + conflictingAlts = newBitSet() conflictingAlts.add(configs.GetUniqueAlt()) } else { - conflictingAlts = configs.GetConflictingAlts() + conflictingAlts = configs.getConflictingAlts() } return conflictingAlts } +// GetTokenName returns the name of the given token type. func (p *ParserATNSimulator) GetTokenName(t int) string { if t == TokenEOF { return "EOF" @@ -1356,23 +1354,23 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { // // Add an edge to the DFA, if possible. This method calls -// {@link //addDFAState} to ensure the {@code to} state is present in the -// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the +// //addDFAState to ensure the to state is present in the +// DFA. If from is nil, or if t is outside the // range of edges that can be represented in the DFA tables, p method // returns without adding the edge to the DFA. // -//If {@code to} is {@code nil}, p method returns {@code nil}. -// Otherwise, p method returns the {@link DFAState} returned by calling -// {@link //addDFAState} for the {@code to} state.
+// If to is nil, p method returns nil. +// Otherwise, p method returns the DFAState returned by calling +// //addDFAState for the to state. // // @param dfa The DFA // @param from The source state for the edge // @param t The input symbol // @param to The target state for the edge // -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} +// @return If to is nil, p method returns nil +// otherwise p method returns the result of calling //addDFAState +// on to // func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { if ParserATNSimulatorDebug { @@ -1402,18 +1400,18 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA } // -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} +// Add state D to the DFA if it is not already present, and return +// the actual instance stored in the DFA. If a state equivalent to D // is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. +// method returns D after adding it to the DFA. // -//If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and -// does not change the DFA.
+// If D is //ERROR, p method returns //ERROR and +// does not change the DFA. // // @param dfa The dfa // @param D The DFA state to add // @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the +// state if D is already in the DFA, or D itself if the // state was not already present. // func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { @@ -1437,7 +1435,8 @@ func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { return d } -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { +// ReportAttemptingFullContext TODO: docs. +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *bitSet, configs ATNConfigSet, startIndex, stopIndex int) { if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + @@ -1448,6 +1447,7 @@ func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAl } } +// ReportContextSensitivity TODO: docs. func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) @@ -1459,9 +1459,9 @@ func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, } } -// If context sensitive parsing, we know it's ambiguity not conflict// +// ReportAmbiguity if context sensitive parsing, we know it's ambiguity not conflict func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + exact bool, ambigAlts *bitSet, configs ATNConfigSet) { if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + diff --git a/runtime/Go/antlr/parser_rule_context.go b/runtime/Go/antlr/parser_rule_context.go index 49cd10c5ff..202907c927 100644 --- a/runtime/Go/antlr/parser_rule_context.go +++ b/runtime/Go/antlr/parser_rule_context.go @@ -9,6 +9,7 @@ import ( "strconv" ) +// ParserRuleContext represents the nodes of the tree generated by the parser. type ParserRuleContext interface { RuleContext @@ -30,18 +31,26 @@ type ParserRuleContext interface { RemoveLastChild() } +// BaseParserRuleContext is the default implementation of ParserRuleContext. type BaseParserRuleContext struct { *BaseRuleContext - start, stop Token + // The exception that forced prc rule to return. If the rule successfully + // completed, prc is nil. 
exception RecognitionException + start, stop Token children []Tree } +// NewBaseParserRuleContext returns a new instance of BaseParserRuleContext func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { - prc := new(BaseParserRuleContext) - - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + prc := &BaseParserRuleContext{ + BaseRuleContext: NewBaseRuleContext(parent, invokingStateNumber), + children: nil, + start: nil, + stop: nil, + exception: nil, + } prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, @@ -50,24 +59,21 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) // operation because we don't the need to track the details about // how we parse prc rule. // / - prc.children = nil - prc.start = nil - prc.stop = nil - // The exception that forced prc rule to return. If the rule successfully - // completed, prc is {@code nil}. - prc.exception = nil return prc } +// SetException TODO: docs. func (prc *BaseParserRuleContext) SetException(e RecognitionException) { prc.exception = e } +// GetChildren returns this rule's children. func (prc *BaseParserRuleContext) GetChildren() []Tree { return prc.children } +// CopyFrom copies the information from the given node into this one. func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { // from RuleContext prc.parentCtx = ctx.parentCtx @@ -77,6 +83,7 @@ func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { prc.stop = ctx.stop } +// GetText returns the text in this node. func (prc *BaseParserRuleContext) GetText() string { if prc.GetChildCount() == 0 { return "" @@ -91,11 +98,12 @@ func (prc *BaseParserRuleContext) GetText() string { } // Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { -} -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { -} +// EnterRule executes when entering this node. +func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {} + +// ExitRule executes when exiting this node. +func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {} // * Does not set parent link other add methods do that/// func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { @@ -109,6 +117,7 @@ func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) Termi return child } +// AddChild adds the given rule to this node's children. func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { if prc.children == nil { prc.children = make([]Tree, 0) @@ -120,25 +129,24 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { return child } -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove +// RemoveLastChild is used by EnterOuterAlt to toss out a RuleContext previously +// added as we entered a rule. If we have label, we will need to remove // generic ruleContext object. -// / func (prc *BaseParserRuleContext) RemoveLastChild() { if prc.children != nil && len(prc.children) > 0 { prc.children = prc.children[0 : len(prc.children)-1] } } +// AddTokenNode to this one's children. func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { - node := NewTerminalNodeImpl(token) prc.addTerminalNodeChild(node) node.parentCtx = prc return node - } +// AddErrorNode to this one's children. 
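The child-management hunks here (AddChild, RemoveLastChild, AddTokenNode, and GetText concatenating over children) follow one simple pattern that can be sketched independently of the runtime; every name below is an illustrative stand-in:

package main

import "fmt"

type tree interface{ text() string }

type tokenNode struct{ s string }

func (t *tokenNode) text() string { return t.s }

type ruleNode struct{ children []tree }

// text mirrors GetText: the concatenation of all children's text.
func (r *ruleNode) text() string {
	out := ""
	for _, c := range r.children {
		out += c.text()
	}
	return out
}

// addChild appends, allocating the slice lazily as AddChild does.
func (r *ruleNode) addChild(c tree) {
	if r.children == nil {
		r.children = make([]tree, 0)
	}
	r.children = append(r.children, c)
}

// removeLastChild mirrors RemoveLastChild: used to toss out a context
// that EnterOuterAlt is about to replace with a labeled one.
func (r *ruleNode) removeLastChild() {
	if len(r.children) > 0 {
		r.children = r.children[:len(r.children)-1]
	}
}

func main() {
	r := &ruleNode{}
	r.addChild(&tokenNode{"a"})
	r.addChild(&tokenNode{"b"})
	r.removeLastChild()
	fmt.Println(r.text()) // "a"
}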
func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { node := NewErrorNodeImpl(badToken) prc.addTerminalNodeChild(node) @@ -146,6 +154,7 @@ func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { return node } +// GetChild returns this node's i-th child. func (prc *BaseParserRuleContext) GetChild(i int) Tree { if prc.children != nil && len(prc.children) >= i { return prc.children[i] @@ -154,6 +163,7 @@ func (prc *BaseParserRuleContext) GetChild(i int) Tree { return nil } +// GetChildOfType returns the i-th child of the given type. func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { if childType == nil { return prc.GetChild(i).(RuleContext) @@ -173,34 +183,43 @@ func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) return nil } +// ToStringTree returns the lisp-like representation of this node and it's +// children. func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { return TreesStringTree(prc, ruleNames, recog) } +// GetRuleContext returns the node itself. func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { return prc } +// Accept TODO: docs. func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { return visitor.VisitChildren(prc) } +// SetStart of this node's range. func (prc *BaseParserRuleContext) SetStart(t Token) { prc.start = t } +// GetStart of this node's range. func (prc *BaseParserRuleContext) GetStart() Token { return prc.start } +// SetStop of this node's range. func (prc *BaseParserRuleContext) SetStop(t Token) { prc.stop = t } +// GetStop of this node's range. func (prc *BaseParserRuleContext) GetStop() Token { return prc.stop } +// GetToken returns the i-th terminal of the given token type. func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { for j := 0; j < len(prc.children); j++ { @@ -218,6 +237,7 @@ func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { return nil } +// GetTokens returns all the terminals in this node's children. func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { if prc.children == nil { return make([]TerminalNode, 0) @@ -237,6 +257,7 @@ func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { return tokens } +// GetPayload TODO: docs. func (prc *BaseParserRuleContext) GetPayload() interface{} { return prc } @@ -264,10 +285,12 @@ func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleCont // Go lacks generics, so it's not possible for us to return the child with the correct type, but we do // check for convertibility +// GetTypedRuleContext TODO: docs. func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { return prc.getChild(ctxType, i) } +// GetTypedRuleContexts TODO: docs. func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { if prc.children == nil { return make([]RuleContext, 0) @@ -285,6 +308,7 @@ func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []R return contexts } +// GetChildCount returns how many children this node has. func (prc *BaseParserRuleContext) GetChildCount() int { if prc.children == nil { return 0 @@ -293,6 +317,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int { return len(prc.children) } +// GetSourceInterval TODO: docs. 
func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval @@ -340,21 +365,25 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } +// RuleContextEmpty TODO: docs. var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) +// InterpreterRuleContext TODO: docs. type InterpreterRuleContext interface { ParserRuleContext } +// BaseInterpreterRuleContext TODO: docs. type BaseInterpreterRuleContext struct { *BaseParserRuleContext } +// NewBaseInterpreterRuleContext returns a new instance of +// BaseInterpreterRuleContext. func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { - - prc := new(BaseInterpreterRuleContext) - - prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) + prc := &BaseInterpreterRuleContext{ + BaseParserRuleContext: NewBaseParserRuleContext(parent, invokingStateNumber), + } prc.RuleIndex = ruleIndex diff --git a/runtime/Go/antlr/prediction_context.go b/runtime/Go/antlr/prediction_context.go index 99acb333fa..3a1d32d3ca 100644 --- a/runtime/Go/antlr/prediction_context.go +++ b/runtime/Go/antlr/prediction_context.go @@ -8,23 +8,19 @@ import ( "strconv" ) -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. -// / +// BasePredictionContextEmptyReturnState represents $ in local context +// prediction, which means wildcard. +const BasePredictionContextEmptyReturnState = 0x7FFFFFFF +// Represents $ in an array in full context mode, when $ +// doesn't mean wildcard: $ + x = [$,x]. Here, +// $ = EmptyReturnState. var ( BasePredictionContextglobalNodeCount = 1 BasePredictionContextid = BasePredictionContextglobalNodeCount ) +// PredictionContext TODO: docs type PredictionContext interface { hash() int GetParent(int) PredictionContext @@ -36,15 +32,14 @@ type PredictionContext interface { String() string } +// BasePredictionContext is the default implementation of PredictionContext. type BasePredictionContext struct { cachedHash int } +// NewBasePredictionContext returns a new instance of BasePredictionContext. func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc + return &BasePredictionContext{cachedHash: cachedHash} } func (b *BasePredictionContext) isEmpty() bool { @@ -63,18 +58,18 @@ func calculateEmptyHash() int { return murmurFinish(h, 0) } -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - +// PredictionContextCache is used to cache BasePredictionContext objects. Its +// used for the shared context cash associated with contexts in DFA states. This +// cache can be used for both lexers and parsers. type PredictionContextCache struct { cache map[PredictionContext]PredictionContext } +// NewPredictionContextCache returns a new instance of PredictionContextCache. 
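PredictionContextCache's add, shown in the next hunk, is an interning operation: when an equal context is already cached, the cached instance is returned so equal graphs end up shared across DFA states. A sketch with a string key standing in for context equality (the real cache keys on the PredictionContext values themselves):

package main

import "fmt"

type node struct{ key string }

// ctxCache models PredictionContextCache: add interns a context so
// that equal graphs are shared; lookups are plain map reads.
type ctxCache struct {
	cache map[string]*node
}

func newCtxCache() *ctxCache {
	return &ctxCache{cache: make(map[string]*node)}
}

func (c *ctxCache) add(n *node) *node {
	if existing, ok := c.cache[n.key]; ok {
		return existing // already interned: reuse, don't store twice
	}
	c.cache[n.key] = n
	return n
}

func main() {
	c := newCtxCache()
	a := c.add(&node{key: "[$]"})
	b := c.add(&node{key: "[$]"}) // equal context: first instance wins
	fmt.Println(a == b)           // true
}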
func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t + return &PredictionContextCache{ + cache: make(map[PredictionContext]PredictionContext), + } } // Add a context to the cache and return it. If the context already exists, @@ -93,6 +88,7 @@ func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { return ctx } +// Get the given prediction context. func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { return p.cache[ctx] } @@ -101,10 +97,13 @@ func (p *PredictionContextCache) length() int { return len(p.cache) } +// SingletonPredictionContext TODO: docs. type SingletonPredictionContext interface { PredictionContext } +// BaseSingletonPredictionContext is the default implementation of +// SingletonPredictionContext. type BaseSingletonPredictionContext struct { *BasePredictionContext @@ -112,10 +111,15 @@ type BaseSingletonPredictionContext struct { returnState int } +// NewBaseSingletonPredictionContext returns a new instance of +// BaseSingletonPredictionContext. func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(37) + s := &BaseSingletonPredictionContext{ + BasePredictionContext: NewBasePredictionContext(37), + parentCtx: parent, + returnState: returnState, + } if parent != nil { s.cachedHash = calculateHash(parent, returnState) @@ -123,12 +127,10 @@ func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int s.cachedHash = calculateEmptyHash() } - s.parentCtx = parent - s.returnState = returnState - return s } +// SingletonBasePredictionContextCreate TODO: docs. func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { if returnState == BasePredictionContextEmptyReturnState && parent == nil { // someone can pass in the bits of an array ctx that mean $ @@ -142,6 +144,7 @@ func (b *BaseSingletonPredictionContext) length() int { return 1 } +// GetParent of this context. func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { return b.parentCtx } @@ -206,25 +209,26 @@ func (b *BaseSingletonPredictionContext) String() string { return strconv.Itoa(b.returnState) + " " + up } +// BasePredictionContextEMPTY TODO: docs. var BasePredictionContextEMPTY = NewEmptyPredictionContext() +// EmptyPredictionContext TODO: docs. type EmptyPredictionContext struct { *BaseSingletonPredictionContext } +// NewEmptyPredictionContext returns a new instance of EmptyPredictionContext. func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - - return p + return &EmptyPredictionContext{ + BaseSingletonPredictionContext: NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState), + } } func (e *EmptyPredictionContext) isEmpty() bool { return true } +// GetParent always returns nil. func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { return nil } @@ -241,6 +245,7 @@ func (e *EmptyPredictionContext) String() string { return "$" } +// ArrayPredictionContext TODO: docs. 
type ArrayPredictionContext struct { *BasePredictionContext @@ -248,25 +253,27 @@ type ArrayPredictionContext struct { returnStates []int } +// NewArrayPredictionContext returns a new instance of ArrayPredictionContext. func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // from //EMPTY and non-empty. We merge //EMPTY by using // nil parent and - // returnState == {@link //EmptyReturnState}. + // returnState == //EmptyReturnState. - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(37) + c := &ArrayPredictionContext{ + BasePredictionContext: NewBasePredictionContext(37), + parents: parents, + returnStates: returnStates, + } for i := range parents { c.cachedHash += calculateHash(parents[i], returnStates[i]) } - c.parents = parents - c.returnStates = returnStates - return c } +// GetReturnStates returns the return states of this object. func (a *ArrayPredictionContext) GetReturnStates() []int { return a.returnStates } @@ -285,6 +292,7 @@ func (a *ArrayPredictionContext) length() int { return len(a.returnStates) } +// GetParent returns the i-th parent. func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { return a.parents[index] } @@ -315,7 +323,7 @@ func (a *ArrayPredictionContext) hash() int { h = murmurUpdate(h, r) } - return murmurFinish(h, 2 * len(a.parents)) + return murmurFinish(h, 2*len(a.parents)) } func (a *ArrayPredictionContext) String() string { @@ -343,8 +351,8 @@ func (a *ArrayPredictionContext) String() string { return s + "]" } -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// Convert a RuleContext tree to a BasePredictionContext graph. +// Return //EMPTY if outerContext is empty or nil. // / func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { if outerContext == nil { @@ -396,33 +404,33 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) } // -// Merge two {@link SingletonBasePredictionContext} instances. +// Merge two SingletonBasePredictionContext instances. // -//Stack tops equal, parents merge is same return left graph.
+// Stack tops equal, parents merge is same return left graph.
+//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
+// These local-context merge operations are used when rootIsWildcard
+// is true.
+//
-// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.
-//
-// Special case of last merge if local context.
+// //EMPTY is superset of any graph return //EMPTY.
+//
+// //EMPTY and anything is //EMPTY, so merged parent is
+// //EMPTY return left graph.
+//
+// Special case of last merge if local context.
+//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
+// These full-context merge operations are used when rootIsWildcard
+// is false.
+//
-// Must keep all contexts {@link //EMPTY} in array is a special value (and
-// nil parent).
+// Must keep all contexts //EMPTY in array is a special value (and
+// nil parent).
+//
+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
+// Equal tops, merge parents and reduce top to
+// SingletonBasePredictionContext.
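The case analysis above can be made concrete on a drastically simplified singleton context. This sketch models only three outcomes, merging with $ under rootIsWildcard, equal graphs, and "an array node is needed"; real merges also recurse into parents and cache their results:

package main

import "fmt"

// sctx is a simplified singleton prediction context: one return state
// plus a parent; the shared empty node plays the role of $ (EMPTY).
type sctx struct {
	ret    int
	parent *sctx
}

var empty = &sctx{ret: -1}

func equal(a, b *sctx) bool {
	if a == nil || b == nil {
		return a == b
	}
	return a.ret == b.ret && equal(a.parent, b.parent)
}

// merge sketches the local-context (rootIsWildcard) rules documented
// above; a two-element result means "an array node is required".
func merge(a, b *sctx) []*sctx {
	if a == empty || b == empty {
		return []*sctx{empty} // $ is a superset of any graph
	}
	if equal(a, b) {
		return []*sctx{a} // stack tops equal, parents merge the same
	}
	// different tops (or differing parents): array node over both,
	// each element still pointing at its original parent
	return []*sctx{a, b}
}

func main() {
	p := &sctx{ret: 5, parent: empty}
	fmt.Println(merge(&sctx{1, p}, empty)[0] == empty) // true: $ wins
	fmt.Println(len(merge(&sctx{1, p}, &sctx{1, p})))  // 1: left graph
	fmt.Println(len(merge(&sctx{1, p}, &sctx{2, p})))  // 2: array node
}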
// When using this prediction mode, the parser will either return a correct // parse tree (i.e. the same parse tree that would be returned with the - // {@link //LL} prediction mode), or it will Report a syntax error. If a - // syntax error is encountered when using the {@link //SLL} prediction mode, + // //LL prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the //SLL prediction mode, // it may be due to either an actual syntax error in the input or indicate // that the particular combination of grammar and input requires the more - // powerful {@link //LL} prediction abilities to complete successfully.
+ // powerful //LL prediction abilities to complete successfully. // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
// + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. PredictionModeSLL = 0 + + // PredictionModeLL is the LL(*) prediction mode. This prediction mode + // allows the current parser context to be used for resolving SLL conflicts + // that occur during prediction. This is the fastest prediction mode that + // guarantees correct parse results for all combinations of grammars with + // syntactically correct inputs. // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //// When using this prediction mode, the parser will make correct decisions // for all syntactically-correct grammar and input combinations. However, in // cases where the grammar is truly ambiguous this prediction mode might not // Report a precise answer for exactly which alternatives are - // ambiguous.
+ // ambiguous. // - //- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
// + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. PredictionModeLL = 1 + + // PredictionModeLLExactAmbigDetection is the LL(*) prediction mode with + // exact ambiguity detection. In addition to the correctness guarantees + // provided by the //LL prediction mode, this prediction mode instructs the + // prediction algorithm to determine the complete and exact set of ambiguous + // alternatives for every ambiguous decision encountered while parsing. // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //// This prediction mode may be used for diagnosing ambiguities during // grammar development. Due to the performance overhead of calculating sets // of ambiguous alternatives, this prediction mode should be avoided when - // the exact results are not necessary.
+ // the exact results are not necessary. // - //// This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.
- // + // behavior for syntactically-incorrect inputs. PredictionModeLLExactAmbigDetection = 2 ) +// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction +// termination condition. // -// Computes the SLL prediction termination condition. -// -//// This method computes the SLL prediction termination condition for both of -// the following cases.
+// the following cases. +// +// · The usual SLL+LL fallback upon SLL conflict // -//COMBINED SLL+LL PARSING
+// COMBINED SLL+LL PARSING // -//When LL-fallback is enabled upon SLL conflict, correct predictions are +// When LL-fallback is enabled upon SLL conflict, correct predictions are // ensured regardless of how the termination condition is computed by this // method. Due to the substantially higher cost of LL prediction, the // prediction should only fall back to LL when the additional lookahead -// cannot lead to a unique SLL prediction.
+// cannot lead to a unique SLL prediction. // -//Assuming combined SLL+LL parsing, an SLL configuration set with only +// Assuming combined SLL+LL parsing, an SLL configuration set with only // conflicting subsets should fall back to full LL, even if the // configuration sets don't resolve to the same alternative (e.g. -// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting +// {1,2} and {3,4}. If there is at least one non-conflicting // configuration, SLL could continue with the hopes that more lookahead will -// resolve via one of those non-conflicting configurations.
+// resolve via one of those non-conflicting configurations.
//
-// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
+// full LL keeps going when there is uncertainty. // -//HEURISTIC
+// HEURISTIC // -//As a heuristic, we stop prediction when we see any conflicting subset +// As a heuristic, we stop prediction when we see any conflicting subset // unless we see a state that only has one alternative associated with it. // The single-alt-state thing lets prediction continue upon rules like -// (otherwise, it would admit defeat too soon):
+// (otherwise, it would admit defeat too soon):
//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }
+// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';'
//
-// When the ATN simulation reaches the state before {@code ''}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// When the ATN simulation reaches the state before ';', it has a
+// DFA state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop
// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.
+// via [6|2|[]].
//
-// It also let's us continue for this rule:
+// It also lets us continue for this rule:
//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B // -//After Matching input A, we reach the stop state for rule A, state 1. +// After Matching input A, we reach the stop state for rule A, state 1. // State 8 is the state right before B. Clearly alternatives 1 and 2 // conflict and no amount of further lookahead will separate the two. // However, alternative 3 will be able to continue and so we do not stop // working on this state. In the previous example, we're concerned with // states associated with the conflicting alternatives. Here alt 3 is not // associated with the conflicting configs, but since we can continue -// looking for input reasonably, don't declare the state done.
+// looking for input reasonably, don't declare the state done. // -//PURE SLL PARSING
+// PURE SLL PARSING // -//To handle pure SLL parsing, all we have to do is make sure that we +// To handle pure SLL parsing, all we have to do is make sure that we // combine stack contexts for configurations that differ only by semantic -// predicate. From there, we can do the usual SLL termination heuristic.
+// predicate. From there, we can do the usual SLL termination heuristic. // -//PREDICATES IN SLL+LL PARSING
+// PREDICATES IN SLL+LL PARSING // -//SLL decisions don't evaluate predicates until after they reach DFA stop +// SLL decisions don't evaluate predicates until after they reach DFA stop // states because they need to create the DFA cache that works in all // semantic situations. In contrast, full LL evaluates predicates collected // during start state computation so it can ignore predicates thereafter. // This means that SLL termination detection can totally ignore semantic -// predicates.
+// predicates. // -//Implementation-wise, {@link ATNConfigSet} combines stack contexts but not +// Implementation-wise, ATNConfigSet combines stack contexts but not // semantic predicate contexts so we might see two configurations like the -// following.
+// following. // -//{@code (s, 1, x, {}), (s, 1, x', {p})}
+// (s, 1, x, {}), (s, 1, x', {p})
//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations.
+// Before testing these configurations against others, we have to merge +// x and x' (without modifying the existing configurations). +// For example, we test (x+x')==x'' when looking for conflicts in +// the following configurations. // -//{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})
//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// If the configuration set has predicates (as indicated by
+// ATNConfigSet//hasSemanticContext), this algorithm makes a copy of
// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
+// ATNConfigSet will merge everything ignoring predicates. // func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { // Configs in rule stop states indicate reaching the end of the decision @@ -182,7 +171,7 @@ func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConf for _, c := range configs.GetItems() { // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) + c = ATNConfigWithContext(c, SemanticContextNone) dup.Add(c, nil) } configs = dup @@ -190,18 +179,18 @@ func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConf // now we have combined contexts for configs with dissimilar preds } // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) + altsets := predictionModeGetConflictingAltSubsets(configs) return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) } -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached +// PredictionModehasConfigInRuleStopState checks if any configuration in configs +// is in a RuleStopState. Configurations meeting this condition have reached // the end of the decision rule (local context) or end of start rule (full // context). // // @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} +// @return true if any configuration in configs is in a +// RuleStopState, otherwise false func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { for _, c := range configs.GetItems() { if _, ok := c.GetState().(*RuleStopState); ok { @@ -211,14 +200,14 @@ func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { return false } -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). +// PredictionModeallConfigsInRuleStopStates checks if all configurations in +// configs are in a RuleStopState. Configurations meeting this condition have +// reached the end of the decision rule (local context) or end of start rule +// (full context). // // @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} +// @return true if all configurations in configs are in a +// RuleStopState, otherwise false func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { for _, c := range configs.GetItems() { @@ -229,172 +218,161 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { return true } +// PredictionModeresolvesToJustOneViableAlt is full LL prediction termination. // -// Full LL prediction termination. -// -//Can we stop looking ahead during ATN simulation or is there some +// Can we stop looking ahead during ATN simulation or is there some // uncertainty as to which alternative we will ultimately pick, after // consuming more input? Even if there are partial conflicts, we might know // that everything is going to resolve to the same minimum alternative. That // means we can stop since no more lookahead will change that fact. On the // other hand, there might be multiple conflicts that resolve to different // minimums. 
That means we need more look ahead to decide which of those -// alternatives we should predict.
+// alternatives we should predict. // -//The basic idea is to split the set of configurations {@code C}, into -// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with +// The basic idea is to split the set of configurations C, into +// conflicting subsets (s, _, ctx, _) and singleton subsets with // non-conflicting configurations. Two configurations conflict if they have -// identical {@link ATNConfig//state} and {@link ATNConfig//context} values -// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} -// and {@code (s, j, ctx, _)} for {@code i!=j}.
+// identical ATNConfig//state and ATNConfig//context values +// but different ATNConfig//alt value, e.g. (s, i, ctx, _) +// and (s, j, ctx, _) for i!=j. // -//Reduce these configuration subsets to the set of possible alternatives. -// You can compute the alternative subsets in one pass as follows:
+// Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows: // -//{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in -// {@code C} holding {@code s} and {@code ctx} fixed.
+// A_s,ctx = {i | (s, i, ctx, _)} for each configuration in +// C holding s and ctx fixed. // -//Or in pseudo-code, for each configuration {@code c} in {@code C}:
+// Or in pseudo-code, for each configuration c in C:
//
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
+// map[c] U= c.alt // map hash/equals uses s and x, not
+// alt and not pred
//
-//The values in {@code map} are the set of {@code A_s,ctx} sets.
+// The values in map are the set of A_s,ctx sets.
//
-//If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
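
To make the pseudo-code concrete, here is a minimal Go sketch of the subset
computation, using simplified stand-in types instead of the runtime's
ATNConfig and bitSet (predictionModeGetConflictingAltSubsets below is the
real implementation):

    // Sketch only: simplified stand-ins for ATNConfig and bitSet.
    type config struct {
        state int // ATN state number s
        ctx   int // hash of the prediction context x
        alt   int // predicted alternative i
    }

    // altSubsets unions alternatives over configs that share (s, x), i.e.
    // map[c] U= c.alt with hash/equals on s and x only, never on alt or pred.
    func altSubsets(configs []config) map[[2]int]map[int]bool {
        subsets := make(map[[2]int]map[int]bool)
        for _, c := range configs {
            key := [2]int{c.state, c.ctx}
            if subsets[key] == nil {
                subsets[key] = make(map[int]bool)
            }
            subsets[key][c.alt] = true
        }
        return subsets
    }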
+// If |A_s,ctx|=1 then there is no conflict associated with +// s and ctx. // -//Reduce the subsets to singletons by choosing a minimum of each subset. If +// Reduce the subsets to singletons by choosing a minimum of each subset. If // the union of these alternative subsets is a singleton, then no amount of // more lookahead will help us. We will always pick that alternative. If, // however, there is more than one alternative, then we are uncertain which // alternative to predict and must continue looking for resolution. We may // or may not discover an ambiguity in the future, even if there are no -// conflicting subsets this round.
+// conflicting subsets this round.
//
-//The biggest sin is to terminate early because it means we've made a
+// The biggest sin is to terminate early because it means we've made a
// decision but were uncertain as to the eventual outcome. We haven't used
// enough lookahead. On the other hand, announcing a conflict too late is no
// big deal; you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
+// might even look until the end of file. // -//No special consideration for semantic predicates is required because +// No special consideration for semantic predicates is required because // predicates are evaluated on-the-fly for full LL prediction, ensuring that // no configuration contains a semantic context during the termination -// check.
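
As a hedged illustration of the stopping rule above (mirroring
PredictionModegetSingleViableAlt at the end of this file, but over plain int
slices instead of the runtime's bit sets; 0 stands in for
ATNInvalidAltNumber):

    // Sketch: prediction can stop once every non-empty subset resolves to
    // the same minimum alternative.
    func singleViableAlt(altsets [][]int) int {
        result := 0
        for _, alts := range altsets {
            min := alts[0]
            for _, a := range alts {
                if a < min {
                    min = a
                }
            }
            if result == 0 {
                result = min
            } else if result != min {
                return 0 // minimums disagree: keep consuming lookahead
            }
        }
        return result
    }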
-//
-//CONFLICTING CONFIGS
-//
-//Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-//For simplicity, I'm doing a equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
+// check.
+//
+// CONFLICTING CONFIGS
+//
+// Two configurations (s, i, x) and (s, j, x'), conflict
+// when i!=j but x=x'. Because we merge all
+// (s, i, _) configurations together, that means that there are at
+// most n configurations associated with state s for
+// n possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts x and
+// x'. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either x or
+// x'. If the x associated with lowest alternative i
+// is the superset, then i is the only possible prediction since the
+// others resolve to min(i) as well. However, if x is
+// associated with j>i then at least one stack configuration for
+// j is not in conflict with alternative i. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between x and
+// x' that lets the algorithm continue to consume lookahead longer
// than necessary. The reason I like the equality is of course the
// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
+// alternatives that are actually in conflict.
//
-//CONTINUE/STOP RULE
+// CONTINUE/STOP RULE // -//Continue if union of resolved alternative sets from non-conflicting and +// Continue if union of resolved alternative sets from non-conflicting and // conflicting alternative subsets has more than one alternative. We are -// uncertain about which alternative to predict.
+// uncertain about which alternative to predict. // -//The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which +// The complete set of alternatives, [i for (_,i,_)], tells us which // alternatives are still in the running for the amount of input we've // consumed at this point. The conflicting sets let us to strip away // configurations that won't lead to more states because we resolve // conflicts to the configuration with a minimum alternate for the -// conflicting set.
+// conflicting set. // -//CASES
+// CASES // -//EXACT AMBIGUITY DETECTION
+// EXACT AMBIGUITY DETECTION // -//If all states Report the same conflicting set of alternatives, then we -// know we have the exact ambiguity set.
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set.
//
-//|A_i|>1 and A_i = A_j for all i, j.
+// |A_i|>1 and A_i = A_j for all i, j.
-//In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
+// In other words, we continue examining lookahead until all A_i
+// have more than one alternative and all A_i are the same. If
+// A={{1,2}, {1,3}}, then regular LL prediction would terminate
+// because the resolved set is {1}. To determine what the real
// ambiguity is, we have to know whether the ambiguity is between one and
// two or one and three so we keep going. We can only stop prediction when
// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
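
A short Go sketch of the exact-ambiguity stop test described here, combining
the roles of PredictionModeallSubsetsConflict and PredictionModeallSubsetsEqual
below, with sorted []int slices standing in for bit sets:

    // Sketch: stop for exact ambiguity only when every subset conflicts
    // (has more than one alternative) and all subsets are identical, e.g.
    // {{1,2},{1,2}} stops while {{1,2},{1,3}} keeps looking.
    func exactAmbiguity(altsets [][]int) bool {
        for _, alts := range altsets {
            if len(alts) < 2 {
                return false // some subset is not conflicting
            }
        }
        for i := 1; i < len(altsets); i++ {
            if len(altsets[i]) != len(altsets[0]) {
                return false
            }
            for j := range altsets[i] {
                if altsets[i][j] != altsets[0][j] {
                    return false // conflicts differ, e.g. {1,2} vs {1,3}
                }
            }
        }
        return true
    }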
+// A={{1,2}} or {{1,2,{1,2}}}, etc... // -func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { +func PredictionModeresolvesToJustOneViableAlt(altsets []*bitSet) int { return PredictionModegetSingleViableAlt(altsets) } -// -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. +// PredictionModeallSubsetsConflict determines if every alternative subset in +// altsets contains more than one alternative. // // @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { +// @return true if every BitSet in altsets has +// BitSet//cardinality cardinality > 1, otherwise false +func PredictionModeallSubsetsConflict(altsets []*bitSet) bool { return !PredictionModehasNonConflictingAltSet(altsets) } -// -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. +// PredictionModehasNonConflictingAltSet determines if any single alternative +// subset in altsets contains exactly one alternative. // // @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -// -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { +// @return true if altsets contains a BitSet with +// BitSet//cardinality cardinality 1, otherwise false +func PredictionModehasNonConflictingAltSet(altsets []*bitSet) bool { for i := 0; i < len(altsets); i++ { alts := altsets[i] if alts.length() == 1 { @@ -404,15 +382,14 @@ func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { return false } -// -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. +// PredictionModehasConflictingAltSet Determines if any single alternative +// subset in altsets contains more than one alternative. // // @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +// @return true if altsets contains a BitSet with +// BitSet//cardinality cardinality > 1, otherwise false // -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { +func PredictionModehasConflictingAltSet(altsets []*bitSet) bool { for i := 0; i < len(altsets); i++ { alts := altsets[i] if alts.length() > 1 { @@ -422,15 +399,15 @@ func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { return false } -// -// Determines if every alternative subset in {@code altsets} is equivalent. +// PredictionModeallSubsetsEqual determines if every alternative subset in +// altsets is equivalent. 
// // @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} +// @return true if every member of altsets is equal to the +// others, otherwise false // -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet +func PredictionModeallSubsetsEqual(altsets []*bitSet) bool { + var first *bitSet for i := 0; i < len(altsets); i++ { alts := altsets[i] @@ -444,15 +421,14 @@ func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { return true } -// -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. +// PredictionModegetUniqueAlt returns the unique alternative predicted by all +// alternative subsets in altsets. If no such alternative exists, this method +// returns ATN//INVALID_ALT_NUMBER. // // @param altsets a collection of alternative subsets // -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) +func PredictionModegetUniqueAlt(altsets []*bitSet) int { + all := predictionModeGetAlts(altsets) if all.length() == 1 { return all.minValue() } @@ -460,84 +436,82 @@ func PredictionModegetUniqueAlt(altsets []*BitSet) int { return ATNInvalidAltNumber } -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. +// predictionModeGetAlts gets the complete set of represented alternatives for +// a collection of alternative subsets. This method returns the union of each +// BitSet in altsets. // // @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} +// @return the set of represented alternatives in altsets // -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() +func predictionModeGetAlts(altsets []*bitSet) *bitSet { + all := newBitSet() for _, alts := range altsets { all.or(alts) } return all } +// predictionModeGetConflictingAltSubsets gets the conflicting alt subsets from +// a configuration set. For each configuration c in configs: // -// This func gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not -// alt and not pred -//+// map[c] U= c.ATNConfig//alt alt // map hash/equals uses s and x, not +// alt and not pred // -func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := make(map[int]*BitSet) +func predictionModeGetConflictingAltSubsets(configs ATNConfigSet) []*bitSet { + configToAlts := make(map[int]*bitSet) for _, c := range configs.GetItems() { - key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() + key := 31*c.GetState().GetStateNumber() + c.GetContext().hash() alts, ok := configToAlts[key] if !ok { - alts = NewBitSet() + alts = newBitSet() configToAlts[key] = alts } alts.add(c.GetAlt()) } - values := make([]*BitSet, 0, 10) + values := make([]*bitSet, 0, 10) for _, v := range configToAlts { values = append(values, v) } return values } +// predictionModeGetStateToAltMap get a map from state to alt subset from a +// configuration set. For each configuration c in configs: // -// Get a map from state to alt subset from a configuration set. 
For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} -//+// map[c.ATNConfig//state state] U= c.ATNConfig//alt alt // -func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() +func predictionModeGetStateToAltMap(configs ATNConfigSet) *altDict { + m := newAltDict() for _, c := range configs.GetItems() { alts := m.Get(c.GetState().String()) if alts == nil { - alts = NewBitSet() + alts = newBitSet() m.put(c.GetState().String(), alts) } - alts.(*BitSet).add(c.GetAlt()) + alts.(*bitSet).add(c.GetAlt()) } return m } +// PredictionModehasStateAssociatedWithOneAlt returns true if the given config +// has a state with exactly one alternative. func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() + values := predictionModeGetStateToAltMap(configs).values() for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { + if values[i].(*bitSet).length() == 1 { return true } } return false } -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { +// PredictionModegetSingleViableAlt returns the index of the single viable +// alternative if there is one. +func PredictionModegetSingleViableAlt(altsets []*bitSet) int { result := ATNInvalidAltNumber for i := 0; i < len(altsets); i++ { diff --git a/runtime/Go/antlr/recognizer.go b/runtime/Go/antlr/recognizer.go index 89d35244a2..20ccc40953 100644 --- a/runtime/Go/antlr/recognizer.go +++ b/runtime/Go/antlr/recognizer.go @@ -6,11 +6,10 @@ package antlr import ( "fmt" - "strings" - "strconv" ) +// Recognizer is the base type for generated lexers and parsers. type Recognizer interface { GetLiteralNames() []string GetSymbolicNames() []string @@ -28,6 +27,7 @@ type Recognizer interface { GetErrorListenerDispatch() ErrorListener } +// BaseRecognizer is the base implementation for Recognizer. type BaseRecognizer struct { listeners []ErrorListener state int @@ -38,11 +38,12 @@ type BaseRecognizer struct { GrammarFileName string } +// NewBaseRecognizer returns a new instance of BaseRecognizer func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec + return &BaseRecognizer{ + listeners: []ErrorListener{ConsoleErrorListenerINSTANCE}, + state: -1, + } } var tokenTypeMapCache = make(map[string]int) @@ -55,163 +56,87 @@ func (b *BaseRecognizer) checkVersion(toolVersion string) { } } +// Action does nothing by default. func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { panic("action not implemented on Recognizer!") } +// AddErrorListener subscribes the given listener to this recognizer. func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { b.listeners = append(b.listeners, listener) } +// RemoveErrorListeners removes all the listeners from this recognizer. func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) + b.listeners = b.listeners[:0] } +// GetRuleNames returns the names for the rules in this recognizer. func (b *BaseRecognizer) GetRuleNames() []string { return b.RuleNames } +// GetTokenNames returns the literal names contained in this recognizer. func (b *BaseRecognizer) GetTokenNames() []string { return b.LiteralNames } +// GetSymbolicNames returns the symbolic names contained in this recognizer. 
func (b *BaseRecognizer) GetSymbolicNames() []string { return b.SymbolicNames } +// GetLiteralNames returns the literal names contained in this recognizer. func (b *BaseRecognizer) GetLiteralNames() []string { return b.LiteralNames } +// GetState returns the current state of this recognizer. func (b *BaseRecognizer) GetState() int { return b.state } +// SetState sets the current state of this recognizer. func (b *BaseRecognizer) SetState(v int) { b.state = v } -//func (b *Recognizer) GetTokenTypeMap() { -// var tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current recognizer does not provide a list of token names.") -// } -// var result = tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. +// GetRuleIndexMap returns a map from rule names to rule indexes. // -//
Used for XPath and tree pattern compilation.
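
Since the Go port leaves GetRuleIndexMap unimplemented (it panics, as the
hunk below shows), a concrete recognizer could derive the map from its rule
names. A hypothetical helper, not part of this diff:

    // ruleIndexMap builds a rule-name-to-index map from a RuleNames slice.
    func ruleIndexMap(ruleNames []string) map[string]int {
        m := make(map[string]int, len(ruleNames))
        for i, name := range ruleNames {
            m[name] = i
        }
        return m
    }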
+// By default it's not implemented. // +// Used for XPath and tree pattern compilation. func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result } +// GetTokenType does nothing by default. func (b *BaseRecognizer) GetTokenType(tokenName string) int { panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } } -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. // - +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of b method. func (b *BaseRuleContext) GetParent() Tree { return b.parentCtx } diff --git a/runtime/Go/antlr/semantic_context.go b/runtime/Go/antlr/semantic_context.go index 49205a1624..5306867464 100644 --- a/runtime/Go/antlr/semantic_context.go +++ b/runtime/Go/antlr/semantic_context.go @@ -9,14 +9,12 @@ import ( "strconv" ) -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// SemanticContext is a tree structure used to record the semantic context in +// which an ATN configuration is valid. It's either a single predicate, a +// conjunction p1&&p2, or a sum of products p1||p2. // -//
I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.
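
For orientation, a minimal sketch of how these contexts combine, using only
constructors that appear in this file (SemanticContextandContext collapses
SemanticContextNone operands and otherwise builds an AND, per the code
below); treat the exact reduction behavior as an assumption:

    // Sketch: p1 && p2 as a semantic context tree.
    func exampleAnd() SemanticContext {
        p1 := NewPredicate(0, 0, false) // rule 0, predicate 0
        p2 := NewPredicate(1, 1, false) // rule 1, predicate 1
        return SemanticContextandContext(p1, p2)
    }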
-// - +// I have scoped the AND, OR, and Predicate subclasses of +// SemanticContext within the scope of this outer class. type SemanticContext interface { comparable @@ -27,6 +25,7 @@ type SemanticContext interface { String() string } +// SemanticContextandContext TODO: docs. func SemanticContextandContext(a, b SemanticContext) SemanticContext { if a == nil || a == SemanticContextNone { return b @@ -42,6 +41,7 @@ func SemanticContextandContext(a, b SemanticContext) SemanticContext { return result } +// SemanticContextorContext TODO: docs. func SemanticContextorContext(a, b SemanticContext) SemanticContext { if a == nil { return b @@ -60,24 +60,24 @@ func SemanticContextorContext(a, b SemanticContext) SemanticContext { return result } +// Predicate represents a semantic predicate type Predicate struct { ruleIndex int predIndex int isCtxDependent bool } +// NewPredicate returns a new instance of Predicate. func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { - p := new(Predicate) - - p.ruleIndex = ruleIndex - p.predIndex = predIndex - p.isCtxDependent = isCtxDependent // e.g., $i ref in pred - return p + return &Predicate{ + ruleIndex: ruleIndex, + predIndex: predIndex, + isCtxDependent: isCtxDependent, + } } -//The default {@link SemanticContext}, which is semantically equivalent to -//a predicate of the form {@code {true}?}. - +// SemanticContextNone is the default SemanticContext, which is semantically +// equivalent to a predicate of the form {true?}. var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { @@ -115,16 +115,14 @@ func (p *Predicate) String() string { return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" } +// PrecedencePredicate TODO: docs type PrecedencePredicate struct { precedence int } +// NewPrecedencePredicate returns a new instance of PrecedencePredicate. func NewPrecedencePredicate(precedence int) *PrecedencePredicate { - - p := new(PrecedencePredicate) - p.precedence = precedence - - return p + return &PrecedencePredicate{precedence: precedence} } func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { @@ -161,10 +159,11 @@ func (p *PrecedencePredicate) String() string { return "{" + strconv.Itoa(p.precedence) + ">=prec}?" } -func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { +// PrecedencePredicatefilterPrecedencePredicates TODO: docs. +func PrecedencePredicatefilterPrecedencePredicates(s *set) []*PrecedencePredicate { result := make([]*PrecedencePredicate, 0) - for _, v := range set.values() { + for _, v := range s.values() { if c2, ok := v.(*PrecedencePredicate); ok { result = append(result, c2) } @@ -173,16 +172,15 @@ func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredic return result } -// A semantic context which is true whenever none of the contained contexts -// is false.` - +// AND is true whenever none of the contained contexts is false. type AND struct { opnds []SemanticContext } +// NewAND returns a new instance of NewAND. 
func NewAND(a, b SemanticContext) *AND { - operands := NewSet(nil, nil) + operands := newSet(nil, nil) if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.add(o) @@ -218,10 +216,7 @@ func NewAND(a, b SemanticContext) *AND { opnds[i] = v.(SemanticContext) } - and := new(AND) - and.opnds = opnds - - return and + return &AND{opnds: opnds} } func (a *AND) equals(other interface{}) bool { @@ -242,9 +237,9 @@ func (a *AND) equals(other interface{}) bool { // // {@inheritDoc} // -//+// // The evaluation of predicates by a context is short-circuiting, but -// unordered.
+// unordered. // func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { for i := 0; i < len(a.opnds); i++ { @@ -323,18 +318,15 @@ func (a *AND) String() string { return s } -// -// A semantic context which is true whenever at least one of the contained -// contexts is true. -// - +// OR is true whenever at least one of the contained contexts is true. type OR struct { opnds []SemanticContext } +// NewOR returns a new instance of OR. func NewOR(a, b SemanticContext) *OR { - operands := NewSet(nil, nil) + operands := newSet(nil, nil) if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { operands.add(o) @@ -371,20 +363,17 @@ func NewOR(a, b SemanticContext) *OR { opnds[i] = v.(SemanticContext) } - o := new(OR) - o.opnds = opnds - - return o + return &OR{opnds: opnds} } -func (o *OR) equals(other interface{}) bool { - if o == other { +func (a *OR) equals(other interface{}) bool { + if a == other { return true } else if _, ok := other.(*OR); !ok { return false } else { for i, v := range other.(*OR).opnds { - if !o.opnds[i].equals(v) { + if !a.opnds[i].equals(v) { return false } } @@ -392,24 +381,24 @@ func (o *OR) equals(other interface{}) bool { } } -//+// // The evaluation of predicates by o context is short-circuiting, but -// unordered.
+// unordered. // -func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(o.opnds); i++ { - if o.opnds[i].evaluate(parser, outerContext) { +func (a *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(a.opnds); i++ { + if a.opnds[i].evaluate(parser, outerContext) { return true } } return false } -func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { +func (a *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { differs := false operands := make([]SemanticContext, 0) - for i := 0; i < len(o.opnds); i++ { - context := o.opnds[i] + for i := 0; i < len(a.opnds); i++ { + context := a.opnds[i] evaluated := context.evalPrecedence(parser, outerContext) differs = differs || (evaluated != context) if evaluated == SemanticContextNone { @@ -421,7 +410,7 @@ func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) Semanti } } if !differs { - return o + return a } if len(operands) == 0 { // all elements were false, so the OR context is false @@ -440,10 +429,10 @@ func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) Semanti return result } -func (o *OR) String() string { +func (a *OR) String() string { s := "" - for _, o := range o.opnds { + for _, o := range a.opnds { s += "|| " + fmt.Sprint(o) } diff --git a/runtime/Go/antlr/testing_assert_test.go b/runtime/Go/antlr/testing_assert_test.go deleted file mode 100644 index f3ca0d3412..0000000000 --- a/runtime/Go/antlr/testing_assert_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -// These assert functions are borrowed from https://github.com/stretchr/testify/ (MIT License) - -package antlr - -import ( - "fmt" - "reflect" - "testing" -) - -type assert struct { - t *testing.T -} - -func assertNew(t *testing.T) *assert { - return &assert{ - t: t, - } -} - -func (a *assert) Equal(expected, actual interface{}) bool { - if !objectsAreEqual(expected, actual) { - return a.Fail(fmt.Sprintf("Not equal:\n"+ - "expected: %#v\n"+ - " actual: %#v\n", expected, actual)) - } - return true -} - -func objectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - return reflect.DeepEqual(expected, actual) -} - -func (a *assert) Nil(object interface{}) bool { - if isNil(object) { - return true - } - return a.Fail(fmt.Sprintf("Expected nil, but got: %#v", object)) -} - -func (a *assert) NotNil(object interface{}) bool { - if !isNil(object) { - return true - } - return a.Fail("Expected value not to be nil.") -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -func (a *assert) Panics(f func()) bool { - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return a.Fail(fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue)) - } - - return true -} - -// Fail reports a failure through -func (a *assert) Fail(failureMessage string) bool { - a.t.Errorf("%s", failureMessage) - return false -} - -// didPanic returns true if the function passed to it panics. 
Otherwise, it returns false. -func didPanic(f func()) (bool, interface{}) { - didPanic := false - var message interface{} - func() { - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - // call the target function - f() - }() - return didPanic, message -} diff --git a/runtime/Go/antlr/testing_lexer_b_test.go b/runtime/Go/antlr/testing_lexer_b_test.go deleted file mode 100644 index 4ab9b340d3..0000000000 --- a/runtime/Go/antlr/testing_lexer_b_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -/* -LexerB is a lexer for testing purpose. - -This file is generated from this grammer. - -lexer grammar LexerB; - -ID : 'a'..'z'+; -INT : '0'..'9'+; -SEMI : ';'; -ASSIGN : '='; -PLUS : '+'; -MULT : '*'; -WS : ' '+; -*/ - -var lexerB_serializedLexerAtn = []uint16{ - 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 9, 40, 8, - 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, - 7, 4, 8, 9, 8, 3, 2, 6, 2, 19, 10, 2, 13, 2, 14, 2, 20, 3, 3, 6, 3, 24, - 10, 3, 13, 3, 14, 3, 25, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, - 3, 8, 6, 8, 37, 10, 8, 13, 8, 14, 8, 38, 2, 2, 9, 3, 3, 5, 4, 7, 5, 9, - 6, 11, 7, 13, 8, 15, 9, 3, 2, 2, 2, 42, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, - 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, - 2, 2, 2, 15, 3, 2, 2, 2, 3, 18, 3, 2, 2, 2, 5, 23, 3, 2, 2, 2, 7, 27, 3, - 2, 2, 2, 9, 29, 3, 2, 2, 2, 11, 31, 3, 2, 2, 2, 13, 33, 3, 2, 2, 2, 15, - 36, 3, 2, 2, 2, 17, 19, 4, 99, 124, 2, 18, 17, 3, 2, 2, 2, 19, 20, 3, 2, - 2, 2, 20, 18, 3, 2, 2, 2, 20, 21, 3, 2, 2, 2, 21, 4, 3, 2, 2, 2, 22, 24, - 4, 50, 59, 2, 23, 22, 3, 2, 2, 2, 24, 25, 3, 2, 2, 2, 25, 23, 3, 2, 2, - 2, 25, 26, 3, 2, 2, 2, 26, 6, 3, 2, 2, 2, 27, 28, 7, 61, 2, 2, 28, 8, 3, - 2, 2, 2, 29, 30, 7, 63, 2, 2, 30, 10, 3, 2, 2, 2, 31, 32, 7, 45, 2, 2, - 32, 12, 3, 2, 2, 2, 33, 34, 7, 44, 2, 2, 34, 14, 3, 2, 2, 2, 35, 37, 7, - 34, 2, 2, 36, 35, 3, 2, 2, 2, 37, 38, 3, 2, 2, 2, 38, 36, 3, 2, 2, 2, 38, - 39, 3, 2, 2, 2, 39, 16, 3, 2, 2, 2, 6, 2, 20, 25, 38, 2, -} - -var lexerB_lexerDeserializer = NewATNDeserializer(nil) -var lexerB_lexerAtn = lexerB_lexerDeserializer.DeserializeFromUInt16(lexerB_serializedLexerAtn) - -var lexerB_lexerChannelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", -} - -var lexerB_lexerModeNames = []string{ - "DEFAULT_MODE", -} - -var lexerB_lexerLiteralNames = []string{ - "", "", "", "';'", "'='", "'+'", "'*'", -} - -var lexerB_lexerSymbolicNames = []string{ - "", "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", -} - -var lexerB_lexerRuleNames = []string{ - "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", -} - -type LexerB struct { - *BaseLexer - channelNames []string - modeNames []string - // TODO: EOF string -} - -var lexerB_lexerDecisionToDFA = make([]*DFA, len(lexerB_lexerAtn.DecisionToState)) - -func init() { - for index, ds := range lexerB_lexerAtn.DecisionToState { - lexerB_lexerDecisionToDFA[index] = NewDFA(ds, index) - } -} - -func NewLexerB(input CharStream) *LexerB { - l := new(LexerB) - - l.BaseLexer = NewBaseLexer(input) - l.Interpreter = NewLexerATNSimulator(l, lexerB_lexerAtn, lexerB_lexerDecisionToDFA, NewPredictionContextCache()) - - l.channelNames = lexerB_lexerChannelNames - l.modeNames = lexerB_lexerModeNames - l.RuleNames = lexerB_lexerRuleNames - l.LiteralNames = 
lexerB_lexerLiteralNames - l.SymbolicNames = lexerB_lexerSymbolicNames - l.GrammarFileName = "LexerB.g4" - // TODO: l.EOF = TokenEOF - - return l -} - -// LexerB tokens. -const ( - LexerBID = 1 - LexerBINT = 2 - LexerBSEMI = 3 - LexerBASSIGN = 4 - LexerBPLUS = 5 - LexerBMULT = 6 - LexerBWS = 7 -) diff --git a/runtime/Go/antlr/testing_util_test.go b/runtime/Go/antlr/testing_util_test.go deleted file mode 100644 index 20428831b3..0000000000 --- a/runtime/Go/antlr/testing_util_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package antlr - -import ( - "fmt" - "strings" -) - -// newTestCommonToken create common token with tokentype, text and channel -// notice: test purpose only -func newTestCommonToken(tokenType int, text string, channel int) *CommonToken { - t := new(CommonToken) - t.BaseToken = new(BaseToken) - t.tokenType = tokenType - t.channel = channel - t.text = text - t.line = 0 - t.column = -1 - return t -} - -// tokensToString returnes []Tokens string -// notice: test purpose only -func tokensToString(tokens []Token) string { - buf := make([]string, len(tokens)) - for i, token := range tokens { - buf[i] = fmt.Sprintf("%v", token) - } - - return "[" + strings.Join(buf, ", ") + "]" -} diff --git a/runtime/Go/antlr/token.go b/runtime/Go/antlr/token.go index 2d8e99095d..a86f2da6f4 100644 --- a/runtime/Go/antlr/token.go +++ b/runtime/Go/antlr/token.go @@ -9,15 +9,17 @@ import ( "strings" ) +// TokenSourceCharStreamPair is a tuple (TokenSource, CharStream) type TokenSourceCharStreamPair struct { tokenSource TokenSource charStream CharStream } +// Token is the result of lexical analysis and the input to the parser. +// // A token has properties: text, type, line, character position in the line // (so we can ignore tabs), token channel, index, and source from which // we obtained this token. - type Token interface { GetSource() *TokenSourceCharStreamPair GetTokenType() int @@ -37,6 +39,7 @@ type Token interface { GetInputStream() CharStream } +// BaseToken is the base implementation of Token. type BaseToken struct { source *TokenSourceCharStreamPair tokenType int // token type of the token @@ -51,88 +54,104 @@ type BaseToken struct { } const ( + // TokenInvalidType represents an error token. TokenInvalidType = 0 - // During lookahead operations, this "token" signifies we hit rule end ATN state - // and did not follow it despite needing to. + // TokenEpsilon is a utility token type. During lookahead operations, this + // "token" signifies we hit rule end ATN state and did not follow it despite + // needing to. TokenEpsilon = -2 + // TokenMinUserTokenType is the smallest value for a generated token type. TokenMinUserTokenType = 1 + // TokenEOF represents an end of input token. TokenEOF = -1 // All tokens go to the parser (unless Skip() is called in that rule) // on a particular "channel". The parser tunes to a particular channel // so that whitespace etc... can go to the parser on a "hidden" channel. + // TokenDefaultChannel represents the default channel. TokenDefaultChannel = 0 - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. - + // TokenHiddenChannel represents the hidden channel. TokenHiddenChannel = 1 ) +// GetChannel returns the channel this token is sent through. func (b *BaseToken) GetChannel() int { return b.channel } +// GetStart returns the offset this token starts at. func (b *BaseToken) GetStart() int { return b.start } +// GetStop returns the offset this token stops at. 
func (b *BaseToken) GetStop() int { return b.stop } +// GetLine returns the line this token starts at. func (b *BaseToken) GetLine() int { return b.line } +// GetColumn returns the column this token starts at. func (b *BaseToken) GetColumn() int { return b.column } +// GetTokenType returns the kind of token this is. func (b *BaseToken) GetTokenType() int { return b.tokenType } +// GetSource returns the source and character stream of this token. func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { return b.source } +// GetTokenIndex returns the index of this token func (b *BaseToken) GetTokenIndex() int { return b.tokenIndex } +// SetTokenIndex sets the index of this token. func (b *BaseToken) SetTokenIndex(v int) { b.tokenIndex = v } +// GetTokenSource returns the source of this token. func (b *BaseToken) GetTokenSource() TokenSource { return b.source.tokenSource } +// GetInputStream returns the character stream of this token. func (b *BaseToken) GetInputStream() CharStream { return b.source.charStream } +// CommonToken extends BaseToken type CommonToken struct { *BaseToken } +// NewCommonToken returns a new instance of CommonToken. func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - t := new(CommonToken) - - t.BaseToken = new(BaseToken) - - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 + t := &CommonToken{ + BaseToken: &BaseToken{ + source: source, + tokenType: tokenType, + channel: channel, + start: start, + stop: stop, + tokenIndex: -1, + }, + } if t.source.tokenSource != nil { t.line = source.tokenSource.GetLine() t.column = source.tokenSource.GetCharPositionInLine() @@ -142,20 +161,19 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start return t } -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. +// An empty Pair which is used as the default value of +// //source for tokens that do not have a source. //CommonToken.EMPTY_SOURCE = [ nil, nil ] -// Constructs a New{@link CommonToken} as a copy of another {@link Token}. +// Constructs a NewCommonToken as a copy of another Token. // -//-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //GetText}, and {@link //source} -// will be constructed from the result of {@link Token//GetTokenSource} and -// {@link Token//GetInputStream}.
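
A minimal sketch of building a token by hand with the constructor above; the
empty source pair is an assumption (NewCommonToken only reads line and column
when a token source is present):

    // Sketch: a hand-built EOF token on the default channel.
    func makeEOF() *CommonToken {
        src := &TokenSourceCharStreamPair{}
        return NewCommonToken(src, TokenEOF, TokenDefaultChannel, 0, 0)
    }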
+// If oldToken is also a CommonToken instance, the newly +// constructed token will share a reference to the //text field and +// the Pair stored in //source. Otherwise, //text will +// be assigned the result of calling //GetText, and //source +// will be constructed from the result of Token//GetTokenSource and +// Token//GetInputStream. // // @param oldToken The token to copy. // @@ -168,6 +186,7 @@ func (c *CommonToken) clone() *CommonToken { return t } +// GetText returns the text contained in this token. func (c *CommonToken) GetText() string { if c.text != "" { return c.text @@ -183,10 +202,12 @@ func (c *CommonToken) GetText() string { return "+// // You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not +// are done lazily--only if you convert the buffer to a String with +// TokenStream#getText(). This is very efficient because you are not // moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and +// strings, the #getText() method(s) scan the input token stream and // check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the +// operation is done and then normal String rendering continues on the // buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)
-//- +// (programs) operating on a single input tape. :) +// // This rewriter makes no modifications to the token stream. It does not ask the // stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.
- -//+// stream TokenStream#index() will return the same value before and +// after any #getText() call. +// // The rewriter only works on tokens that you have in the buffer and ignores the // current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.
- -//-// Since the operations are done lazily at {@link #getText}-time, operations do +// #getText() halfway through the input will only do rewrites for those +// tokens in the first half of the file. +// +// Since the operations are done lazily at #getText-time, operations do // not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.
- -//+// index i does not change the index values for tokens +// i+1..n-1. +// // Because operations never actually alter the buffer, you may always get the // original token stream back without undoing anything. Since the instructions // are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,
- -//-// CharStream input = new ANTLRFileStream("input"); -// TLexer lex = new TLexer(input); -// CommonTokenStream tokens = new CommonTokenStream(lex); -// T parser = new T(tokens); -// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens); -// parser.startRule(); -//- -//
-// Then in the rules, you can execute (assuming rewriter is visible):
- -//-// Token t,u; -// ... -// rewriter.insertAfter(t, "text to put after t");} -// rewriter.insertAfter(u, "text after u");} -// System.out.println(rewriter.getText()); -//- -//
+// if there is an error just by removing instructions. For example, +// +// CharStream input = new ANTLRFileStream("input"); +// TLexer lex = new TLexer(input); +// CommonTokenStream tokens = new CommonTokenStream(lex); +// T parser = new T(tokens); +// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens); +// parser.startRule(); +// +// Then in the rules, you can execute (assuming rewriter is visible): +// +// Token t,u; +// ... +// rewriter.insertAfter(t, "text to put after t");} +// rewriter.insertAfter(u, "text after u");} +// System.out.println(rewriter.getText()); +// // You can also have multiple "instruction streams" and get multiple rewrites // from a single pass over the input. Just name the instruction streams and use // that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:
- -//-// rewriter.insertAfter("pass1", t, "text to put after t");} -// rewriter.insertAfter("pass2", u, "text after u");} -// System.out.println(rewriter.getText("pass1")); -// System.out.println(rewriter.getText("pass2")); -//- -//
+// a C file and also its header file--all from the same buffer: +// +// rewriter.insertAfter("pass1", t, "text to put after t");} +// rewriter.insertAfter("pass2", u, "text after u");} +// System.out.println(rewriter.getText("pass1")); +// System.out.println(rewriter.getText("pass2")); +// // If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.
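
The examples in this comment are Java; a rough Go equivalent using the API in
this file might look like the sketch below. NewTLexer is a hypothetical
generated lexer, and NewCommonTokenStream is the usual constructor from this
runtime:

    // Sketch: queue an insertion, then render the rewritten text lazily.
    func exampleRewrite(input CharStream) string {
        lex := NewTLexer(input) // hypothetical generated lexer
        tokens := NewCommonTokenStream(lex, TokenDefaultChannel)
        tokens.Fill()
        rewriter := NewTokenStreamRewriter(tokens)
        rewriter.InsertAfterDefault(2, " /* after token 2 */")
        return rewriter.GetTextDefault()
    }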
- - - -const( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 +// first example shows. +const ( + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 ) // Define the rewrite operation hierarchy +// RewriteOperation represents the top of the rewrite operation hierarchy. type RewriteOperation interface { // Execute the rewrite operation by possibly adding to the buffer. // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream SetInstructionIndex(val int) SetIndex(int) SetText(string) @@ -112,151 +95,166 @@ type RewriteOperation interface { SetTokens(TokenStream) } +// BaseRewriteOperation is the default implementation for RewriteOperation type BaseRewriteOperation struct { //Current index of rewrites list - instruction_index int + instructionIndex int //Token buffer index - index int + index int //Substitution text - text string + text string //Actual operation name - op_name string + opName string //Pointer to token steam - tokens TokenStream + tokens TokenStream } -func (op *BaseRewriteOperation)GetInstructionIndex() int{ - return op.instruction_index +// GetInstructionIndex returns the instruction index of this operation. +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instructionIndex } -func (op *BaseRewriteOperation)GetIndex() int{ +// GetIndex returns the index of this operation. +func (op *BaseRewriteOperation) GetIndex() int { return op.index } -func (op *BaseRewriteOperation)GetText() string{ +// GetText returns the text of this operation. +func (op *BaseRewriteOperation) GetText() string { return op.text } -func (op *BaseRewriteOperation)GetOpName() string{ - return op.op_name +// GetOpName returns the name of this operation. +func (op *BaseRewriteOperation) GetOpName() string { + return op.opName } -func (op *BaseRewriteOperation)GetTokens() TokenStream{ +// GetTokens returns the input stream of this operation. +func (op *BaseRewriteOperation) GetTokens() TokenStream { return op.tokens } -func (op *BaseRewriteOperation)SetInstructionIndex(val int){ - op.instruction_index = val +// SetInstructionIndex sets the instruction index for this operation. +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instructionIndex = val } -func (op *BaseRewriteOperation)SetIndex(val int) { +// SetIndex sets the index of this operation. +func (op *BaseRewriteOperation) SetIndex(val int) { op.index = val } -func (op *BaseRewriteOperation)SetText(val string){ +// SetText sets the text for this operation. +func (op *BaseRewriteOperation) SetText(val string) { op.text = val } -func (op *BaseRewriteOperation)SetOpName(val string){ - op.op_name = val +// SetOpName sets the name for this operation. +func (op *BaseRewriteOperation) SetOpName(val string) { + op.opName = val } -func (op *BaseRewriteOperation)SetTokens(val TokenStream) { +// SetTokens sets the input stream for this operation. +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { op.tokens = val } - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ +// Execute the operation on the buffer. 
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { return op.index } -func (op *BaseRewriteOperation) String() string { +func (op *BaseRewriteOperation) String() string { return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, + op.opName, op.tokens.Get(op.GetIndex()), op.text, ) } - +// InsertBeforeOp TODO: docs. type InsertBeforeOp struct { BaseRewriteOperation } -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ - return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index, - text:text, - op_name:"InsertBeforeOp", - tokens:stream, +// NewInsertBeforeOp returns a new instance of InsertBeforeOp. +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, }} } -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ +// Execute the operation on the buffer. +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { buffer.WriteString(op.tokens.Get(op.index).GetText()) } - return op.index+1 + return op.index + 1 } func (op *InsertBeforeOp) String() string { return op.BaseRewriteOperation.String() } -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - +// InsertAfterOp distinguishes between insert after/before to do the "insert +// afters" first and then the "insert befores" at same index. Implementation +// of "insert after" is "insert before index+1". type InsertAfterOp struct { BaseRewriteOperation } -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ - return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index+1, - text:text, - tokens:stream, +// NewInsertAfterOp returns a new instance of InsertAfterOp. +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, }} } +// Execute the operation on the buffer. func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { buffer.WriteString(op.tokens.Get(op.index).GetText()) } - return op.index+1 + return op.index + 1 } func (op *InsertAfterOp) String() string { return op.BaseRewriteOperation.String() } -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. -type ReplaceOp struct{ +// ReplaceOp replaces range from x..y with (y-x)+1 +type ReplaceOp struct { BaseRewriteOperation LastIndex int } -func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { +// NewReplaceOp returns a new instance of ReplaceOp. 
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { return &ReplaceOp{ - BaseRewriteOperation:BaseRewriteOperation{ - index:from, - text:text, - op_name:"ReplaceOp", - tokens:stream, + BaseRewriteOperation: BaseRewriteOperation{ + index: from, + text: text, + opName: "ReplaceOp", + tokens: stream, }, - LastIndex:to, + LastIndex: to, } } -func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ - if op.text != ""{ +// Execute the operation on the buffer. +func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { + if op.text != "" { buffer.WriteString(op.text) } - return op.LastIndex +1 + return op.LastIndex + 1 } func (op *ReplaceOp) String() string { @@ -268,213 +266,247 @@ func (op *ReplaceOp) String() string { op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) } - +// TokenStreamRewriter takes in a token stream and rewrites it's tokens. type TokenStreamRewriter struct { //Our source stream - tokens TokenStream + tokens TokenStream // You may have multiple, named streams of rewrite operations. // I'm calling these things "programs." // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int + programs map[string][]RewriteOperation + lastRewriteTokenIndexes map[string]int } -func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{ +// NewTokenStreamRewriter returns a new instance of TokenStreamRewritter. +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { return &TokenStreamRewriter{ - tokens: tokens, - programs: map[string][]RewriteOperation{ - Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size), + tokens: tokens, + programs: map[string][]RewriteOperation{ + DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize), }, - last_rewrite_token_indexes: map[string]int{}, + lastRewriteTokenIndexes: map[string]int{}, } } -func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{ +// GetTokenStream returns the token stream this rewriter pulls from. +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { return tsr.tokens } -// Rollback the instruction stream for a program so that -// the indicated instruction (via instructionIndex) is no -// longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){ - is, ok := tsr.programs[program_name] - if ok{ - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! +func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) { + is, ok := tsr.programs[programName] + if ok { + tsr.programs[programName] = is[MinTokenIndex:instructionIndex] } } -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){ - tsr.Rollback(Default_Program_Name, instruction_index) +// RollbackDefault TODO: docs. 
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) { + tsr.Rollback(DefaultProgramName, instructionIndex) } -//Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){ - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included + +// DeleteProgram resets the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(programName string) { + tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included } -func (tsr *TokenStreamRewriter) DeleteProgramDefault(){ - tsr.DeleteProgram(Default_Program_Name) +// DeleteProgramDefault deletes the default program. +func (tsr *TokenStreamRewriter) DeleteProgramDefault() { + tsr.DeleteProgram(DefaultProgramName) } -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){ +// InsertAfter TODO: docs. +func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) { // to insert after, just insert before next index (even if past end) var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } -func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){ - tsr.InsertAfter(Default_Program_Name, index, text) +// InsertAfterDefault TODO: docs. +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { + tsr.InsertAfter(DefaultProgramName, index, text) } -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){ - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) +// InsertAfterToken TODO: docs. +func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) { + tsr.InsertAfter(programName, token.GetTokenIndex(), text) } -func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){ +// InsertBefore TODO: docs. +func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) { var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } -func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){ - tsr.InsertBefore(Default_Program_Name, index, text) +// InsertBeforeDefault TODO: docs. +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { + tsr.InsertBefore(DefaultProgramName, index, text) } -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){ - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) +// InsertBeforeToken TODO: docs. +func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) { + tsr.InsertBefore(programName, token.GetTokenIndex(), text) } -func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){ - if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){ - panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", +// Replace TODO: docs. 
-func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
-	if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
-		panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+// Replace replaces the tokens in the range from..to (inclusive) with the
+// given text.
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+	if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
+		panic(fmt.Errorf("replace: range invalid: %d..%d(size=%d)",
 			from, to, tsr.tokens.Size()))
 	}
 	var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
-	rewrites := tsr.GetProgram(program_name)
+	rewrites := tsr.GetProgram(programName)
 	op.SetInstructionIndex(len(rewrites))
-	tsr.AddToProgram(program_name, op)
+	tsr.AddToProgram(programName, op)
 }

-func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
-	tsr.Replace(Default_Program_Name, from, to, text)
+// ReplaceDefault is Replace on the default program.
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
+	tsr.Replace(DefaultProgramName, from, to, text)
 }

-func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+// ReplaceDefaultPos replaces the single token at the given index.
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
 	tsr.ReplaceDefault(index, index, text)
 }

-func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
-	tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
+// ReplaceToken replaces the tokens between the given from and to tokens.
+func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
+	tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
 }

-func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
-	tsr.ReplaceToken(Default_Program_Name, from, to, text)
+// ReplaceTokenDefault is ReplaceToken on the default program.
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
+	tsr.ReplaceToken(DefaultProgramName, from, to, text)
 }

-func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+// ReplaceTokenDefaultPos replaces the given token on the default program.
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
 	tsr.ReplaceTokenDefault(index, index, text)
 }

-func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
-	tsr.Replace(program_name, from, to, "" )
+// Delete removes the tokens in the range from..to (inclusive).
+func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
+	tsr.Replace(programName, from, to, "")
 }

-func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
-	tsr.Delete(Default_Program_Name, from, to)
+// DeleteDefault is Delete on the default program.
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
+	tsr.Delete(DefaultProgramName, from, to)
 }

-func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
-	tsr.DeleteDefault(index,index)
+// DeleteDefaultPos deletes the single token at the given index.
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+	tsr.DeleteDefault(index, index)
 }

-func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
-	tsr.ReplaceToken(program_name, from, to, "")
+// DeleteToken removes the tokens between the given from and to tokens.
+func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
+	tsr.ReplaceToken(programName, from, to, "")
 }

-func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
-	tsr.DeleteToken(Default_Program_Name, from, to)
+// DeleteTokenDefault is DeleteToken on the default program.
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
+	tsr.DeleteToken(DefaultProgramName, from, to)
 }
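+// Named programs keep independent instruction streams over the same token
+// stream. A sketch (the program names are arbitrary, chosen for illustration):
+//
+//	rewriter.InsertBefore("bold", 0, "<b>")
+//	rewriter.InsertBefore("quote", 0, "\"")
+//	bold := rewriter.GetText("bold", NewInterval(0, stream.Size()-1))
+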
-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
-	i, ok := tsr.last_rewrite_token_indexes[program_name]
-	if !ok{
+// GetLastRewriteTokenIndex returns the last rewritten token index for the
+// given program, or -1 if there is none.
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
+	i, ok := tsr.lastRewriteTokenIndexes[programName]
+	if !ok {
 		return -1
 	}
 	return i
 }

-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
-	return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
+// GetLastRewriteTokenIndexDefault returns the last rewritten token index for
+// the default program.
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
+	return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
 }

-func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
-	tsr.last_rewrite_token_indexes[program_name] = i
+// SetLastRewriteTokenIndex records the last rewritten token index for the
+// given program.
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
+	tsr.lastRewriteTokenIndexes[programName] = i
 }

-func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
-	is := make([]RewriteOperation, 0, Program_Init_Size)
+// InitializeProgram creates and stores an empty program under the given name.
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
+	is := make([]RewriteOperation, 0, ProgramInitSize)
 	tsr.programs[name] = is
 	return is
 }

-func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+// AddToProgram adds the given operation to the program.
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
 	is := tsr.GetProgram(name)
 	is = append(is, op)
 	tsr.programs[name] = is
 }

-func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+// GetProgram returns the operations for this rewriter.
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
 	is, ok := tsr.programs[name]
-	if !ok{
+	if !ok {
 		is = tsr.InitializeProgram(name)
 	}
 	return is
 }
-
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetTextDefault() string{
+
+// GetTextDefault returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
 	return tsr.GetText(
-		Default_Program_Name,
+		DefaultProgramName,
 		NewInterval(0, tsr.tokens.Size()-1))
 }
-
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
-	rewrites := tsr.programs[program_name]
+
+// GetText returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(programName string, interval *Interval) string {
+	rewrites := tsr.programs[programName]
 	start := interval.Start
-	stop := interval.Stop
+	stop := interval.Stop

 	// ensure start/end are in range
 	stop = min(stop, tsr.tokens.Size()-1)
-	start = max(start,0)
-	if rewrites == nil || len(rewrites) == 0{
+	start = max(start, 0)
+	if rewrites == nil || len(rewrites) == 0 {
 		return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
 	}
 	buf := bytes.Buffer{}
 	// First, optimize instruction stream
 	indexToOp := reduceToSingleOperationPerIndex(rewrites)
 	// Walk buffer, executing instructions and emitting tokens
-	for i:=start; i<=stop && i<tsr.tokens.Size(); i++{
+	for i := start; i <= stop && i < tsr.tokens.Size(); i++ {
a", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", - func(r *TokenStreamRewriter){ - r.InsertBeforeDefault(0, "") - r.InsertBeforeDefault(0, "") - r.InsertAfterDefault(0, "
") - r.InsertAfterDefault(0, "") - r.InsertBeforeDefault(1, "") - r.InsertAfterDefault(1,"") - }), - NewLexerTest("ab", "a
")
- r.InsertBeforeDefault(0, "")
- r.InsertBeforeDefault(0, "
diff --git a/runtime/Go/antlr/transition.go b/runtime/Go/antlr/transition.go
--- a/runtime/Go/antlr/transition.go
+++ b/runtime/Go/antlr/transition.go
-// This is a one way link. It emanates from a state (usually via a list of
-// transitions) and has a target state.
-//
-//Since we never have to change the ATN transitions once we construct it,
-// the states. We'll use the term Edge for the DFA to distinguish them from
-// ATN transitions.
+// Transition is a one-way link. It emanates from a state (usually via a list
+// of transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific types. The DFA transitions, on the
+// other hand, need to update the labels as they add transitions to the
+// states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
 type Transition interface {
 	getTarget() ATNState
 	setTarget(ATNState)
@@ -27,28 +27,27 @@ type Transition interface {
 	Matches(int, int, int) bool
 }

+// BaseTransition is the base implementation for Transition.
 type BaseTransition struct {
-	target ATNState
+	target ATNState
+	// Are we epsilon, action, sempred?
 	isEpsilon         bool
 	label             int
 	intervalSet       *IntervalSet
 	serializationType int
 }

+// NewBaseTransition returns a new instance of BaseTransition.
 func NewBaseTransition(target ATNState) *BaseTransition {
-
 	if target == nil {
 		panic("target cannot be nil.")
 	}

-	t := new(BaseTransition)
-
-	t.target = target
-	// Are we epsilon, action, sempred?
-	t.isEpsilon = false
-	t.intervalSet = nil
-
-	return t
+	return &BaseTransition{
+		target:      target,
+		isEpsilon:   false,
+		intervalSet: nil,
+	}
 }

 func (t *BaseTransition) getTarget() ATNState {
@@ -71,10 +70,12 @@ func (t *BaseTransition) getSerializationType() int {
 	return t.serializationType
 }

+// Matches is not implemented.
 func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	panic("Not implemented")
 }

+// Transition types
 const (
 	TransitionEPSILON    = 1
 	TransitionRANGE      = 2
@@ -88,6 +89,7 @@ const (
 	TransitionPRECEDENCE = 10
 )

+// TransitionserializationNames are the names of the constants defined above.
 var TransitionserializationNames = []string{
 	"INVALID",
 	"EPSILON",
@@ -102,39 +104,16 @@ var TransitionserializationNames = []string{
 	"PRECEDENCE",
 }

-//var TransitionserializationTypes struct {
-//	EpsilonTransition int
-//	RangeTransition int
-//	RuleTransition int
-//	PredicateTransition int
-//	AtomTransition int
-//	ActionTransition int
-//	SetTransition int
-//	NotSetTransition int
-//	WildcardTransition int
-//	PrecedencePredicateTransition int
-//}{
-//	TransitionEPSILON,
-//	TransitionRANGE,
-//	TransitionRULE,
-//	TransitionPREDICATE,
-//	TransitionATOM,
-//	TransitionACTION,
-//	TransitionSET,
-//	TransitionNOTSET,
-//	TransitionWILDCARD,
-//	TransitionPRECEDENCE
-//}
-
-// TODO: make all transitions sets? no, should remove set edges
+// AtomTransition TODO: make all transitions sets? no, should remove set edges
 type AtomTransition struct {
 	*BaseTransition
 }

+// NewAtomTransition returns a new instance of AtomTransition.
 func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
-
-	t := new(AtomTransition)
-	t.BaseTransition = NewBaseTransition(target)
+	t := &AtomTransition{
+		BaseTransition: NewBaseTransition(target),
+	}

 	t.label = intervalSet // The token type or character value; or, signifies special intervalSet.
 	t.intervalSet = t.makeLabel()
@@ -149,6 +128,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet {
 	return s
 }

+// Matches returns true if the symbol matches this transition's label.
 func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return t.label == symbol
 }
@@ -157,6 +137,7 @@ func (t *AtomTransition) String() string {
 	return strconv.Itoa(t.label)
 }

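+// A sketch of how an atom transition matches exactly one symbol (the target
+// state here is only a placeholder):
+//
+//	target := NewBaseATNState()
+//	t := NewAtomTransition(target, 'a')
+//	t.Matches('a', 0, 0xFFFF) // true
+//	t.Matches('b', 0, 0xFFFF) // false
+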
+// RuleTransition is a transition between two ATN states.
 type RuleTransition struct {
 	*BaseTransition
@@ -164,41 +145,47 @@ type RuleTransition struct {
 	ruleIndex, precedence int
 }

+// NewRuleTransition returns a new instance of RuleTransition.
 func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+	t := &RuleTransition{
+		BaseTransition: NewBaseTransition(ruleStart),
+		followState:    followState,
+		ruleIndex:      ruleIndex,
+		precedence:     precedence,
+	}

-	t := new(RuleTransition)
-	t.BaseTransition = NewBaseTransition(ruleStart)
-
-	t.ruleIndex = ruleIndex
-	t.precedence = precedence
-	t.followState = followState
 	t.serializationType = TransitionRULE
 	t.isEpsilon = true

 	return t
 }

+// Matches always returns false.
 func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return false
 }

+// EpsilonTransition represents a transition that consumes no input.
 type EpsilonTransition struct {
 	*BaseTransition

 	outermostPrecedenceReturn int
 }

+// NewEpsilonTransition returns a new instance of EpsilonTransition.
 func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
-
-	t := new(EpsilonTransition)
-	t.BaseTransition = NewBaseTransition(target)
+	t := &EpsilonTransition{
+		BaseTransition:            NewBaseTransition(target),
+		outermostPrecedenceReturn: outermostPrecedenceReturn,
+	}

 	t.serializationType = TransitionEPSILON
 	t.isEpsilon = true
-	t.outermostPrecedenceReturn = outermostPrecedenceReturn
+
 	return t
 }

+// Matches always returns false.
 func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return false
 }
@@ -207,20 +194,23 @@ func (t *EpsilonTransition) String() string {
 	return "epsilon"
 }

+// RangeTransition represents a transition over a range of symbols.
 type RangeTransition struct {
 	*BaseTransition

 	start, stop int
 }

+// NewRangeTransition returns a new instance of RangeTransition.
 func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
-	t := new(RangeTransition)
-	t.BaseTransition = NewBaseTransition(target)
+	t := &RangeTransition{
+		BaseTransition: NewBaseTransition(target),
+		start:          start,
+		stop:           stop,
+	}

 	t.serializationType = TransitionRANGE
-	t.start = start
-	t.stop = stop
 	t.intervalSet = t.makeLabel()
 	return t
 }
@@ -231,33 +221,38 @@ func (t *RangeTransition) makeLabel() *IntervalSet {
 	return s
 }

+// Matches returns true if the given symbol is within the range this object
+// represents.
 func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return symbol >= t.start && symbol <= t.stop
 }

 func (t *RangeTransition) String() string {
-	return "'" + string(t.start) + "'..'" + string(t.stop) + "'"
+	return fmt.Sprintf("'%d..%d'", t.start, t.stop)
 }

+// AbstractPredicateTransition is the interface implemented by predicate
+// transitions.
 type AbstractPredicateTransition interface {
 	Transition
 	IAbstractPredicateTransitionFoo()
 }

+// BaseAbstractPredicateTransition is the base implementation of
+// AbstractPredicateTransition.
 type BaseAbstractPredicateTransition struct {
 	*BaseTransition
 }

+// NewBasePredicateTransition returns a new instance of
+// BaseAbstractPredicateTransition.
 func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
-
-	t := new(BaseAbstractPredicateTransition)
-	t.BaseTransition = NewBaseTransition(target)
-
-	return t
+	return &BaseAbstractPredicateTransition{
+		BaseTransition: NewBaseTransition(target),
+	}
 }

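+// Note that predicate transitions are epsilon transitions: they consume no
+// input and Matches always reports false; the prediction machinery evaluates
+// the predicate itself instead. A sketch (the arguments are placeholders):
+//
+//	t := NewPredicateTransition(target, 0, 0, false)
+//	t.Matches('a', 0, 0xFFFF) // false, regardless of the symbol
+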
+// IAbstractPredicateTransitionFoo does nothing.
 func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}

+// PredicateTransition represents a transition guarded by a semantic
+// predicate.
 type PredicateTransition struct {
 	*BaseAbstractPredicateTransition
@@ -265,19 +260,22 @@ type PredicateTransition struct {
 	ruleIndex, predIndex int
 }

+// NewPredicateTransition returns a new instance of PredicateTransition.
 func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
-	t := new(PredicateTransition)
-	t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+	t := &PredicateTransition{
+		BaseAbstractPredicateTransition: NewBasePredicateTransition(target),
+		ruleIndex:                       ruleIndex,
+		predIndex:                       predIndex,
+		isCtxDependent:                  isCtxDependent,
+	}

 	t.serializationType = TransitionPREDICATE
-	t.ruleIndex = ruleIndex
-	t.predIndex = predIndex
-	t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
 	t.isEpsilon = true
 	return t
 }

+// Matches always returns false.
 func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return false
 }
@@ -290,6 +288,7 @@ func (t *PredicateTransition) String() string {
 	return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
 }

+// ActionTransition represents a transition that executes an action.
 type ActionTransition struct {
 	*BaseTransition
@@ -297,19 +296,21 @@ type ActionTransition struct {
 	ruleIndex, actionIndex, predIndex int
 }

+// NewActionTransition returns a new instance of ActionTransition.
 func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
-
-	t := new(ActionTransition)
-	t.BaseTransition = NewBaseTransition(target)
+	t := &ActionTransition{
+		BaseTransition: NewBaseTransition(target),
+		ruleIndex:      ruleIndex,
+		actionIndex:    actionIndex,
+		isCtxDependent: isCtxDependent,
+	}

 	t.serializationType = TransitionACTION
-	t.ruleIndex = ruleIndex
-	t.actionIndex = actionIndex
-	t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
 	t.isEpsilon = true
 	return t
 }

+// Matches always returns false.
 func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return false
 }
@@ -318,18 +319,20 @@ func (t *ActionTransition) String() string {
 	return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
 }

+// SetTransition represents a transition over a set of symbols.
 type SetTransition struct {
 	*BaseTransition
 }

-func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
-
-	t := new(SetTransition)
-	t.BaseTransition = NewBaseTransition(target)
+// NewSetTransition returns a new instance of SetTransition.
+func NewSetTransition(target ATNState, s *IntervalSet) *SetTransition {
+	t := &SetTransition{
+		BaseTransition: NewBaseTransition(target),
+	}

 	t.serializationType = TransitionSET
-	if set != nil {
-		t.intervalSet = set
+	if s != nil {
+		t.intervalSet = s
 	} else {
 		t.intervalSet = NewIntervalSet()
 		t.intervalSet.addOne(TokenInvalidType)
@@ -338,6 +341,7 @@ func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
 	return t
 }

+// Matches returns true if the given symbol is contained in this transition's
+// set.
 func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return t.intervalSet.contains(symbol)
 }
@@ -346,21 +350,23 @@ func (t *SetTransition) String() string {
 	return t.intervalSet.String()
 }

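+// A sketch of set matching; addOne is unexported, so this runs inside the
+// antlr package (the target state is a placeholder):
+//
+//	s := NewIntervalSet()
+//	s.addOne('a')
+//	t := NewSetTransition(target, s)
+//	t.Matches('a', 0, 0xFFFF) // true
+//	t.Matches('b', 0, 0xFFFF) // false
+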
+// NotSetTransition is the complement of a SetTransition: it matches any
+// symbol in the vocabulary that is not in the set.
 type NotSetTransition struct {
 	*SetTransition
 }

-func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
-
-	t := new(NotSetTransition)
-
-	t.SetTransition = NewSetTransition(target, set)
+// NewNotSetTransition returns a new instance of NotSetTransition.
+func NewNotSetTransition(target ATNState, s *IntervalSet) *NotSetTransition {
+	t := &NotSetTransition{
+		SetTransition: NewSetTransition(target, s),
+	}

 	t.serializationType = TransitionNOTSET

 	return t
 }

+// Matches returns true if the given symbol is within the vocabulary and not
+// in this transition's set.
 func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
 }
@@ -369,19 +375,22 @@ func (t *NotSetTransition) String() string {
 	return "~" + t.intervalSet.String()
 }

+// WildcardTransition matches any symbol in the vocabulary.
 type WildcardTransition struct {
 	*BaseTransition
 }

+// NewWildcardTransition returns a new instance of WildcardTransition.
 func NewWildcardTransition(target ATNState) *WildcardTransition {
-
-	t := new(WildcardTransition)
-	t.BaseTransition = NewBaseTransition(target)
+	t := &WildcardTransition{
+		BaseTransition: NewBaseTransition(target),
+	}

 	t.serializationType = TransitionWILDCARD
 	return t
 }

+// Matches returns true if the symbol is between the given vocab symbols.
 func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
 }
@@ -390,24 +399,28 @@ func (t *WildcardTransition) String() string {
 	return "."
 }

+// PrecedencePredicateTransition represents a transition guarded by a
+// precedence predicate.
 type PrecedencePredicateTransition struct {
 	*BaseAbstractPredicateTransition

 	precedence int
 }

+// NewPrecedencePredicateTransition returns a new instance of
+// PrecedencePredicateTransition.
 func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
-
-	t := new(PrecedencePredicateTransition)
-	t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+	t := &PrecedencePredicateTransition{
+		BaseAbstractPredicateTransition: NewBasePredicateTransition(target),
+		precedence:                      precedence,
+	}

 	t.serializationType = TransitionPRECEDENCE
-	t.precedence = precedence
 	t.isEpsilon = true

 	return t
 }

+// Matches always returns false.
 func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 	return false
 }
diff --git a/runtime/Go/antlr/tree.go b/runtime/Go/antlr/tree.go
index bdeb6d7881..d7d25fee81 100644
--- a/runtime/Go/antlr/tree.go
+++ b/runtime/Go/antlr/tree.go
@@ -5,11 +5,12 @@
 package antlr

 // The basic notion of a tree has a parent, a payload, and a list of children.
-// It is the most abstract interface for all the trees used by ANTLR. ///

+// TreeInvalidInterval represents a nonexistent interval within the input.
 var TreeInvalidInterval = NewInterval(-1, -2)

+// Tree is the most abstract interface for all the trees used by ANTLR.
 type Tree interface {
 	GetParent() Tree
 	SetParent(Tree)
@@ -19,12 +20,14 @@ type Tree interface {
 	GetChildren() []Tree
 }

+// SyntaxTree extends Tree with access to the source interval.
 type SyntaxTree interface {
 	Tree

 	GetSourceInterval() *Interval
 }

+// ParseTree extends SyntaxTree with visitor and text support.
 type ParseTree interface {
 	SyntaxTree
@@ -34,6 +37,7 @@ type ParseTree interface {
 	ToStringTree([]string, Recognizer) string
 }

+// RuleNode extends ParseTree for non-terminal rule nodes.
 type RuleNode interface {
 	ParseTree
@@ -41,18 +45,21 @@ type RuleNode interface {
 	GetBaseRuleContext() *BaseRuleContext
 }

+// TerminalNode extends ParseTree for terminal symbols.
 type TerminalNode interface {
 	ParseTree

 	GetSymbol() Token
 }

+// ErrorNode extends TerminalNode for erroneous tokens.
 type ErrorNode interface {
 	TerminalNode

 	errorNode()
 }

+// ParseTreeVisitor is the base type for tree visitors.
 type ParseTreeVisitor interface {
 	Visit(tree ParseTree) interface{}
 	VisitChildren(node RuleNode) interface{}
@@ -60,14 +67,22 @@ type ParseTreeVisitor interface {
 	VisitErrorNode(node ErrorNode) interface{}
 }

+// BaseParseTreeVisitor is the base implementation of ParseTreeVisitor.
 type BaseParseTreeVisitor struct{}

 var _ ParseTreeVisitor = &BaseParseTreeVisitor{}

-func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
+// Visit visits the given parse tree.
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil }
+
+// VisitChildren visits the node's children.
+func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
+
+// VisitTerminal visits a terminal node.
 func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}   { return nil }
+
+// VisitErrorNode visits an error node.
+func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }

 // TODO
 //func (this ParseTreeVisitor) Visit(ctx) {
@@ -90,6 +105,7 @@ func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { re
 //	return Visitor[funcName](ctx)
 //}

+// ParseTreeListener receives notifications from a ParseTreeWalker as it walks
+// the tree.
 type ParseTreeListener interface {
 	VisitTerminal(node TerminalNode)
 	VisitErrorNode(node ErrorNode)
@@ -97,15 +113,24 @@ type ParseTreeListener interface {
 	ExitEveryRule(ctx ParserRuleContext)
 }

+// BaseParseTreeListener is the base type for any parse tree listener.
 type BaseParseTreeListener struct{}

 var _ ParseTreeListener = &BaseParseTreeListener{}

-func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
-func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+// VisitTerminal visits a terminal node.
+func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+
+// VisitErrorNode visits an error node.
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+
+// EnterEveryRule is called before visiting each rule.
 func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}

-func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
+// ExitEveryRule is called after visiting each rule.
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
+
+// TerminalNodeImpl implements TerminalNode.
 type TerminalNodeImpl struct {
 	parentCtx RuleContext
@@ -114,43 +139,50 @@ type TerminalNodeImpl struct {

 var _ TerminalNode = &TerminalNodeImpl{}

+// NewTerminalNodeImpl returns a new instance of TerminalNodeImpl.
 func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
-	tn := new(TerminalNodeImpl)
-
-	tn.parentCtx = nil
-	tn.symbol = symbol
-
-	return tn
+	return &TerminalNodeImpl{
+		parentCtx: nil,
+		symbol:    symbol,
+	}
 }

+// GetChild returns nil. Terminal nodes cannot have children.
 func (t *TerminalNodeImpl) GetChild(i int) Tree {
 	return nil
 }

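+// A sketch of typical terminal-node behavior (the token value is assumed):
+//
+//	node := NewTerminalNodeImpl(token)
+//	node.GetChildCount()     // 0: terminals have no children
+//	node.GetSourceInterval() // one-token interval at the token's index
+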
+// GetChildren returns nil. Terminal nodes cannot have children.
 func (t *TerminalNodeImpl) GetChildren() []Tree {
 	return nil
 }

+// SetChildren always panics. Terminal nodes cannot have children.
 func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
 	panic("Cannot set children on terminal node")
 }

+// GetSymbol returns this node's symbol.
 func (t *TerminalNodeImpl) GetSymbol() Token {
 	return t.symbol
 }

+// GetParent returns this node's parent.
 func (t *TerminalNodeImpl) GetParent() Tree {
 	return t.parentCtx
 }

+// SetParent sets this node's parent.
 func (t *TerminalNodeImpl) SetParent(tree Tree) {
 	t.parentCtx = tree.(RuleContext)
 }

+// GetPayload returns this node's symbol.
 func (t *TerminalNodeImpl) GetPayload() interface{} {
 	return t.symbol
 }

+// GetSourceInterval returns the interval that this node covers.
 func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
 	if t.symbol == nil {
 		return TreeInvalidInterval
@@ -159,18 +191,22 @@ func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
 	return NewInterval(tokenIndex, tokenIndex)
 }

+// GetChildCount always returns 0.
 func (t *TerminalNodeImpl) GetChildCount() int {
 	return 0
 }

+// Accept wraps around VisitTerminal.
 func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
 	return v.VisitTerminal(t)
 }

+// GetText returns the text of this node's token.
 func (t *TerminalNodeImpl) GetText() string {
 	return t.symbol.GetText()
 }

+// String implements the Stringer interface.
 func (t *TerminalNodeImpl) String() string {
 	if t.symbol.GetTokenType() == TokenEOF {
 		return "<EOF>"