From b7cd26b004698f78147eeafdbbfd252966d6d57d Mon Sep 17 00:00:00 2001 From: Denis Cheremisov Date: Sun, 17 Mar 2019 03:02:18 +0300 Subject: [PATCH] without passing support --- LDE.g4 | 14 + Makefile | 2 +- TOOL_RULES.md | 7 +- generate.go | 3 + go.mod | 14 +- ...ion_pass_until.go => action_pass_after.go} | 18 +- internal/ast/action_pass_after_or_ignore.go | 31 + internal/ast/action_pass_before.go | 46 + internal/ast/action_pass_before_or_ignore.go | 31 + internal/ast/action_pass_until_or_ignore.go | 31 - .../ast/action_start_char_without_pass.go | 27 + .../ast/action_start_string_without_pass.go | 28 + internal/ast/action_z_dispatcher.go | 8 +- internal/generator/generator.go | 12 +- internal/generator/gogen/heads.go | 87 +- internal/generator/gogen/lookup.go | 74 +- internal/go.mod | 3 - internal/listener/listener.go | 73 +- internal/parser/LDE.interp | 9 +- internal/parser/LDE.tokens | 36 +- internal/parser/LDELexer.interp | 8 +- internal/parser/LDELexer.tokens | 36 +- internal/parser/lde_base_listener.go | 20 +- internal/parser/lde_lexer.go | 165 +- internal/parser/lde_listener.go | 20 +- internal/parser/lde_parser.go | 1061 ++++++++---- internal/srcbuilder/dispatching.go | 128 +- ldetool.go | 1 - testing/common.lde | 4 + testing/common_lde.go | 48 + testing/common_test.go | 26 + testing/parsing_lde.go | 1 + .../antlr/antlr4/runtime/Go/antlr/atn.go | 152 -- .../antlr4/runtime/Go/antlr/atn_config.go | 295 ---- .../antlr4/runtime/Go/antlr/atn_config_set.go | 387 ----- .../Go/antlr/atn_deserialization_options.go | 25 - .../runtime/Go/antlr/atn_deserializer.go | 828 --------- .../antlr4/runtime/Go/antlr/atn_simulator.go | 50 - .../antlr4/runtime/Go/antlr/atn_state.go | 386 ----- .../antlr/antlr4/runtime/Go/antlr/atn_type.go | 11 - .../antlr4/runtime/Go/antlr/char_stream.go | 12 - .../runtime/Go/antlr/common_token_factory.go | 56 - .../runtime/Go/antlr/common_token_stream.go | 447 ----- .../antlr/antlr4/runtime/Go/antlr/dfa.go | 171 -- 
.../antlr4/runtime/Go/antlr/dfa_serializer.go | 152 -- .../antlr4/runtime/Go/antlr/dfa_state.go | 166 -- .../Go/antlr/diagnostic_error_listener.go | 111 -- .../antlr4/runtime/Go/antlr/error_listener.go | 108 -- .../antlr4/runtime/Go/antlr/error_strategy.go | 758 --------- .../antlr/antlr4/runtime/Go/antlr/errors.go | 241 --- .../antlr4/runtime/Go/antlr/file_stream.go | 49 - .../antlr4/runtime/Go/antlr/input_stream.go | 113 -- .../antlr4/runtime/Go/antlr/int_stream.go | 16 - .../antlr4/runtime/Go/antlr/interval_set.go | 296 ---- .../antlr/antlr4/runtime/Go/antlr/lexer.go | 417 ----- .../antlr4/runtime/Go/antlr/lexer_action.go | 431 ----- .../runtime/Go/antlr/lexer_action_executor.go | 170 -- .../runtime/Go/antlr/lexer_atn_simulator.go | 658 -------- .../antlr4/runtime/Go/antlr/ll1_analyzer.go | 215 --- .../antlr/antlr4/runtime/Go/antlr/parser.go | 718 -------- .../runtime/Go/antlr/parser_atn_simulator.go | 1473 ----------------- .../runtime/Go/antlr/parser_rule_context.go | 362 ---- .../runtime/Go/antlr/prediction_context.go | 756 --------- .../runtime/Go/antlr/prediction_mode.go | 553 ------- .../antlr4/runtime/Go/antlr/recognizer.go | 217 --- .../antlr4/runtime/Go/antlr/rule_context.go | 114 -- .../runtime/Go/antlr/semantic_context.go | 455 ----- .../antlr/antlr4/runtime/Go/antlr/token.go | 210 --- .../antlr4/runtime/Go/antlr/token_source.go | 17 - .../antlr4/runtime/Go/antlr/token_stream.go | 20 - .../antlr4/runtime/Go/antlr/trace_listener.go | 32 - .../antlr4/runtime/Go/antlr/transition.go | 421 ----- .../antlr/antlr4/runtime/Go/antlr/tree.go | 251 --- .../antlr/antlr4/runtime/Go/antlr/trees.go | 137 -- .../antlr/antlr4/runtime/Go/antlr/utils.go | 417 ----- 75 files changed, 1492 insertions(+), 13454 deletions(-) create mode 100644 generate.go rename internal/ast/{action_pass_until.go => action_pass_after.go} (75%) create mode 100644 internal/ast/action_pass_after_or_ignore.go create mode 100644 internal/ast/action_pass_before.go create mode 100644 
internal/ast/action_pass_before_or_ignore.go delete mode 100644 internal/ast/action_pass_until_or_ignore.go create mode 100644 internal/ast/action_start_char_without_pass.go create mode 100644 internal/ast/action_start_string_without_pass.go delete mode 100644 internal/go.mod delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go delete mode 100644 
vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go diff --git a/LDE.g4 b/LDE.g4 index cd59b92..bb94d47 100644 --- a/LDE.g4 +++ b/LDE.g4 @@ -19,11 +19,14 @@ 
baseAction atomicAction : passTargetPrefix + | checkTargetPrefix | passHeadingCharacters | mayBePassTargetPrefix | passChars | passUntil | mayPassUntil + | goUntil + | mayGoUntil | takeUntil | takeUntilIncluding | takeUntilOrRest @@ -42,6 +45,11 @@ passTargetPrefix | '^' targetLit ; +checkTargetPrefix + : '@' targetLit '[' IntLit ']' + | '@' targetLit + ; + mayBePassTargetPrefix : '?' '^' targetLit '[' IntLit ']' | '?' '^' targetLit @@ -50,6 +58,12 @@ mayBePassTargetPrefix passChars : '_' '[' IntLit ':' ']'; +goUntil + : '..' target; + +mayGoUntil + : '?' '..' target; + passUntil : '_' target; diff --git a/Makefile b/Makefile index 635a6d0..a34022a 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ test: PATH=${GOPATH}/bin:${PATH} - go install github.com/sirkon/ldetool + go install go get -u github.com/stretchr/testify go get -u github.com/sirkon/decconv go generate github.com/sirkon/ldetool/testing diff --git a/TOOL_RULES.md b/TOOL_RULES.md index 63e5d9d..a26f435 100644 --- a/TOOL_RULES.md +++ b/TOOL_RULES.md @@ -63,10 +63,13 @@ Rule and capture names must be public and starts from capital letter (i.e. `Name |``^'c'``|Check if the rest starts with the given character *c* and pass it.
Signal error otherwise|``^'@'("@usr") → "usr"``| |``?^`c'``|If the rest starts with the given character *c* then pass it|``?^'@'("@usr") → "usr"``
``?^'@'("usr") → "usr"``| |``^"t"``|Check if the rest starts with the given text *t* and pass it.
Signal error otherwise|``^"ab"("ab12") → "12"``| +|``@"t"``|Check if the rest starts with the given text *t* without passing it.
Signal error otherwise|``@"ab"("ab12") → "ab12"``| |``?^"t"``|If the rest starts with the given text *t* then pass it|``^"ab"("ab12") → "12"``
``^"ab"("a12") → "a12"``| |``_'c'``|Look for the character *c* in the rest and pass it.
Signal error when it was not found|``_'1'("a12") → "2"``| +|``..'c'``|Look for the character *c* in the rest without passing it.
Signal error when it was not found|``..'1'("a12") → "12"``| |``_?'c'``|Works exactly like ``_'c'`` if the character *c* was found.
Do nothing otherwise|``_'1'("a12") → "2"``
``_'1'("a2") → "a2"``| -|``_'c'[:N]``|Look for the character *c* in first N characters the rest and pass it.
Signal error when it was not found|``_'1'[:2]("a12") → "2"``
``_'1'[:2]("aa12") → error``
``_'1'[:3]("aa123c") → "23c"``| +|``?..'c'``|Works exactly like ``..'c'`` if the character *c* was found.
Do nothing otherwise|``..'1'("a12") → "12"``
``..'1'("a2") → "a2"``| +|``_'c'[:N]``|Look for the character *c* in first N characters the rest and pass it.
Signal error when it was not found|``_'1'[:2]("a12") → "2"``
``_'1'[:2]("aa12") → error``
``_'1'[:3]("aa123c") → "23c"``| |``_?'c'[:N]``|Look for the character *c* in first N characters the rest and pass it.
Ignore when text *t* was not found|``_'1'[:2]("a12") → "2"``
``_'1'[:2]("aa12") → "aa12"``
``_'1'[:3]("aa123c") → "23c"``| |``_'c'[M:N]``|Look for the character *c* in the M..N-1 characters of the rest
and pass it.
Signal error when it was not found|``_'1'[1:2]("a12") → "2"``
``_'1'[1:2]("12") → error``
``_'1'[0:2]("123c") → "23c"``| |``_'c'[M:]``|Look for the character *c* in the M, M+1, etc characters of the rest
and pass it.
Signal error when it was not found|``_'1'[1:]("a12") → "2"``
``_'1'[1:]("12") → error``
``_'1'[0:]("123c") → "23c"``| @@ -82,6 +85,8 @@ Rule and capture names must be public and starts from capital letter (i.e. `Name |``_"t"[M]``|Symbols of the rest from (M+1)-th position must starts with *t* |``_"ab"[1:3]("1ab2") → "2"``
``_?"ab"[2:4]("1ab2") → error``| |``_?"t"[M:N]``| |``_?"ab"[1:3]("1ab2") → "2"``
``_?"ab"[2:4]("1ab2") → "1ab2"``| +> Notice, each `_` action except `_[N:]` has its `..` counterpart which works exactly like `_` except it stops right before the target without passing it. + #### Note You can put `~` sign before a char or string you are looking for. This means "short" lookup: for loop will be used for char lookup instead of `bytes.IndexByte`: diff --git a/generate.go b/generate.go new file mode 100644 index 0000000..0966b11 --- /dev/null +++ b/generate.go @@ -0,0 +1,3 @@ +package main + +//go:generate antlr4 -visitor -no-visitor -listener -o internal/parser -Dlanguage=Go LDE.g4 diff --git a/go.mod b/go.mod index a119b10..3eacb46 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,13 @@ -module github.com/sirkon/ldetool/v3 +module github.com/sirkon/ldetool require ( - github.com/antlr/antlr4 v0.0.0-20190207013812-1c6c62afc7cb + github.com/antlr/antlr4 v0.0.0-20190313170020-28fc84874d7f github.com/go-yaml/yaml v2.1.0+incompatible - github.com/kr/pretty v0.1.0 // indirect github.com/sanity-io/litter v1.1.0 github.com/sirkon/decconv v1.0.0 github.com/sirkon/gosrcfmt v1.5.0 - github.com/sirkon/gotify v0.5.0 - github.com/sirkon/ldetool/internal v0.0.0-00010101000000-000000000000 + github.com/sirkon/gotify v0.6.0 github.com/sirkon/message v1.5.1 - github.com/stretchr/testify v1.2.2 + github.com/stretchr/testify v1.3.0 github.com/urfave/cli v1.20.0 - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/yaml.v2 v2.2.1 // indirect ) - -replace github.com/sirkon/ldetool/internal => ./internal diff --git a/internal/ast/action_pass_until.go b/internal/ast/action_pass_after.go similarity index 75% rename from internal/ast/action_pass_until.go rename to internal/ast/action_pass_after.go index 9aa336d..59e3860 100644 --- a/internal/ast/action_pass_until.go +++ b/internal/ast/action_pass_after.go @@ -4,19 +4,19 @@ import ( "fmt" ) -var _ Action = &PassUntil{} +var _ Action = &PassAfter{} -// PassUntil ... -type PassUntil struct { +// PassAfter ... 
+type PassAfter struct { access Limit *Target } -func (pu *PassUntil) Accept(d ActionDispatcher) error { - return d.DispatchPassUntil(pu) +func (pu *PassAfter) Accept(d ActionDispatcher) error { + return d.DispatchPassAfter(pu) } -func (pu *PassUntil) String() string { +func (pu *PassAfter) String() string { if pu.Limit.Lower == pu.Limit.Upper && pu.Limit.Lower > 0 { switch pu.Limit.Type { case String: @@ -38,9 +38,9 @@ func (pu *PassUntil) String() string { return res } -// PassUntilTarget ... -func PassUntilTarget() *PassUntil { - return &PassUntil{ +// PassAfterTarget ... +func PassAfterTarget() *PassAfter { + return &PassAfter{ Limit: NewTarget(), } } diff --git a/internal/ast/action_pass_after_or_ignore.go b/internal/ast/action_pass_after_or_ignore.go new file mode 100644 index 0000000..0dd6a47 --- /dev/null +++ b/internal/ast/action_pass_after_or_ignore.go @@ -0,0 +1,31 @@ +package ast + +var _ Action = &PassAfterOrIgnore{} + +// PassAfterOrIgnore ... +type PassAfterOrIgnore struct { + access + Limit *Target +} + +func (p *PassAfterOrIgnore) Accept(d ActionDispatcher) error { + return d.DispatchPassAfterOrIgnore(p) +} + +func (p *PassAfterOrIgnore) String() string { + pu := &PassAfter{ + Limit: p.Limit, + } + if p.Limit.Lower == p.Limit.Upper && p.Limit.Lower > 0 { + return pu.String() + " or ignore otherwise" + } else { + return pu.String() + " or ignore if not found" + } +} + +// PassAfterTargetOrIgnore ... +func PassAfterTargetOrIgnore() *PassAfterOrIgnore { + return &PassAfterOrIgnore{ + Limit: NewTarget(), + } +} diff --git a/internal/ast/action_pass_before.go b/internal/ast/action_pass_before.go new file mode 100644 index 0000000..cfd7187 --- /dev/null +++ b/internal/ast/action_pass_before.go @@ -0,0 +1,46 @@ +package ast + +import ( + "fmt" +) + +var _ Action = &PassBefore{} + +// PassBefore ... 
+type PassBefore struct { + access + Limit *Target +} + +func (pu *PassBefore) Accept(d ActionDispatcher) error { + return d.DispatchPassBefore(pu) +} + +func (pu *PassBefore) String() string { + if pu.Limit.Lower == pu.Limit.Upper && pu.Limit.Lower > 0 { + switch pu.Limit.Type { + case String: + return fmt.Sprintf("Check if the rest at %s character and further starts with prefix %s and pass until it", posLit(pu.Limit.Lower+1), pu.Limit.Value) + case Char: + return fmt.Sprintf("Check if %s character equals to %s and pass until it", posLit(pu.Limit.Lower+1), pu.Limit.Value) + } + } + + var area string + if pu.Limit.Lower > 0 && pu.Limit.Upper > 0 { + area = fmt.Sprintf("rest[%d:%d]", pu.Limit.Lower, pu.Limit.Upper) + } else if pu.Limit.Lower > 0 { + area = fmt.Sprintf("rest[%d:]", pu.Limit.Lower) + } else { + area = "the rest" + } + res := fmt.Sprintf("Look for \033[1m%s\033[0m in %s without passing it", pu.Limit.Value, area) + return res +} + +// PassBeforeTarget ... +func PassBeforeTarget() *PassBefore { + return &PassBefore{ + Limit: NewTarget(), + } +} diff --git a/internal/ast/action_pass_before_or_ignore.go b/internal/ast/action_pass_before_or_ignore.go new file mode 100644 index 0000000..49595e2 --- /dev/null +++ b/internal/ast/action_pass_before_or_ignore.go @@ -0,0 +1,31 @@ +package ast + +var _ Action = &PassBeforeOrIgnore{} + +// PassBeforeOrIgnore ... +type PassBeforeOrIgnore struct { + access + Limit *Target +} + +func (p *PassBeforeOrIgnore) Accept(d ActionDispatcher) error { + return d.DispatchPassBeforeOrIgnore(p) +} + +func (p *PassBeforeOrIgnore) String() string { + pu := &PassBefore{ + Limit: p.Limit, + } + if p.Limit.Lower == p.Limit.Upper && p.Limit.Lower > 0 { + return pu.String() + " or ignore otherwise" + } else { + return pu.String() + " or ignore if not found" + } +} + +// PassBeforeTargetOrIgnore ... 
+func PassBeforeTargetOrIgnore() *PassBeforeOrIgnore { + return &PassBeforeOrIgnore{ + Limit: NewTarget(), + } +} diff --git a/internal/ast/action_pass_until_or_ignore.go b/internal/ast/action_pass_until_or_ignore.go deleted file mode 100644 index 09038e6..0000000 --- a/internal/ast/action_pass_until_or_ignore.go +++ /dev/null @@ -1,31 +0,0 @@ -package ast - -var _ Action = &PassUntilOrIgnore{} - -// PassUntilOrIgnore ... -type PassUntilOrIgnore struct { - access - Limit *Target -} - -func (p *PassUntilOrIgnore) Accept(d ActionDispatcher) error { - return d.DispatchPassUntilOrIgnore(p) -} - -func (p *PassUntilOrIgnore) String() string { - pu := &PassUntil{ - Limit: p.Limit, - } - if p.Limit.Lower == p.Limit.Upper && p.Limit.Lower > 0 { - return pu.String() + " or ignore otherwise" - } else { - return pu.String() + " or ignore if not found" - } -} - -// PassUntilTargetOrIgnore ... -func PassUntilTargetOrIgnore() *PassUntilOrIgnore { - return &PassUntilOrIgnore{ - Limit: NewTarget(), - } -} diff --git a/internal/ast/action_start_char_without_pass.go b/internal/ast/action_start_char_without_pass.go new file mode 100644 index 0000000..ec4cdbf --- /dev/null +++ b/internal/ast/action_start_char_without_pass.go @@ -0,0 +1,27 @@ +package ast + +import ( + "fmt" + "github.com/antlr/antlr4/runtime/Go/antlr" +) + +var _ Action = &StartCharWithoutPass{} + +// StartCharWithoutPass ... +type StartCharWithoutPass struct { + access + Value string +} + +func (sc *StartCharWithoutPass) Accept(d ActionDispatcher) error { + return d.DispatchStartCharWithoutPass(sc) +} + +func (sc *StartCharWithoutPass) String() string { + return fmt.Sprintf("Check and pass character \033[1m%s\033[0m", sc.Value) +} + +// StartsWithCharWithoutPass ... 
+func StartsWithCharWithoutPass(target antlr.Token) *StartCharWithoutPass { + return &StartCharWithoutPass{Value: target.GetText()} +} diff --git a/internal/ast/action_start_string_without_pass.go b/internal/ast/action_start_string_without_pass.go new file mode 100644 index 0000000..a0daf2e --- /dev/null +++ b/internal/ast/action_start_string_without_pass.go @@ -0,0 +1,28 @@ +package ast + +import ( + "fmt" + "github.com/antlr/antlr4/runtime/Go/antlr" +) + +var _ Action = &StartStringWithoutPass{} + +// StartStringWithoutPass ... +type StartStringWithoutPass struct { + access + Value string +} + +func (ss *StartStringWithoutPass) Accept(d ActionDispatcher) error { + return d.DispatchStartStringWithoutPass(ss) +} + +func (ss *StartStringWithoutPass) String() string { + return fmt.Sprintf("Check and pass \033[1m%s\033[0m", ss.Value) + +} + +// StartsWithStringWithoutPass constructor +func StartsWithStringWithoutPass(target antlr.Token) *StartStringWithoutPass { + return &StartStringWithoutPass{Value: target.GetText()} +} diff --git a/internal/ast/action_z_dispatcher.go b/internal/ast/action_z_dispatcher.go index fe6abd5..d3a4fe9 100644 --- a/internal/ast/action_z_dispatcher.go +++ b/internal/ast/action_z_dispatcher.go @@ -15,10 +15,14 @@ type ActionDispatcher interface { DispatchOptional(a *Optional) error DispatchPassHeadingCharacters(a PassHeadingCharacters) error DispatchPassFirst(a PassFixed) error - DispatchPassUntil(a *PassUntil) error - DispatchPassUntilOrIgnore(a *PassUntilOrIgnore) error + DispatchPassAfter(a *PassAfter) error + DispatchPassAfterOrIgnore(a *PassAfterOrIgnore) error + DispatchPassBefore(a *PassBefore) error + DispatchPassBeforeOrIgnore(a *PassBeforeOrIgnore) error DispatchStartChar(a *StartChar) error + DispatchStartCharWithoutPass(a *StartCharWithoutPass) error DispatchStartString(a *StartString) error + DispatchStartStringWithoutPass(a *StartStringWithoutPass) error DispatchTake(a *Take) error DispatchTakeIncluding(a *TakeIncluding) error 
DispatchTakeRest(a *TakeRest) error diff --git a/internal/generator/generator.go b/internal/generator/generator.go index 220413d..03ee880 100644 --- a/internal/generator/generator.go +++ b/internal/generator/generator.go @@ -21,14 +21,14 @@ type Generator interface { AtEnd() error // Head - HeadString(anchor string, ignore bool) error - HeadChar(char string, ignore bool) error + HeadString(anchor string, ignore bool, pass bool) error + HeadChar(char string, ignore bool, pass bool) error // Lookups - LookupString(anchor string, lower, upper int, close, ignore bool) error - LookupFixedString(anchor string, offset int, ignore bool) error - LookupChar(anchor string, lower, upper int, close, ignore bool) error - LookupFixedChar(anchor string, offset int, ignore bool) error + LookupString(anchor string, lower, upper int, close, ignore, pass bool) error + LookupFixedString(anchor string, offset int, ignore, pass bool) error + LookupChar(anchor string, lower, upper int, close, ignore, pass bool) error + LookupFixedChar(anchor string, offset int, ignore, pass bool) error // Takes // Take before anchor (string or character) diff --git a/internal/generator/gogen/heads.go b/internal/generator/gogen/heads.go index 14128ab..eea0706 100644 --- a/internal/generator/gogen/heads.go +++ b/internal/generator/gogen/heads.go @@ -73,7 +73,7 @@ func (g *Generator) shortPrefixCheck(unquoted, anchor string, offset int) srcobj ) } -func (g *Generator) checkStringPrefix(anchor string, offset int, ignore bool) error { +func (g *Generator) checkStringPrefix(anchor string, offset int, ignore, pass bool) error { var unquoted string if err := json.Unmarshal([]byte(anchor), &unquoted); err != nil { return fmt.Errorf("cannot unqouote \033[1m%s\033[0m: %s", anchor, err) @@ -82,10 +82,19 @@ func (g *Generator) checkStringPrefix(anchor string, offset int, ignore bool) er body := g.body body.Append(srcobj.Raw("\n")) if offset > 0 { - body.Append( - srcobj.Comment(fmt.Sprintf("Checks if rest[%d:] starts 
with `%s` and pass it", offset, anchor))) + if pass { + body.Append( + srcobj.Comment(fmt.Sprintf("Checks if rest[%d:] starts with `%s` and pass it", offset, anchor))) + } else { + body.Append( + srcobj.Comment(fmt.Sprintf("Checks if rest[%d:] starts with `%s`", offset, anchor))) + } } else { - body.Append(srcobj.Comment(fmt.Sprintf("Checks if the rest starts with `%s` and pass it", anchor))) + if pass { + body.Append(srcobj.Comment(fmt.Sprintf("Checks if the rest starts with `%s` and pass it", anchor))) + } else { + body.Append(srcobj.Comment(fmt.Sprintf("Checks if the rest starts with `%s`", anchor))) + } } var rest = g.rest() @@ -132,23 +141,30 @@ func (g *Generator) checkStringPrefix(anchor string, offset int, ignore bool) er } } - body.Append(srcobj.If{ - Expr: code, - Then: srcobj.LineAssign{ - Receiver: g.curRestVar(), - Expr: srcobj.SliceFrom(srcobj.Raw(g.curRestVar()), shift), - }, - Else: failure, - }) + if pass { + body.Append(srcobj.If{ + Expr: code, + Then: srcobj.LineAssign{ + Receiver: g.curRestVar(), + Expr: srcobj.SliceFrom(srcobj.Raw(g.curRestVar()), shift), + }, + Else: failure, + }) + } else { + body.Append(srcobj.If{ + Expr: srcobj.OperatorNot(code), + Then: failure, + }) + } return nil } // HeadString checks if the rest starts with the given string and passes it -func (g *Generator) HeadString(anchor string, ignore bool) error { - return g.checkStringPrefix(anchor, 0, ignore) +func (g *Generator) HeadString(anchor string, ignore, pass bool) error { + return g.checkStringPrefix(anchor, 0, ignore, pass) } -func (g *Generator) checkCharPrefix(char string, offset int, ignore bool) error { +func (g *Generator) checkCharPrefix(char string, offset int, ignore, pass bool) error { if err := g.regRightVar(g.curRestVar()); err != nil { return err } @@ -171,10 +187,17 @@ func (g *Generator) checkCharPrefix(char string, offset int, ignore bool) error body := srcobj.NewBody(srcobj.Raw("\n")) if offset > 0 { - body.Append( - 
srcobj.Comment(fmt.Sprintf("Checks if rest[%d:] starts with %s and pass it", offset, char))) + if pass { + body.Append(srcobj.Comment(fmt.Sprintf("Checks if rest[%d:] starts with %s and pass it", offset, char))) + } else { + body.Append(srcobj.Comment(fmt.Sprintf("Checks if rest[%d:] starts with %s", offset, char))) + } } else { - body.Append(srcobj.Comment(fmt.Sprintf("Checks if the rest starts with %s and pass it", char))) + if pass { + body.Append(srcobj.Comment(fmt.Sprintf("Checks if the rest starts with %s and pass it", char))) + } else { + body.Append(srcobj.Comment(fmt.Sprintf("Checks if the rest starts with %ss", char))) + } } var cond srcobj.Source @@ -203,19 +226,27 @@ func (g *Generator) checkCharPrefix(char string, offset int, ignore bool) error ), ) - body.Append(srcobj.If{ - Expr: cond, - Then: srcobj.LineAssign{ - Receiver: g.curRestVar(), - Expr: srcobj.SliceFrom(srcobj.Raw(g.curRestVar()), shift), - }, - Else: failure, - }) + if pass { + body.Append(srcobj.If{ + Expr: cond, + Then: srcobj.LineAssign{ + Receiver: g.curRestVar(), + Expr: srcobj.SliceFrom(srcobj.Raw(g.curRestVar()), shift), + }, + Else: failure, + }) + } else { + body.Append(srcobj.If{ + Expr: srcobj.OperatorNot(cond), + Then: failure, + }) + + } g.body.Append(body) return nil } // HeadChar checks if rest starts with the given char -func (g *Generator) HeadChar(char string, ignore bool) error { - return g.checkCharPrefix(char, 0, false) +func (g *Generator) HeadChar(char string, ignore, pass bool) error { + return g.checkCharPrefix(char, 0, false, pass) } diff --git a/internal/generator/gogen/lookup.go b/internal/generator/gogen/lookup.go index 4e3bf13..4b37b4a 100644 --- a/internal/generator/gogen/lookup.go +++ b/internal/generator/gogen/lookup.go @@ -26,7 +26,7 @@ func (g *Generator) regRightVar(name string) error { } // LookupString ... 
-func (g *Generator) LookupString(anchor string, lower, upper int, close, ignore bool) error { +func (g *Generator) LookupString(anchor string, lower, upper int, close, ignore, pass bool) error { if err := g.regVar("pos", "int"); err != nil { return err } @@ -120,19 +120,30 @@ func (g *Generator) LookupString(anchor string, lower, upper int, close, ignore var offset srcobj.Source if lower == 0 { - offset = srcobj.OperatorAdd( - srcobj.Raw("pos"), - srcobj.NewCall("len", srcobj.Raw(constName)), - ) + if pass { + offset = srcobj.OperatorAdd( + srcobj.Raw("pos"), + srcobj.NewCall("len", srcobj.Raw(constName)), + ) + } else { + offset = srcobj.Raw("pos") + } } else { l := fmt.Sprintf("%d", lower) - offset = srcobj.OperatorAdd( - srcobj.Raw("pos"), - srcobj.OperatorAdd( - srcobj.NewCall("len", srcobj.Raw(constName)), + if pass { + offset = srcobj.OperatorAdd( + srcobj.Raw("pos"), + srcobj.OperatorAdd( + srcobj.NewCall("len", srcobj.Raw(constName)), + srcobj.Raw(l), + ), + ) + } else { + offset = srcobj.OperatorAdd( + srcobj.Raw("pos"), srcobj.Raw(l), - ), - ) + ) + } } body.Append(srcobj.If{ @@ -146,16 +157,17 @@ func (g *Generator) LookupString(anchor string, lower, upper int, close, ignore }, Else: failure, }) + return nil } // LookupFixedString ... -func (g *Generator) LookupFixedString(anchor string, offset int, ignore bool) error { - return g.checkStringPrefix(anchor, offset, ignore) +func (g *Generator) LookupFixedString(anchor string, offset int, ignore, pass bool) error { + return g.checkStringPrefix(anchor, offset, ignore, pass) } // LookupCharEx ... 
-func (g *Generator) LookupChar(char string, lower, upper int, close, ignore bool) error { +func (g *Generator) LookupChar(char string, lower, upper int, close, ignore, pass bool) error { if err := g.regVar("pos", "int"); err != nil { return err } @@ -242,19 +254,27 @@ func (g *Generator) LookupChar(char string, lower, upper int, close, ignore bool var offset srcobj.Source if lower <= 0 { - offset = srcobj.OperatorAdd( - srcobj.Raw("pos"), - srcobj.Raw("1"), - ) + if pass { + offset = srcobj.OperatorAdd( + srcobj.Raw("pos"), + srcobj.Raw("1"), + ) + } else { + offset = srcobj.Raw("pos") + } } else { l := fmt.Sprintf("%d", lower) - offset = srcobj.OperatorAdd( - srcobj.Raw("pos"), - srcobj.OperatorAdd( - srcobj.Raw("1"), - srcobj.Raw(l), - ), - ) + if pass { + offset = srcobj.OperatorAdd( + srcobj.Raw("pos"), + srcobj.OperatorAdd( + srcobj.Raw("1"), + srcobj.Raw(l), + ), + ) + } else { + offset = srcobj.OperatorAdd(srcobj.Raw("pos"), srcobj.Raw(l)) + } } body.Append(srcobj.If{ @@ -272,6 +292,6 @@ func (g *Generator) LookupChar(char string, lower, upper int, close, ignore bool } // LookupFixedChar ... 
-func (g *Generator) LookupFixedChar(anchor string, offset int, ignore bool) error { - return g.checkCharPrefix(anchor, offset, ignore) +func (g *Generator) LookupFixedChar(anchor string, offset int, ignore, pass bool) error { + return g.checkCharPrefix(anchor, offset, ignore, pass) } diff --git a/internal/go.mod b/internal/go.mod deleted file mode 100644 index 5f1c2c9..0000000 --- a/internal/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/sirkon/ldetool/internal - -go 1.12 diff --git a/internal/listener/listener.go b/internal/listener/listener.go index e2b25cc..f9a21eb 100644 --- a/internal/listener/listener.go +++ b/internal/listener/listener.go @@ -48,6 +48,7 @@ type Listener struct { optional bool lookup bool stateIsPrefix bool + dontPass bool expectEnd bool mustNotBeExact bool } @@ -126,6 +127,7 @@ func (l *Listener) ExitAtomicAction(ctx *parser.AtomicActionContext) {} // EnterPassTargetPrefix is called when production passTargetPrefix is entered. func (l *Listener) EnterPassTargetPrefix(ctx *parser.PassTargetPrefixContext) { l.stateIsPrefix = true + l.dontPass = false l.prefixJump = 0 if ctx.IntLit() != nil { l.prefixJump, _ = strconv.Atoi(ctx.IntLit().GetText()) @@ -138,6 +140,20 @@ func (l *Listener) ExitPassTargetPrefix(ctx *parser.PassTargetPrefixContext) { l.optional = false } +func (l *Listener) EnterCheckTargetPrefix(ctx *parser.CheckTargetPrefixContext) { + l.stateIsPrefix = true + l.dontPass = true + l.prefixJump = 0 + if ctx.IntLit() != nil { + l.prefixJump, _ = strconv.Atoi(ctx.IntLit().GetText()) + } +} + +func (l *Listener) ExitCheckTargetPrefix(c *parser.CheckTargetPrefixContext) { + l.stateIsPrefix = false + l.optional = false +} + // EnterMayBePassTargetPrefix is called when production mayBePassTargetPrefix is entered. 
func (l *Listener) EnterMayBePassTargetPrefix(ctx *parser.MayBePassTargetPrefixContext) { l.stateIsPrefix = true @@ -169,7 +185,7 @@ func (l *Listener) ExitPassChars(ctx *parser.PassCharsContext) {} // EnterPassUntil is called when production passUntil is entered. func (l *Listener) EnterPassUntil(ctx *parser.PassUntilContext) { - a := ast.PassUntilTarget() + a := ast.PassAfterTarget() l.seq().Append(a) l.target = a.Limit l.lookup = true @@ -187,9 +203,27 @@ func (l *Listener) ExitPassUntil(ctx *parser.PassUntilContext) { } } +func (l *Listener) EnterGoUntil(c *parser.GoUntilContext) { + a := ast.PassBeforeTarget() + l.seq().Append(a) + l.target = a.Limit + l.lookup = true +} + +func (l *Listener) ExitGoUntil(ctx *parser.GoUntilContext) { + l.lookup = false + if l.mustNotBeExact { + panic(fmt.Sprintf( + "%d:%d: use prefix operator (\033[1m^\033[0m) instead of \033[1m_\033[0m", + ctx.GetStart().GetLine(), + ctx.GetStart().GetColumn()+1, + )) + } +} + // EnterMayPassUntil is called when production mayPassUntil is entered. 
func (l *Listener) EnterMayPassUntil(ctx *parser.MayPassUntilContext) { - a := ast.PassUntilTargetOrIgnore() + a := ast.PassAfterTargetOrIgnore() l.seq().Append(a) l.target = a.Limit l.lookup = true @@ -207,6 +241,24 @@ func (l *Listener) ExitMayPassUntil(ctx *parser.MayPassUntilContext) { } } +func (l *Listener) EnterMayGoUntil(ctx *parser.MayGoUntilContext) { + a := ast.PassBeforeTarget() + l.seq().Append(a) + l.target = a.Limit + l.lookup = true +} + +func (l *Listener) ExitMayGoUntil(ctx *parser.MayGoUntilContext) { + l.lookup = false + if l.mustNotBeExact { + panic(fmt.Sprintf( + "%d:%d: use prefix operator (\033[1m^\033[0m) instead of \033[1m_\033[0m", + ctx.GetStart().GetLine(), + ctx.GetStart().GetColumn()+1, + )) + } +} + func (l *Listener) EnterRestCheck(c *parser.RestCheckContext) { var operator string if c.ComparisonOperator() != nil { @@ -333,13 +385,22 @@ func (l *Listener) EnterTargetLit(ctx *parser.TargetLitContext) { if l.optional { a = ast.MayBeStartsWithString(ctx.StringLit().GetSymbol()) } else { - a = ast.StartsWithString(ctx.StringLit().GetSymbol()) + if l.dontPass { + a = ast.StartsWithStringWithoutPass(ctx.StringLit().GetSymbol()) + } else { + a = ast.StartsWithString(ctx.StringLit().GetSymbol()) + } } } else if ctx.CharLit() != nil { if l.optional { a = ast.MayBeStartsWithChar(ctx.CharLit().GetSymbol()) } else { - a = ast.StartsWithChar(ctx.CharLit().GetSymbol()) + if l.dontPass { + a = ast.StartsWithCharWithoutPass(ctx.CharLit().GetSymbol()) + a = ast.StartsWithCharWithoutPass(ctx.CharLit().GetSymbol()) + } else { + a = ast.StartsWithChar(ctx.CharLit().GetSymbol()) + } } } if a != nil { @@ -348,11 +409,11 @@ func (l *Listener) EnterTargetLit(ctx *parser.TargetLitContext) { return } else { if l.optional { - ll := ast.PassUntilTargetOrIgnore() + ll := ast.PassAfterTargetOrIgnore() a = ll l.target = ll.Limit } else { - ll := ast.PassUntilTarget() + ll := ast.PassAfterTarget() a = ll l.target = ll.Limit } diff --git 
a/internal/parser/LDE.interp b/internal/parser/LDE.interp index 14ae053..dac5f11 100644 --- a/internal/parser/LDE.interp +++ b/internal/parser/LDE.interp @@ -8,9 +8,11 @@ null '^' '[' ']' +'@' '?' '_' ':' +'..' '%' '$' '~' @@ -40,6 +42,8 @@ null null null null +null +null ComparisonOperator Identifier IdentifierWithFraction @@ -57,8 +61,11 @@ baseAction atomicAction passHeadingCharacters passTargetPrefix +checkTargetPrefix mayBePassTargetPrefix passChars +goUntil +mayGoUntil passUntil mayPassUntil takeUntil @@ -80,4 +87,4 @@ fieldType atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 25, 236, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 3, 2, 3, 2, 3, 2, 5, 2, 58, 10, 2, 3, 2, 3, 2, 7, 2, 62, 10, 2, 12, 2, 14, 2, 65, 11, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 87, 10, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 5, 5, 104, 10, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 117, 10, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 5, 8, 129, 10, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 191, 10, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 
21, 3, 21, 3, 21, 5, 21, 210, 10, 21, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 2, 3, 2, 28, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 2, 4, 3, 2, 21, 22, 3, 2, 18, 19, 2, 237, 2, 57, 3, 2, 2, 2, 4, 66, 3, 2, 2, 2, 6, 86, 3, 2, 2, 2, 8, 103, 3, 2, 2, 2, 10, 105, 3, 2, 2, 2, 12, 116, 3, 2, 2, 2, 14, 128, 3, 2, 2, 2, 16, 130, 3, 2, 2, 2, 18, 136, 3, 2, 2, 2, 20, 139, 3, 2, 2, 2, 22, 143, 3, 2, 2, 2, 24, 149, 3, 2, 2, 2, 26, 155, 3, 2, 2, 2, 28, 162, 3, 2, 2, 2, 30, 169, 3, 2, 2, 2, 32, 174, 3, 2, 2, 2, 34, 180, 3, 2, 2, 2, 36, 190, 3, 2, 2, 2, 38, 192, 3, 2, 2, 2, 40, 209, 3, 2, 2, 2, 42, 211, 3, 2, 2, 2, 44, 213, 3, 2, 2, 2, 46, 219, 3, 2, 2, 2, 48, 224, 3, 2, 2, 2, 50, 229, 3, 2, 2, 2, 52, 233, 3, 2, 2, 2, 54, 55, 8, 2, 1, 2, 55, 58, 5, 4, 3, 2, 56, 58, 7, 2, 2, 3, 57, 54, 3, 2, 2, 2, 57, 56, 3, 2, 2, 2, 58, 63, 3, 2, 2, 2, 59, 60, 12, 5, 2, 2, 60, 62, 5, 4, 3, 2, 61, 59, 3, 2, 2, 2, 62, 65, 3, 2, 2, 2, 63, 61, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 3, 3, 2, 2, 2, 65, 63, 3, 2, 2, 2, 66, 67, 7, 18, 2, 2, 67, 68, 7, 3, 2, 2, 68, 69, 5, 6, 4, 2, 69, 70, 7, 4, 2, 2, 70, 5, 3, 2, 2, 2, 71, 72, 7, 25, 2, 2, 72, 87, 5, 6, 4, 2, 73, 74, 7, 5, 2, 2, 74, 75, 5, 6, 4, 2, 75, 76, 7, 6, 2, 2, 76, 77, 5, 6, 4, 2, 77, 87, 3, 2, 2, 2, 78, 79, 7, 5, 2, 2, 79, 80, 5, 6, 4, 2, 80, 81, 7, 6, 2, 2, 81, 87, 3, 2, 2, 2, 82, 83, 5, 8, 5, 2, 83, 84, 5, 6, 4, 2, 84, 87, 3, 2, 2, 2, 85, 87, 5, 8, 5, 2, 86, 71, 3, 2, 2, 2, 86, 73, 3, 2, 2, 2, 86, 78, 3, 2, 2, 2, 86, 82, 3, 2, 2, 2, 86, 85, 3, 2, 2, 2, 87, 7, 3, 2, 2, 2, 88, 104, 5, 12, 7, 2, 89, 104, 5, 10, 6, 2, 90, 104, 5, 14, 8, 2, 91, 104, 5, 16, 9, 2, 92, 104, 5, 18, 10, 2, 93, 104, 5, 20, 11, 2, 94, 104, 5, 22, 12, 2, 95, 104, 5, 24, 13, 2, 96, 104, 5, 26, 14, 2, 97, 104, 5, 28, 15, 2, 98, 104, 5, 30, 16, 2, 99, 104, 5, 32, 17, 2, 100, 104, 5, 34, 18, 
2, 101, 104, 5, 36, 19, 2, 102, 104, 5, 38, 20, 2, 103, 88, 3, 2, 2, 2, 103, 89, 3, 2, 2, 2, 103, 90, 3, 2, 2, 2, 103, 91, 3, 2, 2, 2, 103, 92, 3, 2, 2, 2, 103, 93, 3, 2, 2, 2, 103, 94, 3, 2, 2, 2, 103, 95, 3, 2, 2, 2, 103, 96, 3, 2, 2, 2, 103, 97, 3, 2, 2, 2, 103, 98, 3, 2, 2, 2, 103, 99, 3, 2, 2, 2, 103, 100, 3, 2, 2, 2, 103, 101, 3, 2, 2, 2, 103, 102, 3, 2, 2, 2, 104, 9, 3, 2, 2, 2, 105, 106, 7, 7, 2, 2, 106, 107, 7, 22, 2, 2, 107, 11, 3, 2, 2, 2, 108, 109, 7, 8, 2, 2, 109, 110, 5, 42, 22, 2, 110, 111, 7, 9, 2, 2, 111, 112, 7, 20, 2, 2, 112, 113, 7, 10, 2, 2, 113, 117, 3, 2, 2, 2, 114, 115, 7, 8, 2, 2, 115, 117, 5, 42, 22, 2, 116, 108, 3, 2, 2, 2, 116, 114, 3, 2, 2, 2, 117, 13, 3, 2, 2, 2, 118, 119, 7, 11, 2, 2, 119, 120, 7, 8, 2, 2, 120, 121, 5, 42, 22, 2, 121, 122, 7, 9, 2, 2, 122, 123, 7, 20, 2, 2, 123, 124, 7, 10, 2, 2, 124, 129, 3, 2, 2, 2, 125, 126, 7, 11, 2, 2, 126, 127, 7, 8, 2, 2, 127, 129, 5, 42, 22, 2, 128, 118, 3, 2, 2, 2, 128, 125, 3, 2, 2, 2, 129, 15, 3, 2, 2, 2, 130, 131, 7, 12, 2, 2, 131, 132, 7, 9, 2, 2, 132, 133, 7, 20, 2, 2, 133, 134, 7, 13, 2, 2, 134, 135, 7, 10, 2, 2, 135, 17, 3, 2, 2, 2, 136, 137, 7, 12, 2, 2, 137, 138, 5, 40, 21, 2, 138, 19, 3, 2, 2, 2, 139, 140, 7, 11, 2, 2, 140, 141, 7, 12, 2, 2, 141, 142, 5, 40, 21, 2, 142, 21, 3, 2, 2, 2, 143, 144, 7, 18, 2, 2, 144, 145, 7, 5, 2, 2, 145, 146, 5, 52, 27, 2, 146, 147, 7, 6, 2, 2, 147, 148, 5, 40, 21, 2, 148, 23, 3, 2, 2, 2, 149, 150, 7, 18, 2, 2, 150, 151, 7, 9, 2, 2, 151, 152, 5, 52, 27, 2, 152, 153, 7, 10, 2, 2, 153, 154, 5, 40, 21, 2, 154, 25, 3, 2, 2, 2, 155, 156, 7, 18, 2, 2, 156, 157, 7, 5, 2, 2, 157, 158, 5, 52, 27, 2, 158, 159, 7, 6, 2, 2, 159, 160, 7, 11, 2, 2, 160, 161, 5, 40, 21, 2, 161, 27, 3, 2, 2, 2, 162, 163, 7, 18, 2, 2, 163, 164, 7, 9, 2, 2, 164, 165, 5, 52, 27, 2, 165, 166, 7, 10, 2, 2, 166, 167, 7, 11, 2, 2, 167, 168, 5, 40, 21, 2, 168, 29, 3, 2, 2, 2, 169, 170, 7, 18, 2, 2, 170, 171, 7, 5, 2, 2, 171, 172, 5, 52, 27, 2, 172, 173, 7, 6, 2, 2, 173, 31, 3, 2, 2, 2, 174, 
175, 7, 11, 2, 2, 175, 176, 7, 18, 2, 2, 176, 177, 7, 5, 2, 2, 177, 178, 5, 6, 4, 2, 178, 179, 7, 6, 2, 2, 179, 33, 3, 2, 2, 2, 180, 181, 7, 11, 2, 2, 181, 182, 7, 5, 2, 2, 182, 183, 5, 6, 4, 2, 183, 184, 7, 6, 2, 2, 184, 35, 3, 2, 2, 2, 185, 186, 7, 14, 2, 2, 186, 191, 7, 20, 2, 2, 187, 188, 7, 14, 2, 2, 188, 189, 7, 17, 2, 2, 189, 191, 7, 20, 2, 2, 190, 185, 3, 2, 2, 2, 190, 187, 3, 2, 2, 2, 191, 37, 3, 2, 2, 2, 192, 193, 7, 15, 2, 2, 193, 39, 3, 2, 2, 2, 194, 195, 5, 42, 22, 2, 195, 196, 5, 44, 23, 2, 196, 210, 3, 2, 2, 2, 197, 198, 5, 42, 22, 2, 198, 199, 5, 46, 24, 2, 199, 210, 3, 2, 2, 2, 200, 201, 5, 42, 22, 2, 201, 202, 5, 50, 26, 2, 202, 210, 3, 2, 2, 2, 203, 204, 5, 42, 22, 2, 204, 205, 5, 48, 25, 2, 205, 210, 3, 2, 2, 2, 206, 210, 5, 42, 22, 2, 207, 208, 7, 16, 2, 2, 208, 210, 5, 40, 21, 2, 209, 194, 3, 2, 2, 2, 209, 197, 3, 2, 2, 2, 209, 200, 3, 2, 2, 2, 209, 203, 3, 2, 2, 2, 209, 206, 3, 2, 2, 2, 209, 207, 3, 2, 2, 2, 210, 41, 3, 2, 2, 2, 211, 212, 9, 2, 2, 2, 212, 43, 3, 2, 2, 2, 213, 214, 7, 9, 2, 2, 214, 215, 7, 20, 2, 2, 215, 216, 7, 13, 2, 2, 216, 217, 7, 20, 2, 2, 217, 218, 7, 10, 2, 2, 218, 45, 3, 2, 2, 2, 219, 220, 7, 9, 2, 2, 220, 221, 7, 13, 2, 2, 221, 222, 7, 20, 2, 2, 222, 223, 7, 10, 2, 2, 223, 47, 3, 2, 2, 2, 224, 225, 7, 9, 2, 2, 225, 226, 7, 20, 2, 2, 226, 227, 7, 13, 2, 2, 227, 228, 7, 10, 2, 2, 228, 49, 3, 2, 2, 2, 229, 230, 7, 9, 2, 2, 230, 231, 7, 20, 2, 2, 231, 232, 7, 10, 2, 2, 232, 51, 3, 2, 2, 2, 233, 234, 9, 3, 2, 2, 234, 53, 3, 2, 2, 2, 10, 57, 63, 86, 103, 116, 128, 190, 209] \ No newline at end of file +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 27, 262, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 
28, 4, 29, 9, 29, 4, 30, 9, 30, 3, 2, 3, 2, 3, 2, 5, 2, 64, 10, 2, 3, 2, 3, 2, 7, 2, 68, 10, 2, 12, 2, 14, 2, 71, 11, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 93, 10, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 5, 5, 113, 10, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 126, 10, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 5, 8, 136, 10, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 148, 10, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 5, 22, 217, 10, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 5, 24, 236, 10, 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 2, 3, 2, 31, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 2, 4, 3, 2, 23, 24, 3, 2, 20, 21, 2, 264, 2, 63, 3, 2, 2, 2, 4, 72, 3, 2, 2, 2, 6, 92, 3, 2, 2, 2, 8, 112, 3, 2, 2, 2, 10, 114, 3, 2, 2, 2, 12, 125, 3, 2, 2, 2, 14, 135, 3, 2, 2, 2, 16, 147, 3, 2, 2, 2, 18, 149, 3, 2, 2, 2, 20, 155, 3, 2, 2, 2, 22, 158, 3, 2, 2, 2, 24, 162, 3, 2, 2, 2, 26, 165, 3, 2, 2, 2, 28, 169, 3, 2, 2, 2, 30, 175, 3, 2, 2, 2, 32, 181, 3, 2, 2, 2, 34, 188, 3, 2, 2, 2, 36, 195, 3, 2, 2, 2, 38, 200, 3, 2, 2, 2, 40, 206, 3, 2, 2, 2, 42, 
216, 3, 2, 2, 2, 44, 218, 3, 2, 2, 2, 46, 235, 3, 2, 2, 2, 48, 237, 3, 2, 2, 2, 50, 239, 3, 2, 2, 2, 52, 245, 3, 2, 2, 2, 54, 250, 3, 2, 2, 2, 56, 255, 3, 2, 2, 2, 58, 259, 3, 2, 2, 2, 60, 61, 8, 2, 1, 2, 61, 64, 5, 4, 3, 2, 62, 64, 7, 2, 2, 3, 63, 60, 3, 2, 2, 2, 63, 62, 3, 2, 2, 2, 64, 69, 3, 2, 2, 2, 65, 66, 12, 5, 2, 2, 66, 68, 5, 4, 3, 2, 67, 65, 3, 2, 2, 2, 68, 71, 3, 2, 2, 2, 69, 67, 3, 2, 2, 2, 69, 70, 3, 2, 2, 2, 70, 3, 3, 2, 2, 2, 71, 69, 3, 2, 2, 2, 72, 73, 7, 20, 2, 2, 73, 74, 7, 3, 2, 2, 74, 75, 5, 6, 4, 2, 75, 76, 7, 4, 2, 2, 76, 5, 3, 2, 2, 2, 77, 78, 7, 27, 2, 2, 78, 93, 5, 6, 4, 2, 79, 80, 7, 5, 2, 2, 80, 81, 5, 6, 4, 2, 81, 82, 7, 6, 2, 2, 82, 83, 5, 6, 4, 2, 83, 93, 3, 2, 2, 2, 84, 85, 7, 5, 2, 2, 85, 86, 5, 6, 4, 2, 86, 87, 7, 6, 2, 2, 87, 93, 3, 2, 2, 2, 88, 89, 5, 8, 5, 2, 89, 90, 5, 6, 4, 2, 90, 93, 3, 2, 2, 2, 91, 93, 5, 8, 5, 2, 92, 77, 3, 2, 2, 2, 92, 79, 3, 2, 2, 2, 92, 84, 3, 2, 2, 2, 92, 88, 3, 2, 2, 2, 92, 91, 3, 2, 2, 2, 93, 7, 3, 2, 2, 2, 94, 113, 5, 12, 7, 2, 95, 113, 5, 14, 8, 2, 96, 113, 5, 10, 6, 2, 97, 113, 5, 16, 9, 2, 98, 113, 5, 18, 10, 2, 99, 113, 5, 24, 13, 2, 100, 113, 5, 26, 14, 2, 101, 113, 5, 20, 11, 2, 102, 113, 5, 22, 12, 2, 103, 113, 5, 28, 15, 2, 104, 113, 5, 30, 16, 2, 105, 113, 5, 32, 17, 2, 106, 113, 5, 34, 18, 2, 107, 113, 5, 36, 19, 2, 108, 113, 5, 38, 20, 2, 109, 113, 5, 40, 21, 2, 110, 113, 5, 42, 22, 2, 111, 113, 5, 44, 23, 2, 112, 94, 3, 2, 2, 2, 112, 95, 3, 2, 2, 2, 112, 96, 3, 2, 2, 2, 112, 97, 3, 2, 2, 2, 112, 98, 3, 2, 2, 2, 112, 99, 3, 2, 2, 2, 112, 100, 3, 2, 2, 2, 112, 101, 3, 2, 2, 2, 112, 102, 3, 2, 2, 2, 112, 103, 3, 2, 2, 2, 112, 104, 3, 2, 2, 2, 112, 105, 3, 2, 2, 2, 112, 106, 3, 2, 2, 2, 112, 107, 3, 2, 2, 2, 112, 108, 3, 2, 2, 2, 112, 109, 3, 2, 2, 2, 112, 110, 3, 2, 2, 2, 112, 111, 3, 2, 2, 2, 113, 9, 3, 2, 2, 2, 114, 115, 7, 7, 2, 2, 115, 116, 7, 24, 2, 2, 116, 11, 3, 2, 2, 2, 117, 118, 7, 8, 2, 2, 118, 119, 5, 48, 25, 2, 119, 120, 7, 9, 2, 2, 120, 121, 7, 22, 2, 2, 121, 122, 7, 10, 2, 2, 
122, 126, 3, 2, 2, 2, 123, 124, 7, 8, 2, 2, 124, 126, 5, 48, 25, 2, 125, 117, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 126, 13, 3, 2, 2, 2, 127, 128, 7, 11, 2, 2, 128, 129, 5, 48, 25, 2, 129, 130, 7, 9, 2, 2, 130, 131, 7, 22, 2, 2, 131, 132, 7, 10, 2, 2, 132, 136, 3, 2, 2, 2, 133, 134, 7, 11, 2, 2, 134, 136, 5, 48, 25, 2, 135, 127, 3, 2, 2, 2, 135, 133, 3, 2, 2, 2, 136, 15, 3, 2, 2, 2, 137, 138, 7, 12, 2, 2, 138, 139, 7, 8, 2, 2, 139, 140, 5, 48, 25, 2, 140, 141, 7, 9, 2, 2, 141, 142, 7, 22, 2, 2, 142, 143, 7, 10, 2, 2, 143, 148, 3, 2, 2, 2, 144, 145, 7, 12, 2, 2, 145, 146, 7, 8, 2, 2, 146, 148, 5, 48, 25, 2, 147, 137, 3, 2, 2, 2, 147, 144, 3, 2, 2, 2, 148, 17, 3, 2, 2, 2, 149, 150, 7, 13, 2, 2, 150, 151, 7, 9, 2, 2, 151, 152, 7, 22, 2, 2, 152, 153, 7, 14, 2, 2, 153, 154, 7, 10, 2, 2, 154, 19, 3, 2, 2, 2, 155, 156, 7, 15, 2, 2, 156, 157, 5, 46, 24, 2, 157, 21, 3, 2, 2, 2, 158, 159, 7, 12, 2, 2, 159, 160, 7, 15, 2, 2, 160, 161, 5, 46, 24, 2, 161, 23, 3, 2, 2, 2, 162, 163, 7, 13, 2, 2, 163, 164, 5, 46, 24, 2, 164, 25, 3, 2, 2, 2, 165, 166, 7, 12, 2, 2, 166, 167, 7, 13, 2, 2, 167, 168, 5, 46, 24, 2, 168, 27, 3, 2, 2, 2, 169, 170, 7, 20, 2, 2, 170, 171, 7, 5, 2, 2, 171, 172, 5, 58, 30, 2, 172, 173, 7, 6, 2, 2, 173, 174, 5, 46, 24, 2, 174, 29, 3, 2, 2, 2, 175, 176, 7, 20, 2, 2, 176, 177, 7, 9, 2, 2, 177, 178, 5, 58, 30, 2, 178, 179, 7, 10, 2, 2, 179, 180, 5, 46, 24, 2, 180, 31, 3, 2, 2, 2, 181, 182, 7, 20, 2, 2, 182, 183, 7, 5, 2, 2, 183, 184, 5, 58, 30, 2, 184, 185, 7, 6, 2, 2, 185, 186, 7, 12, 2, 2, 186, 187, 5, 46, 24, 2, 187, 33, 3, 2, 2, 2, 188, 189, 7, 20, 2, 2, 189, 190, 7, 9, 2, 2, 190, 191, 5, 58, 30, 2, 191, 192, 7, 10, 2, 2, 192, 193, 7, 12, 2, 2, 193, 194, 5, 46, 24, 2, 194, 35, 3, 2, 2, 2, 195, 196, 7, 20, 2, 2, 196, 197, 7, 5, 2, 2, 197, 198, 5, 58, 30, 2, 198, 199, 7, 6, 2, 2, 199, 37, 3, 2, 2, 2, 200, 201, 7, 12, 2, 2, 201, 202, 7, 20, 2, 2, 202, 203, 7, 5, 2, 2, 203, 204, 5, 6, 4, 2, 204, 205, 7, 6, 2, 2, 205, 39, 3, 2, 2, 2, 206, 207, 7, 12, 2, 2, 207, 208, 
7, 5, 2, 2, 208, 209, 5, 6, 4, 2, 209, 210, 7, 6, 2, 2, 210, 41, 3, 2, 2, 2, 211, 212, 7, 16, 2, 2, 212, 217, 7, 22, 2, 2, 213, 214, 7, 16, 2, 2, 214, 215, 7, 19, 2, 2, 215, 217, 7, 22, 2, 2, 216, 211, 3, 2, 2, 2, 216, 213, 3, 2, 2, 2, 217, 43, 3, 2, 2, 2, 218, 219, 7, 17, 2, 2, 219, 45, 3, 2, 2, 2, 220, 221, 5, 48, 25, 2, 221, 222, 5, 50, 26, 2, 222, 236, 3, 2, 2, 2, 223, 224, 5, 48, 25, 2, 224, 225, 5, 52, 27, 2, 225, 236, 3, 2, 2, 2, 226, 227, 5, 48, 25, 2, 227, 228, 5, 56, 29, 2, 228, 236, 3, 2, 2, 2, 229, 230, 5, 48, 25, 2, 230, 231, 5, 54, 28, 2, 231, 236, 3, 2, 2, 2, 232, 236, 5, 48, 25, 2, 233, 234, 7, 18, 2, 2, 234, 236, 5, 46, 24, 2, 235, 220, 3, 2, 2, 2, 235, 223, 3, 2, 2, 2, 235, 226, 3, 2, 2, 2, 235, 229, 3, 2, 2, 2, 235, 232, 3, 2, 2, 2, 235, 233, 3, 2, 2, 2, 236, 47, 3, 2, 2, 2, 237, 238, 9, 2, 2, 2, 238, 49, 3, 2, 2, 2, 239, 240, 7, 9, 2, 2, 240, 241, 7, 22, 2, 2, 241, 242, 7, 14, 2, 2, 242, 243, 7, 22, 2, 2, 243, 244, 7, 10, 2, 2, 244, 51, 3, 2, 2, 2, 245, 246, 7, 9, 2, 2, 246, 247, 7, 14, 2, 2, 247, 248, 7, 22, 2, 2, 248, 249, 7, 10, 2, 2, 249, 53, 3, 2, 2, 2, 250, 251, 7, 9, 2, 2, 251, 252, 7, 22, 2, 2, 252, 253, 7, 14, 2, 2, 253, 254, 7, 10, 2, 2, 254, 55, 3, 2, 2, 2, 255, 256, 7, 9, 2, 2, 256, 257, 7, 22, 2, 2, 257, 258, 7, 10, 2, 2, 258, 57, 3, 2, 2, 2, 259, 260, 9, 3, 2, 2, 260, 59, 3, 2, 2, 2, 11, 63, 69, 92, 112, 125, 135, 147, 216, 235] \ No newline at end of file diff --git a/internal/parser/LDE.tokens b/internal/parser/LDE.tokens index 7f42285..6a3f306 100644 --- a/internal/parser/LDE.tokens +++ b/internal/parser/LDE.tokens @@ -12,15 +12,17 @@ T__10=11 T__11=12 T__12=13 T__13=14 -ComparisonOperator=15 -Identifier=16 -IdentifierWithFraction=17 -IntLit=18 -StringLit=19 -CharLit=20 -WS=21 -LineComment=22 -Stress=23 +T__14=15 +T__15=16 +ComparisonOperator=17 +Identifier=18 +IdentifierWithFraction=19 +IntLit=20 +StringLit=21 +CharLit=22 +WS=23 +LineComment=24 +Stress=25 '='=1 ';'=2 '('=3 @@ -29,10 +31,12 @@ Stress=23 '^'=6 '['=7 ']'=8 -'?'=9 
-'_'=10 -':'=11 -'%'=12 -'$'=13 -'~'=14 -'!'=23 +'@'=9 +'?'=10 +'_'=11 +':'=12 +'..'=13 +'%'=14 +'$'=15 +'~'=16 +'!'=25 diff --git a/internal/parser/LDELexer.interp b/internal/parser/LDELexer.interp index 85a59ce..86209bd 100644 --- a/internal/parser/LDELexer.interp +++ b/internal/parser/LDELexer.interp @@ -8,9 +8,11 @@ null '^' '[' ']' +'@' '?' '_' ':' +'..' '%' '$' '~' @@ -40,6 +42,8 @@ null null null null +null +null ComparisonOperator Identifier IdentifierWithFraction @@ -65,6 +69,8 @@ T__10 T__11 T__12 T__13 +T__14 +T__15 ComparisonOperator Identifier IdentifierWithFraction @@ -85,4 +91,4 @@ mode names: DEFAULT_MODE atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 25, 149, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 7, 17, 86, 10, 17, 12, 17, 14, 17, 89, 11, 17, 3, 18, 3, 18, 7, 18, 93, 10, 18, 12, 18, 14, 18, 96, 11, 18, 3, 18, 3, 18, 6, 18, 100, 10, 18, 13, 18, 14, 18, 101, 3, 19, 6, 19, 105, 10, 19, 13, 19, 14, 19, 106, 3, 20, 3, 20, 3, 20, 3, 21, 3, 21, 3, 21, 7, 21, 115, 10, 21, 12, 21, 14, 21, 118, 11, 21, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 7, 23, 128, 10, 23, 12, 23, 14, 23, 131, 11, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 7, 25, 141, 10, 25, 12, 25, 14, 25, 144, 11, 25, 3, 25, 3, 25, 3, 26, 3, 26, 4, 116, 129, 2, 27, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 2, 41, 21, 43, 2, 45, 22, 47, 23, 49, 24, 51, 
25, 3, 2, 9, 4, 2, 62, 62, 64, 64, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 3, 2, 50, 59, 4, 2, 11, 12, 15, 15, 5, 2, 11, 12, 15, 15, 34, 34, 4, 2, 12, 12, 15, 15, 2, 155, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 3, 53, 3, 2, 2, 2, 5, 55, 3, 2, 2, 2, 7, 57, 3, 2, 2, 2, 9, 59, 3, 2, 2, 2, 11, 61, 3, 2, 2, 2, 13, 63, 3, 2, 2, 2, 15, 65, 3, 2, 2, 2, 17, 67, 3, 2, 2, 2, 19, 69, 3, 2, 2, 2, 21, 71, 3, 2, 2, 2, 23, 73, 3, 2, 2, 2, 25, 75, 3, 2, 2, 2, 27, 77, 3, 2, 2, 2, 29, 79, 3, 2, 2, 2, 31, 81, 3, 2, 2, 2, 33, 83, 3, 2, 2, 2, 35, 90, 3, 2, 2, 2, 37, 104, 3, 2, 2, 2, 39, 108, 3, 2, 2, 2, 41, 111, 3, 2, 2, 2, 43, 121, 3, 2, 2, 2, 45, 124, 3, 2, 2, 2, 47, 134, 3, 2, 2, 2, 49, 138, 3, 2, 2, 2, 51, 147, 3, 2, 2, 2, 53, 54, 7, 63, 2, 2, 54, 4, 3, 2, 2, 2, 55, 56, 7, 61, 2, 2, 56, 6, 3, 2, 2, 2, 57, 58, 7, 42, 2, 2, 58, 8, 3, 2, 2, 2, 59, 60, 7, 43, 2, 2, 60, 10, 3, 2, 2, 2, 61, 62, 7, 44, 2, 2, 62, 12, 3, 2, 2, 2, 63, 64, 7, 96, 2, 2, 64, 14, 3, 2, 2, 2, 65, 66, 7, 93, 2, 2, 66, 16, 3, 2, 2, 2, 67, 68, 7, 95, 2, 2, 68, 18, 3, 2, 2, 2, 69, 70, 7, 65, 2, 2, 70, 20, 3, 2, 2, 2, 71, 72, 7, 97, 2, 2, 72, 22, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 24, 3, 2, 2, 2, 75, 76, 7, 39, 2, 2, 76, 26, 3, 2, 2, 2, 77, 78, 7, 38, 2, 2, 78, 28, 3, 2, 2, 2, 79, 80, 7, 128, 2, 2, 80, 30, 3, 2, 2, 2, 81, 82, 9, 2, 2, 2, 82, 32, 3, 2, 2, 2, 83, 87, 9, 3, 2, 2, 84, 86, 9, 4, 2, 2, 85, 84, 3, 2, 2, 2, 86, 89, 3, 2, 2, 2, 87, 85, 3, 2, 2, 2, 87, 88, 3, 2, 2, 2, 88, 34, 3, 2, 2, 2, 89, 87, 3, 2, 2, 2, 90, 94, 9, 3, 2, 2, 91, 93, 9, 4, 2, 2, 92, 91, 3, 2, 2, 2, 93, 96, 3, 2, 2, 2, 94, 92, 
3, 2, 2, 2, 94, 95, 3, 2, 2, 2, 95, 97, 3, 2, 2, 2, 96, 94, 3, 2, 2, 2, 97, 99, 7, 48, 2, 2, 98, 100, 9, 5, 2, 2, 99, 98, 3, 2, 2, 2, 100, 101, 3, 2, 2, 2, 101, 99, 3, 2, 2, 2, 101, 102, 3, 2, 2, 2, 102, 36, 3, 2, 2, 2, 103, 105, 9, 5, 2, 2, 104, 103, 3, 2, 2, 2, 105, 106, 3, 2, 2, 2, 106, 104, 3, 2, 2, 2, 106, 107, 3, 2, 2, 2, 107, 38, 3, 2, 2, 2, 108, 109, 7, 94, 2, 2, 109, 110, 7, 36, 2, 2, 110, 40, 3, 2, 2, 2, 111, 116, 7, 36, 2, 2, 112, 115, 5, 39, 20, 2, 113, 115, 10, 6, 2, 2, 114, 112, 3, 2, 2, 2, 114, 113, 3, 2, 2, 2, 115, 118, 3, 2, 2, 2, 116, 117, 3, 2, 2, 2, 116, 114, 3, 2, 2, 2, 117, 119, 3, 2, 2, 2, 118, 116, 3, 2, 2, 2, 119, 120, 7, 36, 2, 2, 120, 42, 3, 2, 2, 2, 121, 122, 7, 94, 2, 2, 122, 123, 7, 41, 2, 2, 123, 44, 3, 2, 2, 2, 124, 129, 7, 41, 2, 2, 125, 128, 5, 43, 22, 2, 126, 128, 10, 6, 2, 2, 127, 125, 3, 2, 2, 2, 127, 126, 3, 2, 2, 2, 128, 131, 3, 2, 2, 2, 129, 130, 3, 2, 2, 2, 129, 127, 3, 2, 2, 2, 130, 132, 3, 2, 2, 2, 131, 129, 3, 2, 2, 2, 132, 133, 7, 41, 2, 2, 133, 46, 3, 2, 2, 2, 134, 135, 9, 7, 2, 2, 135, 136, 3, 2, 2, 2, 136, 137, 8, 24, 2, 2, 137, 48, 3, 2, 2, 2, 138, 142, 7, 37, 2, 2, 139, 141, 10, 8, 2, 2, 140, 139, 3, 2, 2, 2, 141, 144, 3, 2, 2, 2, 142, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 145, 3, 2, 2, 2, 144, 142, 3, 2, 2, 2, 145, 146, 8, 25, 2, 2, 146, 50, 3, 2, 2, 2, 147, 148, 7, 35, 2, 2, 148, 52, 3, 2, 2, 2, 12, 2, 87, 94, 101, 106, 114, 116, 127, 129, 142, 3, 8, 2, 2] \ No newline at end of file +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 27, 158, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 
9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 7, 19, 95, 10, 19, 12, 19, 14, 19, 98, 11, 19, 3, 20, 3, 20, 7, 20, 102, 10, 20, 12, 20, 14, 20, 105, 11, 20, 3, 20, 3, 20, 6, 20, 109, 10, 20, 13, 20, 14, 20, 110, 3, 21, 6, 21, 114, 10, 21, 13, 21, 14, 21, 115, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 7, 23, 124, 10, 23, 12, 23, 14, 23, 127, 11, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 7, 25, 137, 10, 25, 12, 25, 14, 25, 140, 11, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 150, 10, 27, 12, 27, 14, 27, 153, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 4, 125, 138, 2, 29, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 2, 45, 23, 47, 2, 49, 24, 51, 25, 53, 26, 55, 27, 3, 2, 9, 4, 2, 62, 62, 64, 64, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 3, 2, 50, 59, 4, 2, 11, 12, 15, 15, 5, 2, 11, 12, 15, 15, 34, 34, 4, 2, 12, 12, 15, 15, 2, 164, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 3, 57, 3, 2, 2, 2, 5, 59, 3, 2, 2, 2, 7, 61, 3, 2, 2, 2, 9, 63, 3, 2, 2, 2, 11, 65, 3, 2, 2, 2, 13, 67, 3, 2, 2, 2, 15, 69, 3, 2, 2, 2, 17, 71, 3, 2, 2, 2, 19, 73, 3, 2, 2, 2, 21, 75, 3, 2, 2, 2, 23, 77, 3, 2, 2, 2, 25, 79, 3, 2, 2, 2, 27, 81, 3, 2, 2, 2, 29, 84, 3, 2, 2, 2, 31, 86, 3, 2, 2, 2, 33, 88, 3, 2, 2, 2, 35, 90, 3, 2, 2, 2, 37, 92, 3, 2, 2, 2, 39, 99, 3, 2, 2, 2, 41, 113, 3, 2, 2, 2, 43, 117, 3, 2, 2, 
2, 45, 120, 3, 2, 2, 2, 47, 130, 3, 2, 2, 2, 49, 133, 3, 2, 2, 2, 51, 143, 3, 2, 2, 2, 53, 147, 3, 2, 2, 2, 55, 156, 3, 2, 2, 2, 57, 58, 7, 63, 2, 2, 58, 4, 3, 2, 2, 2, 59, 60, 7, 61, 2, 2, 60, 6, 3, 2, 2, 2, 61, 62, 7, 42, 2, 2, 62, 8, 3, 2, 2, 2, 63, 64, 7, 43, 2, 2, 64, 10, 3, 2, 2, 2, 65, 66, 7, 44, 2, 2, 66, 12, 3, 2, 2, 2, 67, 68, 7, 96, 2, 2, 68, 14, 3, 2, 2, 2, 69, 70, 7, 93, 2, 2, 70, 16, 3, 2, 2, 2, 71, 72, 7, 95, 2, 2, 72, 18, 3, 2, 2, 2, 73, 74, 7, 66, 2, 2, 74, 20, 3, 2, 2, 2, 75, 76, 7, 65, 2, 2, 76, 22, 3, 2, 2, 2, 77, 78, 7, 97, 2, 2, 78, 24, 3, 2, 2, 2, 79, 80, 7, 60, 2, 2, 80, 26, 3, 2, 2, 2, 81, 82, 7, 48, 2, 2, 82, 83, 7, 48, 2, 2, 83, 28, 3, 2, 2, 2, 84, 85, 7, 39, 2, 2, 85, 30, 3, 2, 2, 2, 86, 87, 7, 38, 2, 2, 87, 32, 3, 2, 2, 2, 88, 89, 7, 128, 2, 2, 89, 34, 3, 2, 2, 2, 90, 91, 9, 2, 2, 2, 91, 36, 3, 2, 2, 2, 92, 96, 9, 3, 2, 2, 93, 95, 9, 4, 2, 2, 94, 93, 3, 2, 2, 2, 95, 98, 3, 2, 2, 2, 96, 94, 3, 2, 2, 2, 96, 97, 3, 2, 2, 2, 97, 38, 3, 2, 2, 2, 98, 96, 3, 2, 2, 2, 99, 103, 9, 3, 2, 2, 100, 102, 9, 4, 2, 2, 101, 100, 3, 2, 2, 2, 102, 105, 3, 2, 2, 2, 103, 101, 3, 2, 2, 2, 103, 104, 3, 2, 2, 2, 104, 106, 3, 2, 2, 2, 105, 103, 3, 2, 2, 2, 106, 108, 7, 48, 2, 2, 107, 109, 9, 5, 2, 2, 108, 107, 3, 2, 2, 2, 109, 110, 3, 2, 2, 2, 110, 108, 3, 2, 2, 2, 110, 111, 3, 2, 2, 2, 111, 40, 3, 2, 2, 2, 112, 114, 9, 5, 2, 2, 113, 112, 3, 2, 2, 2, 114, 115, 3, 2, 2, 2, 115, 113, 3, 2, 2, 2, 115, 116, 3, 2, 2, 2, 116, 42, 3, 2, 2, 2, 117, 118, 7, 94, 2, 2, 118, 119, 7, 36, 2, 2, 119, 44, 3, 2, 2, 2, 120, 125, 7, 36, 2, 2, 121, 124, 5, 43, 22, 2, 122, 124, 10, 6, 2, 2, 123, 121, 3, 2, 2, 2, 123, 122, 3, 2, 2, 2, 124, 127, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 126, 128, 3, 2, 2, 2, 127, 125, 3, 2, 2, 2, 128, 129, 7, 36, 2, 2, 129, 46, 3, 2, 2, 2, 130, 131, 7, 94, 2, 2, 131, 132, 7, 41, 2, 2, 132, 48, 3, 2, 2, 2, 133, 138, 7, 41, 2, 2, 134, 137, 5, 47, 24, 2, 135, 137, 10, 6, 2, 2, 136, 134, 3, 2, 2, 2, 136, 135, 3, 2, 2, 2, 137, 140, 3, 2, 2, 
2, 138, 139, 3, 2, 2, 2, 138, 136, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 138, 3, 2, 2, 2, 141, 142, 7, 41, 2, 2, 142, 50, 3, 2, 2, 2, 143, 144, 9, 7, 2, 2, 144, 145, 3, 2, 2, 2, 145, 146, 8, 26, 2, 2, 146, 52, 3, 2, 2, 2, 147, 151, 7, 37, 2, 2, 148, 150, 10, 8, 2, 2, 149, 148, 3, 2, 2, 2, 150, 153, 3, 2, 2, 2, 151, 149, 3, 2, 2, 2, 151, 152, 3, 2, 2, 2, 152, 154, 3, 2, 2, 2, 153, 151, 3, 2, 2, 2, 154, 155, 8, 27, 2, 2, 155, 54, 3, 2, 2, 2, 156, 157, 7, 35, 2, 2, 157, 56, 3, 2, 2, 2, 12, 2, 96, 103, 110, 115, 123, 125, 136, 138, 151, 3, 8, 2, 2] \ No newline at end of file diff --git a/internal/parser/LDELexer.tokens b/internal/parser/LDELexer.tokens index 7f42285..6a3f306 100644 --- a/internal/parser/LDELexer.tokens +++ b/internal/parser/LDELexer.tokens @@ -12,15 +12,17 @@ T__10=11 T__11=12 T__12=13 T__13=14 -ComparisonOperator=15 -Identifier=16 -IdentifierWithFraction=17 -IntLit=18 -StringLit=19 -CharLit=20 -WS=21 -LineComment=22 -Stress=23 +T__14=15 +T__15=16 +ComparisonOperator=17 +Identifier=18 +IdentifierWithFraction=19 +IntLit=20 +StringLit=21 +CharLit=22 +WS=23 +LineComment=24 +Stress=25 '='=1 ';'=2 '('=3 @@ -29,10 +31,12 @@ Stress=23 '^'=6 '['=7 ']'=8 -'?'=9 -'_'=10 -':'=11 -'%'=12 -'$'=13 -'~'=14 -'!'=23 +'@'=9 +'?'=10 +'_'=11 +':'=12 +'..'=13 +'%'=14 +'$'=15 +'~'=16 +'!'=25 diff --git a/internal/parser/lde_base_listener.go b/internal/parser/lde_base_listener.go index fed6573..7209eeb 100644 --- a/internal/parser/lde_base_listener.go +++ b/internal/parser/lde_base_listener.go @@ -1,4 +1,4 @@ -// Code generated from LDE.g4 by ANTLR 4.7.1. DO NOT EDIT. +// Code generated from LDE.g4 by ANTLR 4.7.2. DO NOT EDIT. package parser // LDE @@ -57,6 +57,12 @@ func (s *BaseLDEListener) EnterPassTargetPrefix(ctx *PassTargetPrefixContext) {} // ExitPassTargetPrefix is called when production passTargetPrefix is exited. 
func (s *BaseLDEListener) ExitPassTargetPrefix(ctx *PassTargetPrefixContext) {} +// EnterCheckTargetPrefix is called when production checkTargetPrefix is entered. +func (s *BaseLDEListener) EnterCheckTargetPrefix(ctx *CheckTargetPrefixContext) {} + +// ExitCheckTargetPrefix is called when production checkTargetPrefix is exited. +func (s *BaseLDEListener) ExitCheckTargetPrefix(ctx *CheckTargetPrefixContext) {} + // EnterMayBePassTargetPrefix is called when production mayBePassTargetPrefix is entered. func (s *BaseLDEListener) EnterMayBePassTargetPrefix(ctx *MayBePassTargetPrefixContext) {} @@ -69,6 +75,18 @@ func (s *BaseLDEListener) EnterPassChars(ctx *PassCharsContext) {} // ExitPassChars is called when production passChars is exited. func (s *BaseLDEListener) ExitPassChars(ctx *PassCharsContext) {} +// EnterGoUntil is called when production goUntil is entered. +func (s *BaseLDEListener) EnterGoUntil(ctx *GoUntilContext) {} + +// ExitGoUntil is called when production goUntil is exited. +func (s *BaseLDEListener) ExitGoUntil(ctx *GoUntilContext) {} + +// EnterMayGoUntil is called when production mayGoUntil is entered. +func (s *BaseLDEListener) EnterMayGoUntil(ctx *MayGoUntilContext) {} + +// ExitMayGoUntil is called when production mayGoUntil is exited. +func (s *BaseLDEListener) ExitMayGoUntil(ctx *MayGoUntilContext) {} + // EnterPassUntil is called when production passUntil is entered. func (s *BaseLDEListener) EnterPassUntil(ctx *PassUntilContext) {} diff --git a/internal/parser/lde_lexer.go b/internal/parser/lde_lexer.go index af75acc..c5c3b5c 100644 --- a/internal/parser/lde_lexer.go +++ b/internal/parser/lde_lexer.go @@ -1,4 +1,4 @@ -// Code generated from LDE.g4 by ANTLR 4.7.1. DO NOT EDIT. +// Code generated from LDE.g4 by ANTLR 4.7.2. DO NOT EDIT. 
package parser @@ -14,73 +14,77 @@ var _ = fmt.Printf var _ = unicode.IsLetter var serializedLexerAtn = []uint16{ - 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 25, 149, + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 27, 158, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, - 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 3, 2, 3, 2, 3, 3, 3, 3, - 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, - 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, - 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 7, 17, 86, 10, 17, 12, 17, 14, 17, - 89, 11, 17, 3, 18, 3, 18, 7, 18, 93, 10, 18, 12, 18, 14, 18, 96, 11, 18, - 3, 18, 3, 18, 6, 18, 100, 10, 18, 13, 18, 14, 18, 101, 3, 19, 6, 19, 105, - 10, 19, 13, 19, 14, 19, 106, 3, 20, 3, 20, 3, 20, 3, 21, 3, 21, 3, 21, - 7, 21, 115, 10, 21, 12, 21, 14, 21, 118, 11, 21, 3, 21, 3, 21, 3, 22, 3, - 22, 3, 22, 3, 23, 3, 23, 3, 23, 7, 23, 128, 10, 23, 12, 23, 14, 23, 131, - 11, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 7, 25, - 141, 10, 25, 12, 25, 14, 25, 144, 11, 25, 3, 25, 3, 25, 3, 26, 3, 26, 4, - 116, 129, 2, 27, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, - 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, - 20, 39, 2, 41, 21, 43, 2, 45, 22, 47, 23, 49, 24, 51, 25, 3, 2, 9, 4, 2, - 62, 62, 64, 64, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, - 97, 99, 124, 3, 2, 50, 59, 4, 2, 11, 12, 15, 15, 5, 2, 11, 12, 15, 15, - 34, 34, 4, 2, 12, 12, 15, 15, 2, 155, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, - 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, - 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, - 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 
25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, - 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, - 37, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, - 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 3, 53, 3, 2, 2, 2, 5, 55, 3, 2, 2, - 2, 7, 57, 3, 2, 2, 2, 9, 59, 3, 2, 2, 2, 11, 61, 3, 2, 2, 2, 13, 63, 3, - 2, 2, 2, 15, 65, 3, 2, 2, 2, 17, 67, 3, 2, 2, 2, 19, 69, 3, 2, 2, 2, 21, - 71, 3, 2, 2, 2, 23, 73, 3, 2, 2, 2, 25, 75, 3, 2, 2, 2, 27, 77, 3, 2, 2, - 2, 29, 79, 3, 2, 2, 2, 31, 81, 3, 2, 2, 2, 33, 83, 3, 2, 2, 2, 35, 90, - 3, 2, 2, 2, 37, 104, 3, 2, 2, 2, 39, 108, 3, 2, 2, 2, 41, 111, 3, 2, 2, - 2, 43, 121, 3, 2, 2, 2, 45, 124, 3, 2, 2, 2, 47, 134, 3, 2, 2, 2, 49, 138, - 3, 2, 2, 2, 51, 147, 3, 2, 2, 2, 53, 54, 7, 63, 2, 2, 54, 4, 3, 2, 2, 2, - 55, 56, 7, 61, 2, 2, 56, 6, 3, 2, 2, 2, 57, 58, 7, 42, 2, 2, 58, 8, 3, - 2, 2, 2, 59, 60, 7, 43, 2, 2, 60, 10, 3, 2, 2, 2, 61, 62, 7, 44, 2, 2, - 62, 12, 3, 2, 2, 2, 63, 64, 7, 96, 2, 2, 64, 14, 3, 2, 2, 2, 65, 66, 7, - 93, 2, 2, 66, 16, 3, 2, 2, 2, 67, 68, 7, 95, 2, 2, 68, 18, 3, 2, 2, 2, - 69, 70, 7, 65, 2, 2, 70, 20, 3, 2, 2, 2, 71, 72, 7, 97, 2, 2, 72, 22, 3, - 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 24, 3, 2, 2, 2, 75, 76, 7, 39, 2, 2, - 76, 26, 3, 2, 2, 2, 77, 78, 7, 38, 2, 2, 78, 28, 3, 2, 2, 2, 79, 80, 7, - 128, 2, 2, 80, 30, 3, 2, 2, 2, 81, 82, 9, 2, 2, 2, 82, 32, 3, 2, 2, 2, - 83, 87, 9, 3, 2, 2, 84, 86, 9, 4, 2, 2, 85, 84, 3, 2, 2, 2, 86, 89, 3, - 2, 2, 2, 87, 85, 3, 2, 2, 2, 87, 88, 3, 2, 2, 2, 88, 34, 3, 2, 2, 2, 89, - 87, 3, 2, 2, 2, 90, 94, 9, 3, 2, 2, 91, 93, 9, 4, 2, 2, 92, 91, 3, 2, 2, - 2, 93, 96, 3, 2, 2, 2, 94, 92, 3, 2, 2, 2, 94, 95, 3, 2, 2, 2, 95, 97, - 3, 2, 2, 2, 96, 94, 3, 2, 2, 2, 97, 99, 7, 48, 2, 2, 98, 100, 9, 5, 2, - 2, 99, 98, 3, 2, 2, 2, 100, 101, 3, 2, 2, 2, 101, 99, 3, 2, 2, 2, 101, - 102, 3, 2, 2, 2, 102, 36, 3, 2, 2, 2, 103, 105, 9, 5, 2, 2, 104, 103, 3, - 2, 2, 2, 105, 106, 3, 2, 2, 2, 106, 104, 3, 2, 2, 2, 106, 107, 3, 2, 2, - 2, 107, 38, 3, 2, 2, 2, 108, 
109, 7, 94, 2, 2, 109, 110, 7, 36, 2, 2, 110, - 40, 3, 2, 2, 2, 111, 116, 7, 36, 2, 2, 112, 115, 5, 39, 20, 2, 113, 115, - 10, 6, 2, 2, 114, 112, 3, 2, 2, 2, 114, 113, 3, 2, 2, 2, 115, 118, 3, 2, - 2, 2, 116, 117, 3, 2, 2, 2, 116, 114, 3, 2, 2, 2, 117, 119, 3, 2, 2, 2, - 118, 116, 3, 2, 2, 2, 119, 120, 7, 36, 2, 2, 120, 42, 3, 2, 2, 2, 121, - 122, 7, 94, 2, 2, 122, 123, 7, 41, 2, 2, 123, 44, 3, 2, 2, 2, 124, 129, - 7, 41, 2, 2, 125, 128, 5, 43, 22, 2, 126, 128, 10, 6, 2, 2, 127, 125, 3, - 2, 2, 2, 127, 126, 3, 2, 2, 2, 128, 131, 3, 2, 2, 2, 129, 130, 3, 2, 2, - 2, 129, 127, 3, 2, 2, 2, 130, 132, 3, 2, 2, 2, 131, 129, 3, 2, 2, 2, 132, - 133, 7, 41, 2, 2, 133, 46, 3, 2, 2, 2, 134, 135, 9, 7, 2, 2, 135, 136, - 3, 2, 2, 2, 136, 137, 8, 24, 2, 2, 137, 48, 3, 2, 2, 2, 138, 142, 7, 37, - 2, 2, 139, 141, 10, 8, 2, 2, 140, 139, 3, 2, 2, 2, 141, 144, 3, 2, 2, 2, - 142, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 145, 3, 2, 2, 2, 144, - 142, 3, 2, 2, 2, 145, 146, 8, 25, 2, 2, 146, 50, 3, 2, 2, 2, 147, 148, - 7, 35, 2, 2, 148, 52, 3, 2, 2, 2, 12, 2, 87, 94, 101, 106, 114, 116, 127, - 129, 142, 3, 8, 2, 2, + 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, + 28, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, + 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, + 13, 3, 13, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, + 3, 18, 3, 18, 3, 19, 3, 19, 7, 19, 95, 10, 19, 12, 19, 14, 19, 98, 11, + 19, 3, 20, 3, 20, 7, 20, 102, 10, 20, 12, 20, 14, 20, 105, 11, 20, 3, 20, + 3, 20, 6, 20, 109, 10, 20, 13, 20, 14, 20, 110, 3, 21, 6, 21, 114, 10, + 21, 13, 21, 14, 21, 115, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 7, 23, + 124, 10, 23, 12, 23, 14, 23, 127, 11, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, + 24, 3, 25, 3, 25, 3, 25, 7, 25, 137, 10, 25, 12, 25, 14, 25, 140, 11, 25, + 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 150, 10, + 27, 12, 27, 14, 27, 153, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 4, 
125, 138, + 2, 29, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, + 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, + 21, 41, 22, 43, 2, 45, 23, 47, 2, 49, 24, 51, 25, 53, 26, 55, 27, 3, 2, + 9, 4, 2, 62, 62, 64, 64, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, + 92, 97, 97, 99, 124, 3, 2, 50, 59, 4, 2, 11, 12, 15, 15, 5, 2, 11, 12, + 15, 15, 34, 34, 4, 2, 12, 12, 15, 15, 2, 164, 2, 3, 3, 2, 2, 2, 2, 5, 3, + 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, + 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, + 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, + 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, + 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 45, 3, 2, + 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, + 2, 2, 2, 3, 57, 3, 2, 2, 2, 5, 59, 3, 2, 2, 2, 7, 61, 3, 2, 2, 2, 9, 63, + 3, 2, 2, 2, 11, 65, 3, 2, 2, 2, 13, 67, 3, 2, 2, 2, 15, 69, 3, 2, 2, 2, + 17, 71, 3, 2, 2, 2, 19, 73, 3, 2, 2, 2, 21, 75, 3, 2, 2, 2, 23, 77, 3, + 2, 2, 2, 25, 79, 3, 2, 2, 2, 27, 81, 3, 2, 2, 2, 29, 84, 3, 2, 2, 2, 31, + 86, 3, 2, 2, 2, 33, 88, 3, 2, 2, 2, 35, 90, 3, 2, 2, 2, 37, 92, 3, 2, 2, + 2, 39, 99, 3, 2, 2, 2, 41, 113, 3, 2, 2, 2, 43, 117, 3, 2, 2, 2, 45, 120, + 3, 2, 2, 2, 47, 130, 3, 2, 2, 2, 49, 133, 3, 2, 2, 2, 51, 143, 3, 2, 2, + 2, 53, 147, 3, 2, 2, 2, 55, 156, 3, 2, 2, 2, 57, 58, 7, 63, 2, 2, 58, 4, + 3, 2, 2, 2, 59, 60, 7, 61, 2, 2, 60, 6, 3, 2, 2, 2, 61, 62, 7, 42, 2, 2, + 62, 8, 3, 2, 2, 2, 63, 64, 7, 43, 2, 2, 64, 10, 3, 2, 2, 2, 65, 66, 7, + 44, 2, 2, 66, 12, 3, 2, 2, 2, 67, 68, 7, 96, 2, 2, 68, 14, 3, 2, 2, 2, + 69, 70, 7, 93, 2, 2, 70, 16, 3, 2, 2, 2, 71, 72, 7, 95, 2, 2, 72, 18, 3, + 2, 2, 2, 73, 74, 7, 66, 2, 2, 74, 20, 3, 2, 2, 2, 75, 76, 7, 65, 2, 2, + 76, 22, 3, 2, 2, 2, 77, 78, 7, 97, 2, 2, 78, 24, 3, 2, 2, 2, 79, 80, 7, + 60, 2, 2, 80, 26, 3, 2, 2, 2, 81, 82, 7, 48, 2, 2, 82, 
83, 7, 48, 2, 2, + 83, 28, 3, 2, 2, 2, 84, 85, 7, 39, 2, 2, 85, 30, 3, 2, 2, 2, 86, 87, 7, + 38, 2, 2, 87, 32, 3, 2, 2, 2, 88, 89, 7, 128, 2, 2, 89, 34, 3, 2, 2, 2, + 90, 91, 9, 2, 2, 2, 91, 36, 3, 2, 2, 2, 92, 96, 9, 3, 2, 2, 93, 95, 9, + 4, 2, 2, 94, 93, 3, 2, 2, 2, 95, 98, 3, 2, 2, 2, 96, 94, 3, 2, 2, 2, 96, + 97, 3, 2, 2, 2, 97, 38, 3, 2, 2, 2, 98, 96, 3, 2, 2, 2, 99, 103, 9, 3, + 2, 2, 100, 102, 9, 4, 2, 2, 101, 100, 3, 2, 2, 2, 102, 105, 3, 2, 2, 2, + 103, 101, 3, 2, 2, 2, 103, 104, 3, 2, 2, 2, 104, 106, 3, 2, 2, 2, 105, + 103, 3, 2, 2, 2, 106, 108, 7, 48, 2, 2, 107, 109, 9, 5, 2, 2, 108, 107, + 3, 2, 2, 2, 109, 110, 3, 2, 2, 2, 110, 108, 3, 2, 2, 2, 110, 111, 3, 2, + 2, 2, 111, 40, 3, 2, 2, 2, 112, 114, 9, 5, 2, 2, 113, 112, 3, 2, 2, 2, + 114, 115, 3, 2, 2, 2, 115, 113, 3, 2, 2, 2, 115, 116, 3, 2, 2, 2, 116, + 42, 3, 2, 2, 2, 117, 118, 7, 94, 2, 2, 118, 119, 7, 36, 2, 2, 119, 44, + 3, 2, 2, 2, 120, 125, 7, 36, 2, 2, 121, 124, 5, 43, 22, 2, 122, 124, 10, + 6, 2, 2, 123, 121, 3, 2, 2, 2, 123, 122, 3, 2, 2, 2, 124, 127, 3, 2, 2, + 2, 125, 126, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 126, 128, 3, 2, 2, 2, 127, + 125, 3, 2, 2, 2, 128, 129, 7, 36, 2, 2, 129, 46, 3, 2, 2, 2, 130, 131, + 7, 94, 2, 2, 131, 132, 7, 41, 2, 2, 132, 48, 3, 2, 2, 2, 133, 138, 7, 41, + 2, 2, 134, 137, 5, 47, 24, 2, 135, 137, 10, 6, 2, 2, 136, 134, 3, 2, 2, + 2, 136, 135, 3, 2, 2, 2, 137, 140, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 138, + 136, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 138, 3, 2, 2, 2, 141, 142, + 7, 41, 2, 2, 142, 50, 3, 2, 2, 2, 143, 144, 9, 7, 2, 2, 144, 145, 3, 2, + 2, 2, 145, 146, 8, 26, 2, 2, 146, 52, 3, 2, 2, 2, 147, 151, 7, 37, 2, 2, + 148, 150, 10, 8, 2, 2, 149, 148, 3, 2, 2, 2, 150, 153, 3, 2, 2, 2, 151, + 149, 3, 2, 2, 2, 151, 152, 3, 2, 2, 2, 152, 154, 3, 2, 2, 2, 153, 151, + 3, 2, 2, 2, 154, 155, 8, 27, 2, 2, 155, 54, 3, 2, 2, 2, 156, 157, 7, 35, + 2, 2, 157, 56, 3, 2, 2, 2, 12, 2, 96, 103, 110, 115, 123, 125, 136, 138, + 151, 3, 8, 2, 2, } var lexerDeserializer = 
antlr.NewATNDeserializer(nil) @@ -95,21 +99,22 @@ var lexerModeNames = []string{ } var lexerLiteralNames = []string{ - "", "'='", "';'", "'('", "')'", "'*'", "'^'", "'['", "']'", "'?'", "'_'", - "':'", "'%'", "'$'", "'~'", "", "", "", "", "", "", "", "", "'!'", + "", "'='", "';'", "'('", "')'", "'*'", "'^'", "'['", "']'", "'@'", "'?'", + "'_'", "':'", "'..'", "'%'", "'$'", "'~'", "", "", "", "", "", "", "", + "", "'!'", } var lexerSymbolicNames = []string{ - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "ComparisonOperator", + "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "ComparisonOperator", "Identifier", "IdentifierWithFraction", "IntLit", "StringLit", "CharLit", "WS", "LineComment", "Stress", } var lexerRuleNames = []string{ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6", "T__7", "T__8", - "T__9", "T__10", "T__11", "T__12", "T__13", "ComparisonOperator", "Identifier", - "IdentifierWithFraction", "IntLit", "EscapedQuote", "StringLit", "EscapedApo", - "CharLit", "WS", "LineComment", "Stress", + "T__9", "T__10", "T__11", "T__12", "T__13", "T__14", "T__15", "ComparisonOperator", + "Identifier", "IdentifierWithFraction", "IntLit", "EscapedQuote", "StringLit", + "EscapedApo", "CharLit", "WS", "LineComment", "Stress", } type LDELexer struct { @@ -161,13 +166,15 @@ const ( LDELexerT__11 = 12 LDELexerT__12 = 13 LDELexerT__13 = 14 - LDELexerComparisonOperator = 15 - LDELexerIdentifier = 16 - LDELexerIdentifierWithFraction = 17 - LDELexerIntLit = 18 - LDELexerStringLit = 19 - LDELexerCharLit = 20 - LDELexerWS = 21 - LDELexerLineComment = 22 - LDELexerStress = 23 + LDELexerT__14 = 15 + LDELexerT__15 = 16 + LDELexerComparisonOperator = 17 + LDELexerIdentifier = 18 + LDELexerIdentifierWithFraction = 19 + LDELexerIntLit = 20 + LDELexerStringLit = 21 + LDELexerCharLit = 22 + LDELexerWS = 23 + LDELexerLineComment = 24 + LDELexerStress = 25 ) diff --git a/internal/parser/lde_listener.go b/internal/parser/lde_listener.go index 
e2bce5e..8348a27 100644 --- a/internal/parser/lde_listener.go +++ b/internal/parser/lde_listener.go @@ -1,4 +1,4 @@ -// Code generated from LDE.g4 by ANTLR 4.7.1. DO NOT EDIT. +// Code generated from LDE.g4 by ANTLR 4.7.2. DO NOT EDIT. package parser // LDE @@ -26,12 +26,21 @@ type LDEListener interface { // EnterPassTargetPrefix is called when entering the passTargetPrefix production. EnterPassTargetPrefix(c *PassTargetPrefixContext) + // EnterCheckTargetPrefix is called when entering the checkTargetPrefix production. + EnterCheckTargetPrefix(c *CheckTargetPrefixContext) + // EnterMayBePassTargetPrefix is called when entering the mayBePassTargetPrefix production. EnterMayBePassTargetPrefix(c *MayBePassTargetPrefixContext) // EnterPassChars is called when entering the passChars production. EnterPassChars(c *PassCharsContext) + // EnterGoUntil is called when entering the goUntil production. + EnterGoUntil(c *GoUntilContext) + + // EnterMayGoUntil is called when entering the mayGoUntil production. + EnterMayGoUntil(c *MayGoUntilContext) + // EnterPassUntil is called when entering the passUntil production. EnterPassUntil(c *PassUntilContext) @@ -104,12 +113,21 @@ type LDEListener interface { // ExitPassTargetPrefix is called when exiting the passTargetPrefix production. ExitPassTargetPrefix(c *PassTargetPrefixContext) + // ExitCheckTargetPrefix is called when exiting the checkTargetPrefix production. + ExitCheckTargetPrefix(c *CheckTargetPrefixContext) + // ExitMayBePassTargetPrefix is called when exiting the mayBePassTargetPrefix production. ExitMayBePassTargetPrefix(c *MayBePassTargetPrefixContext) // ExitPassChars is called when exiting the passChars production. ExitPassChars(c *PassCharsContext) + // ExitGoUntil is called when exiting the goUntil production. + ExitGoUntil(c *GoUntilContext) + + // ExitMayGoUntil is called when exiting the mayGoUntil production. 
+ ExitMayGoUntil(c *MayGoUntilContext) + // ExitPassUntil is called when exiting the passUntil production. ExitPassUntil(c *PassUntilContext) diff --git a/internal/parser/lde_parser.go b/internal/parser/lde_parser.go index 5a67ec0..7bdd0a8 100644 --- a/internal/parser/lde_parser.go +++ b/internal/parser/lde_parser.go @@ -1,4 +1,4 @@ -// Code generated from LDE.g4 by ANTLR 4.7.1. DO NOT EDIT. +// Code generated from LDE.g4 by ANTLR 4.7.2. DO NOT EDIT. package parser // LDE @@ -16,123 +16,136 @@ var _ = reflect.Copy var _ = strconv.Itoa var parserATN = []uint16{ - 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 25, 236, + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 27, 262, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, - 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 3, 2, 3, 2, 3, - 2, 5, 2, 58, 10, 2, 3, 2, 3, 2, 7, 2, 62, 10, 2, 12, 2, 14, 2, 65, 11, - 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, - 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 87, 10, 4, 3, - 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, - 5, 3, 5, 3, 5, 5, 5, 104, 10, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, - 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 117, 10, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, - 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 5, 8, 129, 10, 8, 3, 9, 3, 9, 3, 9, 3, - 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, - 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, - 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, - 3, 15, 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, - 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, - 3, 19, 3, 19, 3, 19, 3, 19, 
5, 19, 191, 10, 19, 3, 20, 3, 20, 3, 21, 3, - 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, - 3, 21, 3, 21, 3, 21, 5, 21, 210, 10, 21, 3, 22, 3, 22, 3, 23, 3, 23, 3, - 23, 3, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, - 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 2, - 3, 2, 28, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, - 36, 38, 40, 42, 44, 46, 48, 50, 52, 2, 4, 3, 2, 21, 22, 3, 2, 18, 19, 2, - 237, 2, 57, 3, 2, 2, 2, 4, 66, 3, 2, 2, 2, 6, 86, 3, 2, 2, 2, 8, 103, 3, - 2, 2, 2, 10, 105, 3, 2, 2, 2, 12, 116, 3, 2, 2, 2, 14, 128, 3, 2, 2, 2, - 16, 130, 3, 2, 2, 2, 18, 136, 3, 2, 2, 2, 20, 139, 3, 2, 2, 2, 22, 143, - 3, 2, 2, 2, 24, 149, 3, 2, 2, 2, 26, 155, 3, 2, 2, 2, 28, 162, 3, 2, 2, - 2, 30, 169, 3, 2, 2, 2, 32, 174, 3, 2, 2, 2, 34, 180, 3, 2, 2, 2, 36, 190, - 3, 2, 2, 2, 38, 192, 3, 2, 2, 2, 40, 209, 3, 2, 2, 2, 42, 211, 3, 2, 2, - 2, 44, 213, 3, 2, 2, 2, 46, 219, 3, 2, 2, 2, 48, 224, 3, 2, 2, 2, 50, 229, - 3, 2, 2, 2, 52, 233, 3, 2, 2, 2, 54, 55, 8, 2, 1, 2, 55, 58, 5, 4, 3, 2, - 56, 58, 7, 2, 2, 3, 57, 54, 3, 2, 2, 2, 57, 56, 3, 2, 2, 2, 58, 63, 3, - 2, 2, 2, 59, 60, 12, 5, 2, 2, 60, 62, 5, 4, 3, 2, 61, 59, 3, 2, 2, 2, 62, - 65, 3, 2, 2, 2, 63, 61, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 3, 3, 2, 2, - 2, 65, 63, 3, 2, 2, 2, 66, 67, 7, 18, 2, 2, 67, 68, 7, 3, 2, 2, 68, 69, - 5, 6, 4, 2, 69, 70, 7, 4, 2, 2, 70, 5, 3, 2, 2, 2, 71, 72, 7, 25, 2, 2, - 72, 87, 5, 6, 4, 2, 73, 74, 7, 5, 2, 2, 74, 75, 5, 6, 4, 2, 75, 76, 7, - 6, 2, 2, 76, 77, 5, 6, 4, 2, 77, 87, 3, 2, 2, 2, 78, 79, 7, 5, 2, 2, 79, - 80, 5, 6, 4, 2, 80, 81, 7, 6, 2, 2, 81, 87, 3, 2, 2, 2, 82, 83, 5, 8, 5, - 2, 83, 84, 5, 6, 4, 2, 84, 87, 3, 2, 2, 2, 85, 87, 5, 8, 5, 2, 86, 71, - 3, 2, 2, 2, 86, 73, 3, 2, 2, 2, 86, 78, 3, 2, 2, 2, 86, 82, 3, 2, 2, 2, - 86, 85, 3, 2, 2, 2, 87, 7, 3, 2, 2, 2, 88, 104, 5, 12, 7, 2, 89, 104, 5, - 10, 6, 2, 90, 104, 5, 14, 8, 2, 91, 104, 5, 16, 9, 2, 92, 104, 5, 18, 10, - 2, 93, 
104, 5, 20, 11, 2, 94, 104, 5, 22, 12, 2, 95, 104, 5, 24, 13, 2, - 96, 104, 5, 26, 14, 2, 97, 104, 5, 28, 15, 2, 98, 104, 5, 30, 16, 2, 99, - 104, 5, 32, 17, 2, 100, 104, 5, 34, 18, 2, 101, 104, 5, 36, 19, 2, 102, - 104, 5, 38, 20, 2, 103, 88, 3, 2, 2, 2, 103, 89, 3, 2, 2, 2, 103, 90, 3, - 2, 2, 2, 103, 91, 3, 2, 2, 2, 103, 92, 3, 2, 2, 2, 103, 93, 3, 2, 2, 2, - 103, 94, 3, 2, 2, 2, 103, 95, 3, 2, 2, 2, 103, 96, 3, 2, 2, 2, 103, 97, - 3, 2, 2, 2, 103, 98, 3, 2, 2, 2, 103, 99, 3, 2, 2, 2, 103, 100, 3, 2, 2, - 2, 103, 101, 3, 2, 2, 2, 103, 102, 3, 2, 2, 2, 104, 9, 3, 2, 2, 2, 105, - 106, 7, 7, 2, 2, 106, 107, 7, 22, 2, 2, 107, 11, 3, 2, 2, 2, 108, 109, - 7, 8, 2, 2, 109, 110, 5, 42, 22, 2, 110, 111, 7, 9, 2, 2, 111, 112, 7, - 20, 2, 2, 112, 113, 7, 10, 2, 2, 113, 117, 3, 2, 2, 2, 114, 115, 7, 8, - 2, 2, 115, 117, 5, 42, 22, 2, 116, 108, 3, 2, 2, 2, 116, 114, 3, 2, 2, - 2, 117, 13, 3, 2, 2, 2, 118, 119, 7, 11, 2, 2, 119, 120, 7, 8, 2, 2, 120, - 121, 5, 42, 22, 2, 121, 122, 7, 9, 2, 2, 122, 123, 7, 20, 2, 2, 123, 124, - 7, 10, 2, 2, 124, 129, 3, 2, 2, 2, 125, 126, 7, 11, 2, 2, 126, 127, 7, - 8, 2, 2, 127, 129, 5, 42, 22, 2, 128, 118, 3, 2, 2, 2, 128, 125, 3, 2, - 2, 2, 129, 15, 3, 2, 2, 2, 130, 131, 7, 12, 2, 2, 131, 132, 7, 9, 2, 2, - 132, 133, 7, 20, 2, 2, 133, 134, 7, 13, 2, 2, 134, 135, 7, 10, 2, 2, 135, - 17, 3, 2, 2, 2, 136, 137, 7, 12, 2, 2, 137, 138, 5, 40, 21, 2, 138, 19, - 3, 2, 2, 2, 139, 140, 7, 11, 2, 2, 140, 141, 7, 12, 2, 2, 141, 142, 5, - 40, 21, 2, 142, 21, 3, 2, 2, 2, 143, 144, 7, 18, 2, 2, 144, 145, 7, 5, - 2, 2, 145, 146, 5, 52, 27, 2, 146, 147, 7, 6, 2, 2, 147, 148, 5, 40, 21, - 2, 148, 23, 3, 2, 2, 2, 149, 150, 7, 18, 2, 2, 150, 151, 7, 9, 2, 2, 151, - 152, 5, 52, 27, 2, 152, 153, 7, 10, 2, 2, 153, 154, 5, 40, 21, 2, 154, - 25, 3, 2, 2, 2, 155, 156, 7, 18, 2, 2, 156, 157, 7, 5, 2, 2, 157, 158, - 5, 52, 27, 2, 158, 159, 7, 6, 2, 2, 159, 160, 7, 11, 2, 2, 160, 161, 5, - 40, 21, 2, 161, 27, 3, 2, 2, 2, 162, 163, 7, 18, 2, 2, 163, 164, 7, 9, - 2, 2, 
164, 165, 5, 52, 27, 2, 165, 166, 7, 10, 2, 2, 166, 167, 7, 11, 2, - 2, 167, 168, 5, 40, 21, 2, 168, 29, 3, 2, 2, 2, 169, 170, 7, 18, 2, 2, - 170, 171, 7, 5, 2, 2, 171, 172, 5, 52, 27, 2, 172, 173, 7, 6, 2, 2, 173, - 31, 3, 2, 2, 2, 174, 175, 7, 11, 2, 2, 175, 176, 7, 18, 2, 2, 176, 177, - 7, 5, 2, 2, 177, 178, 5, 6, 4, 2, 178, 179, 7, 6, 2, 2, 179, 33, 3, 2, - 2, 2, 180, 181, 7, 11, 2, 2, 181, 182, 7, 5, 2, 2, 182, 183, 5, 6, 4, 2, - 183, 184, 7, 6, 2, 2, 184, 35, 3, 2, 2, 2, 185, 186, 7, 14, 2, 2, 186, - 191, 7, 20, 2, 2, 187, 188, 7, 14, 2, 2, 188, 189, 7, 17, 2, 2, 189, 191, - 7, 20, 2, 2, 190, 185, 3, 2, 2, 2, 190, 187, 3, 2, 2, 2, 191, 37, 3, 2, - 2, 2, 192, 193, 7, 15, 2, 2, 193, 39, 3, 2, 2, 2, 194, 195, 5, 42, 22, - 2, 195, 196, 5, 44, 23, 2, 196, 210, 3, 2, 2, 2, 197, 198, 5, 42, 22, 2, - 198, 199, 5, 46, 24, 2, 199, 210, 3, 2, 2, 2, 200, 201, 5, 42, 22, 2, 201, - 202, 5, 50, 26, 2, 202, 210, 3, 2, 2, 2, 203, 204, 5, 42, 22, 2, 204, 205, - 5, 48, 25, 2, 205, 210, 3, 2, 2, 2, 206, 210, 5, 42, 22, 2, 207, 208, 7, - 16, 2, 2, 208, 210, 5, 40, 21, 2, 209, 194, 3, 2, 2, 2, 209, 197, 3, 2, - 2, 2, 209, 200, 3, 2, 2, 2, 209, 203, 3, 2, 2, 2, 209, 206, 3, 2, 2, 2, - 209, 207, 3, 2, 2, 2, 210, 41, 3, 2, 2, 2, 211, 212, 9, 2, 2, 2, 212, 43, - 3, 2, 2, 2, 213, 214, 7, 9, 2, 2, 214, 215, 7, 20, 2, 2, 215, 216, 7, 13, - 2, 2, 216, 217, 7, 20, 2, 2, 217, 218, 7, 10, 2, 2, 218, 45, 3, 2, 2, 2, - 219, 220, 7, 9, 2, 2, 220, 221, 7, 13, 2, 2, 221, 222, 7, 20, 2, 2, 222, - 223, 7, 10, 2, 2, 223, 47, 3, 2, 2, 2, 224, 225, 7, 9, 2, 2, 225, 226, - 7, 20, 2, 2, 226, 227, 7, 13, 2, 2, 227, 228, 7, 10, 2, 2, 228, 49, 3, - 2, 2, 2, 229, 230, 7, 9, 2, 2, 230, 231, 7, 20, 2, 2, 231, 232, 7, 10, - 2, 2, 232, 51, 3, 2, 2, 2, 233, 234, 9, 3, 2, 2, 234, 53, 3, 2, 2, 2, 10, - 57, 63, 86, 103, 116, 128, 190, 209, + 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, + 29, 9, 29, 4, 30, 9, 30, 3, 2, 3, 2, 3, 2, 5, 2, 64, 10, 2, 3, 2, 3, 2, + 7, 2, 68, 10, 2, 12, 2, 14, 
2, 71, 11, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, + 3, 4, 3, 4, 3, 4, 5, 4, 93, 10, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, + 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, + 5, 5, 113, 10, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, + 3, 7, 3, 7, 5, 7, 126, 10, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, + 3, 8, 5, 8, 136, 10, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, + 3, 9, 3, 9, 5, 9, 148, 10, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, + 3, 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 13, 3, 13, 3, 13, 3, + 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 16, + 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, + 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, 3, 19, + 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 21, 3, + 21, 3, 21, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 5, 22, 217, + 10, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, + 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 5, 24, 236, 10, + 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, + 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, + 29, 3, 29, 3, 30, 3, 30, 3, 30, 2, 3, 2, 31, 2, 4, 6, 8, 10, 12, 14, 16, + 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, + 54, 56, 58, 2, 4, 3, 2, 23, 24, 3, 2, 20, 21, 2, 264, 2, 63, 3, 2, 2, 2, + 4, 72, 3, 2, 2, 2, 6, 92, 3, 2, 2, 2, 8, 112, 3, 2, 2, 2, 10, 114, 3, 2, + 2, 2, 12, 125, 3, 2, 2, 2, 14, 135, 3, 2, 2, 2, 16, 147, 3, 2, 2, 2, 18, + 149, 3, 2, 2, 2, 20, 155, 3, 2, 2, 2, 22, 158, 3, 2, 2, 2, 24, 162, 3, + 2, 2, 2, 26, 165, 3, 2, 2, 2, 28, 169, 3, 2, 2, 2, 30, 175, 3, 2, 2, 2, + 32, 181, 3, 2, 2, 2, 34, 188, 3, 2, 2, 2, 36, 195, 3, 2, 2, 2, 38, 200, + 3, 2, 2, 2, 40, 206, 3, 2, 2, 2, 42, 216, 3, 2, 2, 2, 44, 218, 3, 2, 2, + 2, 46, 235, 3, 2, 
2, 2, 48, 237, 3, 2, 2, 2, 50, 239, 3, 2, 2, 2, 52, 245, + 3, 2, 2, 2, 54, 250, 3, 2, 2, 2, 56, 255, 3, 2, 2, 2, 58, 259, 3, 2, 2, + 2, 60, 61, 8, 2, 1, 2, 61, 64, 5, 4, 3, 2, 62, 64, 7, 2, 2, 3, 63, 60, + 3, 2, 2, 2, 63, 62, 3, 2, 2, 2, 64, 69, 3, 2, 2, 2, 65, 66, 12, 5, 2, 2, + 66, 68, 5, 4, 3, 2, 67, 65, 3, 2, 2, 2, 68, 71, 3, 2, 2, 2, 69, 67, 3, + 2, 2, 2, 69, 70, 3, 2, 2, 2, 70, 3, 3, 2, 2, 2, 71, 69, 3, 2, 2, 2, 72, + 73, 7, 20, 2, 2, 73, 74, 7, 3, 2, 2, 74, 75, 5, 6, 4, 2, 75, 76, 7, 4, + 2, 2, 76, 5, 3, 2, 2, 2, 77, 78, 7, 27, 2, 2, 78, 93, 5, 6, 4, 2, 79, 80, + 7, 5, 2, 2, 80, 81, 5, 6, 4, 2, 81, 82, 7, 6, 2, 2, 82, 83, 5, 6, 4, 2, + 83, 93, 3, 2, 2, 2, 84, 85, 7, 5, 2, 2, 85, 86, 5, 6, 4, 2, 86, 87, 7, + 6, 2, 2, 87, 93, 3, 2, 2, 2, 88, 89, 5, 8, 5, 2, 89, 90, 5, 6, 4, 2, 90, + 93, 3, 2, 2, 2, 91, 93, 5, 8, 5, 2, 92, 77, 3, 2, 2, 2, 92, 79, 3, 2, 2, + 2, 92, 84, 3, 2, 2, 2, 92, 88, 3, 2, 2, 2, 92, 91, 3, 2, 2, 2, 93, 7, 3, + 2, 2, 2, 94, 113, 5, 12, 7, 2, 95, 113, 5, 14, 8, 2, 96, 113, 5, 10, 6, + 2, 97, 113, 5, 16, 9, 2, 98, 113, 5, 18, 10, 2, 99, 113, 5, 24, 13, 2, + 100, 113, 5, 26, 14, 2, 101, 113, 5, 20, 11, 2, 102, 113, 5, 22, 12, 2, + 103, 113, 5, 28, 15, 2, 104, 113, 5, 30, 16, 2, 105, 113, 5, 32, 17, 2, + 106, 113, 5, 34, 18, 2, 107, 113, 5, 36, 19, 2, 108, 113, 5, 38, 20, 2, + 109, 113, 5, 40, 21, 2, 110, 113, 5, 42, 22, 2, 111, 113, 5, 44, 23, 2, + 112, 94, 3, 2, 2, 2, 112, 95, 3, 2, 2, 2, 112, 96, 3, 2, 2, 2, 112, 97, + 3, 2, 2, 2, 112, 98, 3, 2, 2, 2, 112, 99, 3, 2, 2, 2, 112, 100, 3, 2, 2, + 2, 112, 101, 3, 2, 2, 2, 112, 102, 3, 2, 2, 2, 112, 103, 3, 2, 2, 2, 112, + 104, 3, 2, 2, 2, 112, 105, 3, 2, 2, 2, 112, 106, 3, 2, 2, 2, 112, 107, + 3, 2, 2, 2, 112, 108, 3, 2, 2, 2, 112, 109, 3, 2, 2, 2, 112, 110, 3, 2, + 2, 2, 112, 111, 3, 2, 2, 2, 113, 9, 3, 2, 2, 2, 114, 115, 7, 7, 2, 2, 115, + 116, 7, 24, 2, 2, 116, 11, 3, 2, 2, 2, 117, 118, 7, 8, 2, 2, 118, 119, + 5, 48, 25, 2, 119, 120, 7, 9, 2, 2, 120, 121, 7, 22, 2, 2, 121, 122, 7, + 10, 2, 2, 
122, 126, 3, 2, 2, 2, 123, 124, 7, 8, 2, 2, 124, 126, 5, 48, + 25, 2, 125, 117, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 126, 13, 3, 2, 2, 2, + 127, 128, 7, 11, 2, 2, 128, 129, 5, 48, 25, 2, 129, 130, 7, 9, 2, 2, 130, + 131, 7, 22, 2, 2, 131, 132, 7, 10, 2, 2, 132, 136, 3, 2, 2, 2, 133, 134, + 7, 11, 2, 2, 134, 136, 5, 48, 25, 2, 135, 127, 3, 2, 2, 2, 135, 133, 3, + 2, 2, 2, 136, 15, 3, 2, 2, 2, 137, 138, 7, 12, 2, 2, 138, 139, 7, 8, 2, + 2, 139, 140, 5, 48, 25, 2, 140, 141, 7, 9, 2, 2, 141, 142, 7, 22, 2, 2, + 142, 143, 7, 10, 2, 2, 143, 148, 3, 2, 2, 2, 144, 145, 7, 12, 2, 2, 145, + 146, 7, 8, 2, 2, 146, 148, 5, 48, 25, 2, 147, 137, 3, 2, 2, 2, 147, 144, + 3, 2, 2, 2, 148, 17, 3, 2, 2, 2, 149, 150, 7, 13, 2, 2, 150, 151, 7, 9, + 2, 2, 151, 152, 7, 22, 2, 2, 152, 153, 7, 14, 2, 2, 153, 154, 7, 10, 2, + 2, 154, 19, 3, 2, 2, 2, 155, 156, 7, 15, 2, 2, 156, 157, 5, 46, 24, 2, + 157, 21, 3, 2, 2, 2, 158, 159, 7, 12, 2, 2, 159, 160, 7, 15, 2, 2, 160, + 161, 5, 46, 24, 2, 161, 23, 3, 2, 2, 2, 162, 163, 7, 13, 2, 2, 163, 164, + 5, 46, 24, 2, 164, 25, 3, 2, 2, 2, 165, 166, 7, 12, 2, 2, 166, 167, 7, + 13, 2, 2, 167, 168, 5, 46, 24, 2, 168, 27, 3, 2, 2, 2, 169, 170, 7, 20, + 2, 2, 170, 171, 7, 5, 2, 2, 171, 172, 5, 58, 30, 2, 172, 173, 7, 6, 2, + 2, 173, 174, 5, 46, 24, 2, 174, 29, 3, 2, 2, 2, 175, 176, 7, 20, 2, 2, + 176, 177, 7, 9, 2, 2, 177, 178, 5, 58, 30, 2, 178, 179, 7, 10, 2, 2, 179, + 180, 5, 46, 24, 2, 180, 31, 3, 2, 2, 2, 181, 182, 7, 20, 2, 2, 182, 183, + 7, 5, 2, 2, 183, 184, 5, 58, 30, 2, 184, 185, 7, 6, 2, 2, 185, 186, 7, + 12, 2, 2, 186, 187, 5, 46, 24, 2, 187, 33, 3, 2, 2, 2, 188, 189, 7, 20, + 2, 2, 189, 190, 7, 9, 2, 2, 190, 191, 5, 58, 30, 2, 191, 192, 7, 10, 2, + 2, 192, 193, 7, 12, 2, 2, 193, 194, 5, 46, 24, 2, 194, 35, 3, 2, 2, 2, + 195, 196, 7, 20, 2, 2, 196, 197, 7, 5, 2, 2, 197, 198, 5, 58, 30, 2, 198, + 199, 7, 6, 2, 2, 199, 37, 3, 2, 2, 2, 200, 201, 7, 12, 2, 2, 201, 202, + 7, 20, 2, 2, 202, 203, 7, 5, 2, 2, 203, 204, 5, 6, 4, 2, 204, 205, 7, 6, + 2, 2, 
205, 39, 3, 2, 2, 2, 206, 207, 7, 12, 2, 2, 207, 208, 7, 5, 2, 2, + 208, 209, 5, 6, 4, 2, 209, 210, 7, 6, 2, 2, 210, 41, 3, 2, 2, 2, 211, 212, + 7, 16, 2, 2, 212, 217, 7, 22, 2, 2, 213, 214, 7, 16, 2, 2, 214, 215, 7, + 19, 2, 2, 215, 217, 7, 22, 2, 2, 216, 211, 3, 2, 2, 2, 216, 213, 3, 2, + 2, 2, 217, 43, 3, 2, 2, 2, 218, 219, 7, 17, 2, 2, 219, 45, 3, 2, 2, 2, + 220, 221, 5, 48, 25, 2, 221, 222, 5, 50, 26, 2, 222, 236, 3, 2, 2, 2, 223, + 224, 5, 48, 25, 2, 224, 225, 5, 52, 27, 2, 225, 236, 3, 2, 2, 2, 226, 227, + 5, 48, 25, 2, 227, 228, 5, 56, 29, 2, 228, 236, 3, 2, 2, 2, 229, 230, 5, + 48, 25, 2, 230, 231, 5, 54, 28, 2, 231, 236, 3, 2, 2, 2, 232, 236, 5, 48, + 25, 2, 233, 234, 7, 18, 2, 2, 234, 236, 5, 46, 24, 2, 235, 220, 3, 2, 2, + 2, 235, 223, 3, 2, 2, 2, 235, 226, 3, 2, 2, 2, 235, 229, 3, 2, 2, 2, 235, + 232, 3, 2, 2, 2, 235, 233, 3, 2, 2, 2, 236, 47, 3, 2, 2, 2, 237, 238, 9, + 2, 2, 2, 238, 49, 3, 2, 2, 2, 239, 240, 7, 9, 2, 2, 240, 241, 7, 22, 2, + 2, 241, 242, 7, 14, 2, 2, 242, 243, 7, 22, 2, 2, 243, 244, 7, 10, 2, 2, + 244, 51, 3, 2, 2, 2, 245, 246, 7, 9, 2, 2, 246, 247, 7, 14, 2, 2, 247, + 248, 7, 22, 2, 2, 248, 249, 7, 10, 2, 2, 249, 53, 3, 2, 2, 2, 250, 251, + 7, 9, 2, 2, 251, 252, 7, 22, 2, 2, 252, 253, 7, 14, 2, 2, 253, 254, 7, + 10, 2, 2, 254, 55, 3, 2, 2, 2, 255, 256, 7, 9, 2, 2, 256, 257, 7, 22, 2, + 2, 257, 258, 7, 10, 2, 2, 258, 57, 3, 2, 2, 2, 259, 260, 9, 3, 2, 2, 260, + 59, 3, 2, 2, 2, 11, 63, 69, 92, 112, 125, 135, 147, 216, 235, } var deserializer = antlr.NewATNDeserializer(nil) var deserializedATN = deserializer.DeserializeFromUInt16(parserATN) var literalNames = []string{ - "", "'='", "';'", "'('", "')'", "'*'", "'^'", "'['", "']'", "'?'", "'_'", - "':'", "'%'", "'$'", "'~'", "", "", "", "", "", "", "", "", "'!'", + "", "'='", "';'", "'('", "')'", "'*'", "'^'", "'['", "']'", "'@'", "'?'", + "'_'", "':'", "'..'", "'%'", "'$'", "'~'", "", "", "", "", "", "", "", + "", "'!'", } var symbolicNames = []string{ - "", "", "", "", "", "", "", "", 
"", "", "", "", "", "", "", "ComparisonOperator", + "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "ComparisonOperator", "Identifier", "IdentifierWithFraction", "IntLit", "StringLit", "CharLit", "WS", "LineComment", "Stress", } var ruleNames = []string{ "rules", "atomicRule", "baseAction", "atomicAction", "passHeadingCharacters", - "passTargetPrefix", "mayBePassTargetPrefix", "passChars", "passUntil", - "mayPassUntil", "takeUntil", "takeUntilIncluding", "takeUntilOrRest", "takeUntilIncludingOrRest", - "takeUntilRest", "optionalNamedArea", "optionalArea", "restCheck", "atEnd", - "target", "targetLit", "bound", "limit", "jump", "exact", "fieldType", + "passTargetPrefix", "checkTargetPrefix", "mayBePassTargetPrefix", "passChars", + "goUntil", "mayGoUntil", "passUntil", "mayPassUntil", "takeUntil", "takeUntilIncluding", + "takeUntilOrRest", "takeUntilIncludingOrRest", "takeUntilRest", "optionalNamedArea", + "optionalArea", "restCheck", "atEnd", "target", "targetLit", "bound", "limit", + "jump", "exact", "fieldType", } var decisionToDFA = make([]*antlr.DFA, len(deserializedATN.DecisionToState)) @@ -177,15 +190,17 @@ const ( LDEParserT__11 = 12 LDEParserT__12 = 13 LDEParserT__13 = 14 - LDEParserComparisonOperator = 15 - LDEParserIdentifier = 16 - LDEParserIdentifierWithFraction = 17 - LDEParserIntLit = 18 - LDEParserStringLit = 19 - LDEParserCharLit = 20 - LDEParserWS = 21 - LDEParserLineComment = 22 - LDEParserStress = 23 + LDEParserT__14 = 15 + LDEParserT__15 = 16 + LDEParserComparisonOperator = 17 + LDEParserIdentifier = 18 + LDEParserIdentifierWithFraction = 19 + LDEParserIntLit = 20 + LDEParserStringLit = 21 + LDEParserCharLit = 22 + LDEParserWS = 23 + LDEParserLineComment = 24 + LDEParserStress = 25 ) // LDEParser rules. 
@@ -196,26 +211,29 @@ const ( LDEParserRULE_atomicAction = 3 LDEParserRULE_passHeadingCharacters = 4 LDEParserRULE_passTargetPrefix = 5 - LDEParserRULE_mayBePassTargetPrefix = 6 - LDEParserRULE_passChars = 7 - LDEParserRULE_passUntil = 8 - LDEParserRULE_mayPassUntil = 9 - LDEParserRULE_takeUntil = 10 - LDEParserRULE_takeUntilIncluding = 11 - LDEParserRULE_takeUntilOrRest = 12 - LDEParserRULE_takeUntilIncludingOrRest = 13 - LDEParserRULE_takeUntilRest = 14 - LDEParserRULE_optionalNamedArea = 15 - LDEParserRULE_optionalArea = 16 - LDEParserRULE_restCheck = 17 - LDEParserRULE_atEnd = 18 - LDEParserRULE_target = 19 - LDEParserRULE_targetLit = 20 - LDEParserRULE_bound = 21 - LDEParserRULE_limit = 22 - LDEParserRULE_jump = 23 - LDEParserRULE_exact = 24 - LDEParserRULE_fieldType = 25 + LDEParserRULE_checkTargetPrefix = 6 + LDEParserRULE_mayBePassTargetPrefix = 7 + LDEParserRULE_passChars = 8 + LDEParserRULE_goUntil = 9 + LDEParserRULE_mayGoUntil = 10 + LDEParserRULE_passUntil = 11 + LDEParserRULE_mayPassUntil = 12 + LDEParserRULE_takeUntil = 13 + LDEParserRULE_takeUntilIncluding = 14 + LDEParserRULE_takeUntilOrRest = 15 + LDEParserRULE_takeUntilIncludingOrRest = 16 + LDEParserRULE_takeUntilRest = 17 + LDEParserRULE_optionalNamedArea = 18 + LDEParserRULE_optionalArea = 19 + LDEParserRULE_restCheck = 20 + LDEParserRULE_atEnd = 21 + LDEParserRULE_target = 22 + LDEParserRULE_targetLit = 23 + LDEParserRULE_bound = 24 + LDEParserRULE_limit = 25 + LDEParserRULE_jump = 26 + LDEParserRULE_exact = 27 + LDEParserRULE_fieldType = 28 ) // IRulesContext is an interface to support dynamic dispatch. 
@@ -332,19 +350,19 @@ func (p *LDEParser) rules(_p int) (localctx IRulesContext) { var _alt int p.EnterOuterAlt(localctx, 1) - p.SetState(55) + p.SetState(61) p.GetErrorHandler().Sync(p) switch p.GetTokenStream().LA(1) { case LDEParserIdentifier: { - p.SetState(53) + p.SetState(59) p.AtomicRule() } case LDEParserEOF: { - p.SetState(54) + p.SetState(60) p.Match(LDEParserEOF) } @@ -352,7 +370,7 @@ func (p *LDEParser) rules(_p int) (localctx IRulesContext) { panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) } p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) - p.SetState(61) + p.SetState(67) p.GetErrorHandler().Sync(p) _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 1, p.GetParserRuleContext()) @@ -364,18 +382,18 @@ func (p *LDEParser) rules(_p int) (localctx IRulesContext) { _prevctx = localctx localctx = NewRulesContext(p, _parentctx, _parentState) p.PushNewRecursionContext(localctx, _startState, LDEParserRULE_rules) - p.SetState(57) + p.SetState(63) if !(p.Precpred(p.GetParserRuleContext(), 3)) { panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", "")) } { - p.SetState(58) + p.SetState(64) p.AtomicRule() } } - p.SetState(63) + p.SetState(69) p.GetErrorHandler().Sync(p) _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 1, p.GetParserRuleContext()) } @@ -477,19 +495,19 @@ func (p *LDEParser) AtomicRule() (localctx IAtomicRuleContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(64) + p.SetState(70) p.Match(LDEParserIdentifier) } { - p.SetState(65) + p.SetState(71) p.Match(LDEParserT__0) } { - p.SetState(66) + p.SetState(72) p.BaseAction() } { - p.SetState(67) + p.SetState(73) p.Match(LDEParserT__1) } @@ -611,69 +629,69 @@ func (p *LDEParser) BaseAction() (localctx IBaseActionContext) { } }() - p.SetState(84) + p.SetState(90) p.GetErrorHandler().Sync(p) switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 2, p.GetParserRuleContext()) { case 1: p.EnterOuterAlt(localctx, 1) { 
- p.SetState(69) + p.SetState(75) p.Match(LDEParserStress) } { - p.SetState(70) + p.SetState(76) p.BaseAction() } case 2: p.EnterOuterAlt(localctx, 2) { - p.SetState(71) + p.SetState(77) p.Match(LDEParserT__2) } { - p.SetState(72) + p.SetState(78) p.BaseAction() } { - p.SetState(73) + p.SetState(79) p.Match(LDEParserT__3) } { - p.SetState(74) + p.SetState(80) p.BaseAction() } case 3: p.EnterOuterAlt(localctx, 3) { - p.SetState(76) + p.SetState(82) p.Match(LDEParserT__2) } { - p.SetState(77) + p.SetState(83) p.BaseAction() } { - p.SetState(78) + p.SetState(84) p.Match(LDEParserT__3) } case 4: p.EnterOuterAlt(localctx, 4) { - p.SetState(80) + p.SetState(86) p.AtomicAction() } { - p.SetState(81) + p.SetState(87) p.BaseAction() } case 5: p.EnterOuterAlt(localctx, 5) { - p.SetState(83) + p.SetState(89) p.AtomicAction() } @@ -730,6 +748,16 @@ func (s *AtomicActionContext) PassTargetPrefix() IPassTargetPrefixContext { return t.(IPassTargetPrefixContext) } +func (s *AtomicActionContext) CheckTargetPrefix() ICheckTargetPrefixContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ICheckTargetPrefixContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(ICheckTargetPrefixContext) +} + func (s *AtomicActionContext) PassHeadingCharacters() IPassHeadingCharactersContext { var t = s.GetTypedRuleContext(reflect.TypeOf((*IPassHeadingCharactersContext)(nil)).Elem(), 0) @@ -780,6 +808,26 @@ func (s *AtomicActionContext) MayPassUntil() IMayPassUntilContext { return t.(IMayPassUntilContext) } +func (s *AtomicActionContext) GoUntil() IGoUntilContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IGoUntilContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IGoUntilContext) +} + +func (s *AtomicActionContext) MayGoUntil() IMayGoUntilContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*IMayGoUntilContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(IMayGoUntilContext) +} + func (s *AtomicActionContext) TakeUntil() 
ITakeUntilContext { var t = s.GetTypedRuleContext(reflect.TypeOf((*ITakeUntilContext)(nil)).Elem(), 0) @@ -910,111 +958,132 @@ func (p *LDEParser) AtomicAction() (localctx IAtomicActionContext) { } }() - p.SetState(101) + p.SetState(110) p.GetErrorHandler().Sync(p) switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) { case 1: p.EnterOuterAlt(localctx, 1) { - p.SetState(86) + p.SetState(92) p.PassTargetPrefix() } case 2: p.EnterOuterAlt(localctx, 2) { - p.SetState(87) - p.PassHeadingCharacters() + p.SetState(93) + p.CheckTargetPrefix() } case 3: p.EnterOuterAlt(localctx, 3) { - p.SetState(88) - p.MayBePassTargetPrefix() + p.SetState(94) + p.PassHeadingCharacters() } case 4: p.EnterOuterAlt(localctx, 4) { - p.SetState(89) - p.PassChars() + p.SetState(95) + p.MayBePassTargetPrefix() } case 5: p.EnterOuterAlt(localctx, 5) { - p.SetState(90) - p.PassUntil() + p.SetState(96) + p.PassChars() } case 6: p.EnterOuterAlt(localctx, 6) { - p.SetState(91) - p.MayPassUntil() + p.SetState(97) + p.PassUntil() } case 7: p.EnterOuterAlt(localctx, 7) { - p.SetState(92) - p.TakeUntil() + p.SetState(98) + p.MayPassUntil() } case 8: p.EnterOuterAlt(localctx, 8) { - p.SetState(93) - p.TakeUntilIncluding() + p.SetState(99) + p.GoUntil() } case 9: p.EnterOuterAlt(localctx, 9) { - p.SetState(94) - p.TakeUntilOrRest() + p.SetState(100) + p.MayGoUntil() } case 10: p.EnterOuterAlt(localctx, 10) { - p.SetState(95) - p.TakeUntilIncludingOrRest() + p.SetState(101) + p.TakeUntil() } case 11: p.EnterOuterAlt(localctx, 11) { - p.SetState(96) - p.TakeUntilRest() + p.SetState(102) + p.TakeUntilIncluding() } case 12: p.EnterOuterAlt(localctx, 12) { - p.SetState(97) - p.OptionalNamedArea() + p.SetState(103) + p.TakeUntilOrRest() } case 13: p.EnterOuterAlt(localctx, 13) { - p.SetState(98) - p.OptionalArea() + p.SetState(104) + p.TakeUntilIncludingOrRest() } case 14: p.EnterOuterAlt(localctx, 14) { - p.SetState(99) - p.RestCheck() + p.SetState(105) + p.TakeUntilRest() 
} case 15: p.EnterOuterAlt(localctx, 15) { - p.SetState(100) + p.SetState(106) + p.OptionalNamedArea() + } + + case 16: + p.EnterOuterAlt(localctx, 16) + { + p.SetState(107) + p.OptionalArea() + } + + case 17: + p.EnterOuterAlt(localctx, 17) + { + p.SetState(108) + p.RestCheck() + } + + case 18: + p.EnterOuterAlt(localctx, 18) + { + p.SetState(109) p.AtEnd() } @@ -1107,11 +1176,11 @@ func (p *LDEParser) PassHeadingCharacters() (localctx IPassHeadingCharactersCont p.EnterOuterAlt(localctx, 1) { - p.SetState(103) + p.SetState(112) p.Match(LDEParserT__4) } { - p.SetState(104) + p.SetState(113) p.Match(LDEParserCharLit) } @@ -1210,40 +1279,174 @@ func (p *LDEParser) PassTargetPrefix() (localctx IPassTargetPrefixContext) { } }() - p.SetState(114) + p.SetState(123) p.GetErrorHandler().Sync(p) switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) { case 1: p.EnterOuterAlt(localctx, 1) { - p.SetState(106) + p.SetState(115) p.Match(LDEParserT__5) } { - p.SetState(107) + p.SetState(116) p.TargetLit() } { - p.SetState(108) + p.SetState(117) p.Match(LDEParserT__6) } { - p.SetState(109) + p.SetState(118) p.Match(LDEParserIntLit) } { - p.SetState(110) + p.SetState(119) p.Match(LDEParserT__7) } case 2: p.EnterOuterAlt(localctx, 2) { - p.SetState(112) + p.SetState(121) p.Match(LDEParserT__5) } { - p.SetState(113) + p.SetState(122) + p.TargetLit() + } + + } + + return localctx +} + +// ICheckTargetPrefixContext is an interface to support dynamic dispatch. +type ICheckTargetPrefixContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // IsCheckTargetPrefixContext differentiates from other interfaces. 
+ IsCheckTargetPrefixContext() +} + +type CheckTargetPrefixContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCheckTargetPrefixContext() *CheckTargetPrefixContext { + var p = new(CheckTargetPrefixContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = LDEParserRULE_checkTargetPrefix + return p +} + +func (*CheckTargetPrefixContext) IsCheckTargetPrefixContext() {} + +func NewCheckTargetPrefixContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CheckTargetPrefixContext { + var p = new(CheckTargetPrefixContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = LDEParserRULE_checkTargetPrefix + + return p +} + +func (s *CheckTargetPrefixContext) GetParser() antlr.Parser { return s.parser } + +func (s *CheckTargetPrefixContext) TargetLit() ITargetLitContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ITargetLitContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(ITargetLitContext) +} + +func (s *CheckTargetPrefixContext) IntLit() antlr.TerminalNode { + return s.GetToken(LDEParserIntLit, 0) +} + +func (s *CheckTargetPrefixContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CheckTargetPrefixContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *CheckTargetPrefixContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(LDEListener); ok { + listenerT.EnterCheckTargetPrefix(s) + } +} + +func (s *CheckTargetPrefixContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(LDEListener); ok { + listenerT.ExitCheckTargetPrefix(s) + } +} + +func (p *LDEParser) CheckTargetPrefix() (localctx ICheckTargetPrefixContext) { + localctx = NewCheckTargetPrefixContext(p, p.GetParserRuleContext(), p.GetState()) + 
p.EnterRule(localctx, 12, LDEParserRULE_checkTargetPrefix) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(133) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(125) + p.Match(LDEParserT__8) + } + { + p.SetState(126) + p.TargetLit() + } + { + p.SetState(127) + p.Match(LDEParserT__6) + } + { + p.SetState(128) + p.Match(LDEParserIntLit) + } + { + p.SetState(129) + p.Match(LDEParserT__7) + } + + case 2: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(131) + p.Match(LDEParserT__8) + } + { + p.SetState(132) p.TargetLit() } @@ -1326,7 +1529,7 @@ func (s *MayBePassTargetPrefixContext) ExitRule(listener antlr.ParseTreeListener func (p *LDEParser) MayBePassTargetPrefix() (localctx IMayBePassTargetPrefixContext) { localctx = NewMayBePassTargetPrefixContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 12, LDEParserRULE_mayBePassTargetPrefix) + p.EnterRule(localctx, 14, LDEParserRULE_mayBePassTargetPrefix) defer func() { p.ExitRule() @@ -1344,48 +1547,48 @@ func (p *LDEParser) MayBePassTargetPrefix() (localctx IMayBePassTargetPrefixCont } }() - p.SetState(126) + p.SetState(145) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext()) { + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 6, p.GetParserRuleContext()) { case 1: p.EnterOuterAlt(localctx, 1) { - p.SetState(116) - p.Match(LDEParserT__8) + p.SetState(135) + p.Match(LDEParserT__9) } { - p.SetState(117) + p.SetState(136) p.Match(LDEParserT__5) } { - p.SetState(118) + p.SetState(137) p.TargetLit() } { - p.SetState(119) + p.SetState(138) 
p.Match(LDEParserT__6) } { - p.SetState(120) + p.SetState(139) p.Match(LDEParserIntLit) } { - p.SetState(121) + p.SetState(140) p.Match(LDEParserT__7) } case 2: p.EnterOuterAlt(localctx, 2) { - p.SetState(123) - p.Match(LDEParserT__8) + p.SetState(142) + p.Match(LDEParserT__9) } { - p.SetState(124) + p.SetState(143) p.Match(LDEParserT__5) } { - p.SetState(125) + p.SetState(144) p.TargetLit() } @@ -1458,7 +1661,7 @@ func (s *PassCharsContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) PassChars() (localctx IPassCharsContext) { localctx = NewPassCharsContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 14, LDEParserRULE_passChars) + p.EnterRule(localctx, 16, LDEParserRULE_passChars) defer func() { p.ExitRule() @@ -1478,29 +1681,235 @@ func (p *LDEParser) PassChars() (localctx IPassCharsContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(128) - p.Match(LDEParserT__9) + p.SetState(147) + p.Match(LDEParserT__10) } { - p.SetState(129) + p.SetState(148) p.Match(LDEParserT__6) } { - p.SetState(130) + p.SetState(149) p.Match(LDEParserIntLit) } { - p.SetState(131) - p.Match(LDEParserT__10) + p.SetState(150) + p.Match(LDEParserT__11) } { - p.SetState(132) + p.SetState(151) p.Match(LDEParserT__7) } return localctx } +// IGoUntilContext is an interface to support dynamic dispatch. +type IGoUntilContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // IsGoUntilContext differentiates from other interfaces. 
+ IsGoUntilContext() +} + +type GoUntilContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyGoUntilContext() *GoUntilContext { + var p = new(GoUntilContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = LDEParserRULE_goUntil + return p +} + +func (*GoUntilContext) IsGoUntilContext() {} + +func NewGoUntilContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *GoUntilContext { + var p = new(GoUntilContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = LDEParserRULE_goUntil + + return p +} + +func (s *GoUntilContext) GetParser() antlr.Parser { return s.parser } + +func (s *GoUntilContext) Target() ITargetContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ITargetContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(ITargetContext) +} + +func (s *GoUntilContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *GoUntilContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *GoUntilContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(LDEListener); ok { + listenerT.EnterGoUntil(s) + } +} + +func (s *GoUntilContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(LDEListener); ok { + listenerT.ExitGoUntil(s) + } +} + +func (p *LDEParser) GoUntil() (localctx IGoUntilContext) { + localctx = NewGoUntilContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 18, LDEParserRULE_goUntil) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) 
+ { + p.SetState(153) + p.Match(LDEParserT__12) + } + { + p.SetState(154) + p.Target() + } + + return localctx +} + +// IMayGoUntilContext is an interface to support dynamic dispatch. +type IMayGoUntilContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // IsMayGoUntilContext differentiates from other interfaces. + IsMayGoUntilContext() +} + +type MayGoUntilContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyMayGoUntilContext() *MayGoUntilContext { + var p = new(MayGoUntilContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = LDEParserRULE_mayGoUntil + return p +} + +func (*MayGoUntilContext) IsMayGoUntilContext() {} + +func NewMayGoUntilContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MayGoUntilContext { + var p = new(MayGoUntilContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = LDEParserRULE_mayGoUntil + + return p +} + +func (s *MayGoUntilContext) GetParser() antlr.Parser { return s.parser } + +func (s *MayGoUntilContext) Target() ITargetContext { + var t = s.GetTypedRuleContext(reflect.TypeOf((*ITargetContext)(nil)).Elem(), 0) + + if t == nil { + return nil + } + + return t.(ITargetContext) +} + +func (s *MayGoUntilContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MayGoUntilContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *MayGoUntilContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(LDEListener); ok { + listenerT.EnterMayGoUntil(s) + } +} + +func (s *MayGoUntilContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(LDEListener); ok { + listenerT.ExitMayGoUntil(s) + } +} + +func (p *LDEParser) MayGoUntil() (localctx IMayGoUntilContext) { + 
localctx = NewMayGoUntilContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 20, LDEParserRULE_mayGoUntil) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(156) + p.Match(LDEParserT__9) + } + { + p.SetState(157) + p.Match(LDEParserT__12) + } + { + p.SetState(158) + p.Target() + } + + return localctx +} + // IPassUntilContext is an interface to support dynamic dispatch. type IPassUntilContext interface { antlr.ParserRuleContext @@ -1571,7 +1980,7 @@ func (s *PassUntilContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) PassUntil() (localctx IPassUntilContext) { localctx = NewPassUntilContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 16, LDEParserRULE_passUntil) + p.EnterRule(localctx, 22, LDEParserRULE_passUntil) defer func() { p.ExitRule() @@ -1591,11 +2000,11 @@ func (p *LDEParser) PassUntil() (localctx IPassUntilContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(134) - p.Match(LDEParserT__9) + p.SetState(160) + p.Match(LDEParserT__10) } { - p.SetState(135) + p.SetState(161) p.Target() } @@ -1672,7 +2081,7 @@ func (s *MayPassUntilContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) MayPassUntil() (localctx IMayPassUntilContext) { localctx = NewMayPassUntilContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 18, LDEParserRULE_mayPassUntil) + p.EnterRule(localctx, 24, LDEParserRULE_mayPassUntil) defer func() { p.ExitRule() @@ -1692,15 +2101,15 @@ func (p *LDEParser) MayPassUntil() (localctx IMayPassUntilContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(137) - p.Match(LDEParserT__8) + p.SetState(163) + p.Match(LDEParserT__9) } { - p.SetState(138) - 
p.Match(LDEParserT__9) + p.SetState(164) + p.Match(LDEParserT__10) } { - p.SetState(139) + p.SetState(165) p.Target() } @@ -1791,7 +2200,7 @@ func (s *TakeUntilContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) TakeUntil() (localctx ITakeUntilContext) { localctx = NewTakeUntilContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 20, LDEParserRULE_takeUntil) + p.EnterRule(localctx, 26, LDEParserRULE_takeUntil) defer func() { p.ExitRule() @@ -1811,23 +2220,23 @@ func (p *LDEParser) TakeUntil() (localctx ITakeUntilContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(141) + p.SetState(167) p.Match(LDEParserIdentifier) } { - p.SetState(142) + p.SetState(168) p.Match(LDEParserT__2) } { - p.SetState(143) + p.SetState(169) p.FieldType() } { - p.SetState(144) + p.SetState(170) p.Match(LDEParserT__3) } { - p.SetState(145) + p.SetState(171) p.Target() } @@ -1918,7 +2327,7 @@ func (s *TakeUntilIncludingContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) TakeUntilIncluding() (localctx ITakeUntilIncludingContext) { localctx = NewTakeUntilIncludingContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 22, LDEParserRULE_takeUntilIncluding) + p.EnterRule(localctx, 28, LDEParserRULE_takeUntilIncluding) defer func() { p.ExitRule() @@ -1938,23 +2347,23 @@ func (p *LDEParser) TakeUntilIncluding() (localctx ITakeUntilIncludingContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(147) + p.SetState(173) p.Match(LDEParserIdentifier) } { - p.SetState(148) + p.SetState(174) p.Match(LDEParserT__6) } { - p.SetState(149) + p.SetState(175) p.FieldType() } { - p.SetState(150) + p.SetState(176) p.Match(LDEParserT__7) } { - p.SetState(151) + p.SetState(177) p.Target() } @@ -2045,7 +2454,7 @@ func (s *TakeUntilOrRestContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) TakeUntilOrRest() (localctx ITakeUntilOrRestContext) { localctx = NewTakeUntilOrRestContext(p, p.GetParserRuleContext(), 
p.GetState()) - p.EnterRule(localctx, 24, LDEParserRULE_takeUntilOrRest) + p.EnterRule(localctx, 30, LDEParserRULE_takeUntilOrRest) defer func() { p.ExitRule() @@ -2065,27 +2474,27 @@ func (p *LDEParser) TakeUntilOrRest() (localctx ITakeUntilOrRestContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(153) + p.SetState(179) p.Match(LDEParserIdentifier) } { - p.SetState(154) + p.SetState(180) p.Match(LDEParserT__2) } { - p.SetState(155) + p.SetState(181) p.FieldType() } { - p.SetState(156) + p.SetState(182) p.Match(LDEParserT__3) } { - p.SetState(157) - p.Match(LDEParserT__8) + p.SetState(183) + p.Match(LDEParserT__9) } { - p.SetState(158) + p.SetState(184) p.Target() } @@ -2176,7 +2585,7 @@ func (s *TakeUntilIncludingOrRestContext) ExitRule(listener antlr.ParseTreeListe func (p *LDEParser) TakeUntilIncludingOrRest() (localctx ITakeUntilIncludingOrRestContext) { localctx = NewTakeUntilIncludingOrRestContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 26, LDEParserRULE_takeUntilIncludingOrRest) + p.EnterRule(localctx, 32, LDEParserRULE_takeUntilIncludingOrRest) defer func() { p.ExitRule() @@ -2196,27 +2605,27 @@ func (p *LDEParser) TakeUntilIncludingOrRest() (localctx ITakeUntilIncludingOrRe p.EnterOuterAlt(localctx, 1) { - p.SetState(160) + p.SetState(186) p.Match(LDEParserIdentifier) } { - p.SetState(161) + p.SetState(187) p.Match(LDEParserT__6) } { - p.SetState(162) + p.SetState(188) p.FieldType() } { - p.SetState(163) + p.SetState(189) p.Match(LDEParserT__7) } { - p.SetState(164) - p.Match(LDEParserT__8) + p.SetState(190) + p.Match(LDEParserT__9) } { - p.SetState(165) + p.SetState(191) p.Target() } @@ -2297,7 +2706,7 @@ func (s *TakeUntilRestContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) TakeUntilRest() (localctx ITakeUntilRestContext) { localctx = NewTakeUntilRestContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 28, LDEParserRULE_takeUntilRest) + p.EnterRule(localctx, 34, 
LDEParserRULE_takeUntilRest) defer func() { p.ExitRule() @@ -2317,19 +2726,19 @@ func (p *LDEParser) TakeUntilRest() (localctx ITakeUntilRestContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(167) + p.SetState(193) p.Match(LDEParserIdentifier) } { - p.SetState(168) + p.SetState(194) p.Match(LDEParserT__2) } { - p.SetState(169) + p.SetState(195) p.FieldType() } { - p.SetState(170) + p.SetState(196) p.Match(LDEParserT__3) } @@ -2410,7 +2819,7 @@ func (s *OptionalNamedAreaContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) OptionalNamedArea() (localctx IOptionalNamedAreaContext) { localctx = NewOptionalNamedAreaContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 30, LDEParserRULE_optionalNamedArea) + p.EnterRule(localctx, 36, LDEParserRULE_optionalNamedArea) defer func() { p.ExitRule() @@ -2430,23 +2839,23 @@ func (p *LDEParser) OptionalNamedArea() (localctx IOptionalNamedAreaContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(172) - p.Match(LDEParserT__8) + p.SetState(198) + p.Match(LDEParserT__9) } { - p.SetState(173) + p.SetState(199) p.Match(LDEParserIdentifier) } { - p.SetState(174) + p.SetState(200) p.Match(LDEParserT__2) } { - p.SetState(175) + p.SetState(201) p.BaseAction() } { - p.SetState(176) + p.SetState(202) p.Match(LDEParserT__3) } @@ -2523,7 +2932,7 @@ func (s *OptionalAreaContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) OptionalArea() (localctx IOptionalAreaContext) { localctx = NewOptionalAreaContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 32, LDEParserRULE_optionalArea) + p.EnterRule(localctx, 38, LDEParserRULE_optionalArea) defer func() { p.ExitRule() @@ -2543,19 +2952,19 @@ func (p *LDEParser) OptionalArea() (localctx IOptionalAreaContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(178) - p.Match(LDEParserT__8) + p.SetState(204) + p.Match(LDEParserT__9) } { - p.SetState(179) + p.SetState(205) p.Match(LDEParserT__2) } { - p.SetState(180) + 
p.SetState(206) p.BaseAction() } { - p.SetState(181) + p.SetState(207) p.Match(LDEParserT__3) } @@ -2630,7 +3039,7 @@ func (s *RestCheckContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) RestCheck() (localctx IRestCheckContext) { localctx = NewRestCheckContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 34, LDEParserRULE_restCheck) + p.EnterRule(localctx, 40, LDEParserRULE_restCheck) defer func() { p.ExitRule() @@ -2648,32 +3057,32 @@ func (p *LDEParser) RestCheck() (localctx IRestCheckContext) { } }() - p.SetState(188) + p.SetState(214) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 6, p.GetParserRuleContext()) { + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext()) { case 1: p.EnterOuterAlt(localctx, 1) { - p.SetState(183) - p.Match(LDEParserT__11) + p.SetState(209) + p.Match(LDEParserT__13) } { - p.SetState(184) + p.SetState(210) p.Match(LDEParserIntLit) } case 2: p.EnterOuterAlt(localctx, 2) { - p.SetState(185) - p.Match(LDEParserT__11) + p.SetState(211) + p.Match(LDEParserT__13) } { - p.SetState(186) + p.SetState(212) p.Match(LDEParserComparisonOperator) } { - p.SetState(187) + p.SetState(213) p.Match(LDEParserIntLit) } @@ -2741,7 +3150,7 @@ func (s *AtEndContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) AtEnd() (localctx IAtEndContext) { localctx = NewAtEndContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 36, LDEParserRULE_atEnd) + p.EnterRule(localctx, 42, LDEParserRULE_atEnd) defer func() { p.ExitRule() @@ -2761,8 +3170,8 @@ func (p *LDEParser) AtEnd() (localctx IAtEndContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(190) - p.Match(LDEParserT__12) + p.SetState(216) + p.Match(LDEParserT__14) } return localctx @@ -2888,7 +3297,7 @@ func (s *TargetContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) Target() (localctx ITargetContext) { localctx = 
NewTargetContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 38, LDEParserRULE_target) + p.EnterRule(localctx, 44, LDEParserRULE_target) defer func() { p.ExitRule() @@ -2906,68 +3315,68 @@ func (p *LDEParser) Target() (localctx ITargetContext) { } }() - p.SetState(207) + p.SetState(233) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext()) { + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 8, p.GetParserRuleContext()) { case 1: p.EnterOuterAlt(localctx, 1) { - p.SetState(192) + p.SetState(218) p.TargetLit() } { - p.SetState(193) + p.SetState(219) p.Bound() } case 2: p.EnterOuterAlt(localctx, 2) { - p.SetState(195) + p.SetState(221) p.TargetLit() } { - p.SetState(196) + p.SetState(222) p.Limit() } case 3: p.EnterOuterAlt(localctx, 3) { - p.SetState(198) + p.SetState(224) p.TargetLit() } { - p.SetState(199) + p.SetState(225) p.Exact() } case 4: p.EnterOuterAlt(localctx, 4) { - p.SetState(201) + p.SetState(227) p.TargetLit() } { - p.SetState(202) + p.SetState(228) p.Jump() } case 5: p.EnterOuterAlt(localctx, 5) { - p.SetState(204) + p.SetState(230) p.TargetLit() } case 6: p.EnterOuterAlt(localctx, 6) { - p.SetState(205) - p.Match(LDEParserT__13) + p.SetState(231) + p.Match(LDEParserT__15) } { - p.SetState(206) + p.SetState(232) p.Target() } @@ -3044,7 +3453,7 @@ func (s *TargetLitContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) TargetLit() (localctx ITargetLitContext) { localctx = NewTargetLitContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 40, LDEParserRULE_targetLit) + p.EnterRule(localctx, 46, LDEParserRULE_targetLit) var _la int defer func() { @@ -3065,7 +3474,7 @@ func (p *LDEParser) TargetLit() (localctx ITargetLitContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(209) + p.SetState(235) _la = p.GetTokenStream().LA(1) if !(_la == LDEParserStringLit || _la == LDEParserCharLit) { @@ -3147,7 +3556,7 @@ func 
(s *BoundContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) Bound() (localctx IBoundContext) { localctx = NewBoundContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 42, LDEParserRULE_bound) + p.EnterRule(localctx, 48, LDEParserRULE_bound) defer func() { p.ExitRule() @@ -3167,23 +3576,23 @@ func (p *LDEParser) Bound() (localctx IBoundContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(211) + p.SetState(237) p.Match(LDEParserT__6) } { - p.SetState(212) + p.SetState(238) p.Match(LDEParserIntLit) } { - p.SetState(213) - p.Match(LDEParserT__10) + p.SetState(239) + p.Match(LDEParserT__11) } { - p.SetState(214) + p.SetState(240) p.Match(LDEParserIntLit) } { - p.SetState(215) + p.SetState(241) p.Match(LDEParserT__7) } @@ -3254,7 +3663,7 @@ func (s *LimitContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) Limit() (localctx ILimitContext) { localctx = NewLimitContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 44, LDEParserRULE_limit) + p.EnterRule(localctx, 50, LDEParserRULE_limit) defer func() { p.ExitRule() @@ -3274,19 +3683,19 @@ func (p *LDEParser) Limit() (localctx ILimitContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(217) + p.SetState(243) p.Match(LDEParserT__6) } { - p.SetState(218) - p.Match(LDEParserT__10) + p.SetState(244) + p.Match(LDEParserT__11) } { - p.SetState(219) + p.SetState(245) p.Match(LDEParserIntLit) } { - p.SetState(220) + p.SetState(246) p.Match(LDEParserT__7) } @@ -3357,7 +3766,7 @@ func (s *JumpContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) Jump() (localctx IJumpContext) { localctx = NewJumpContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 46, LDEParserRULE_jump) + p.EnterRule(localctx, 52, LDEParserRULE_jump) defer func() { p.ExitRule() @@ -3377,19 +3786,19 @@ func (p *LDEParser) Jump() (localctx IJumpContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(222) + p.SetState(248) 
p.Match(LDEParserT__6) } { - p.SetState(223) + p.SetState(249) p.Match(LDEParserIntLit) } { - p.SetState(224) - p.Match(LDEParserT__10) + p.SetState(250) + p.Match(LDEParserT__11) } { - p.SetState(225) + p.SetState(251) p.Match(LDEParserT__7) } @@ -3460,7 +3869,7 @@ func (s *ExactContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) Exact() (localctx IExactContext) { localctx = NewExactContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 48, LDEParserRULE_exact) + p.EnterRule(localctx, 54, LDEParserRULE_exact) defer func() { p.ExitRule() @@ -3480,15 +3889,15 @@ func (p *LDEParser) Exact() (localctx IExactContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(227) + p.SetState(253) p.Match(LDEParserT__6) } { - p.SetState(228) + p.SetState(254) p.Match(LDEParserIntLit) } { - p.SetState(229) + p.SetState(255) p.Match(LDEParserT__7) } @@ -3563,7 +3972,7 @@ func (s *FieldTypeContext) ExitRule(listener antlr.ParseTreeListener) { func (p *LDEParser) FieldType() (localctx IFieldTypeContext) { localctx = NewFieldTypeContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 50, LDEParserRULE_fieldType) + p.EnterRule(localctx, 56, LDEParserRULE_fieldType) var _la int defer func() { @@ -3584,7 +3993,7 @@ func (p *LDEParser) FieldType() (localctx IFieldTypeContext) { p.EnterOuterAlt(localctx, 1) { - p.SetState(231) + p.SetState(257) _la = p.GetTokenStream().LA(1) if !(_la == LDEParserIdentifier || _la == LDEParserIdentifierWithFraction) { diff --git a/internal/srcbuilder/dispatching.go b/internal/srcbuilder/dispatching.go index bfaef3b..354b6b8 100644 --- a/internal/srcbuilder/dispatching.go +++ b/internal/srcbuilder/dispatching.go @@ -53,7 +53,7 @@ func (sb *SrcBuilder) DispatchMayBeStartChar(a *ast.MayBeStartChar) error { return err } sb.appendGens(func() error { - return sb.gen.HeadChar(a.Value, true) + return sb.gen.HeadChar(a.Value, true, true) }) return nil } @@ -63,7 +63,7 @@ func (sb *SrcBuilder) 
DispatchMayBeStartString(a *ast.MayBeStartString) error { return err } sb.appendGens(func() error { - return sb.gen.HeadString(a.Value, true) + return sb.gen.HeadString(a.Value, true, true) }) return nil } @@ -114,7 +114,7 @@ func (sb *SrcBuilder) DispatchPassFirst(a ast.PassFixed) error { return nil } -func (sb *SrcBuilder) DispatchPassUntil(a *ast.PassUntil) error { +func (sb *SrcBuilder) DispatchPassBefore(a *ast.PassBefore) error { if err := sb.gen.RegGravity(sb.prefixCur()); err != nil { return err } @@ -129,11 +129,11 @@ func (sb *SrcBuilder) DispatchPassUntil(a *ast.PassUntil) error { switch l.Type { case ast.String: sb.appendGens(func() error { - return sb.gen.LookupFixedString(l.Value, lower, false) + return sb.gen.LookupFixedString(l.Value, lower, false, false) }) case ast.Char: sb.appendGens(func() error { - return sb.gen.LookupFixedChar(l.Value, lower, false) + return sb.gen.LookupFixedChar(l.Value, lower, false, false) }) default: return fmt.Errorf("fatal flow: passing action integrity error, got unexpected type %s", l.Type) @@ -143,11 +143,11 @@ func (sb *SrcBuilder) DispatchPassUntil(a *ast.PassUntil) error { switch l.Type { case ast.String: sb.appendGens(func() error { - return sb.gen.LookupString(l.Value, lower, upper, l.Close, false) + return sb.gen.LookupString(l.Value, lower, upper, l.Close, false, false) }) case ast.Char: sb.appendGens(func() error { - return sb.gen.LookupChar(l.Value, lower, upper, l.Close, false) + return sb.gen.LookupChar(l.Value, lower, upper, l.Close, false, false) }) default: return fmt.Errorf("fatal flow: passing action integrity error, got unexpected type %s", l.Type) @@ -156,7 +156,7 @@ func (sb *SrcBuilder) DispatchPassUntil(a *ast.PassUntil) error { return nil } -func (sb *SrcBuilder) DispatchPassUntilOrIgnore(a *ast.PassUntilOrIgnore) error { +func (sb *SrcBuilder) DispatchPassBeforeOrIgnore(a *ast.PassBeforeOrIgnore) error { if err := sb.gen.RegGravity(sb.prefixCur()); err != nil { return err } @@ -171,11 +171,11 
@@ func (sb *SrcBuilder) DispatchPassUntilOrIgnore(a *ast.PassUntilOrIgnore) error switch l.Type { case ast.String: sb.appendGens(func() error { - return sb.gen.LookupFixedString(l.Value, lower, true) + return sb.gen.LookupFixedString(l.Value, lower, true, false) }) case ast.Char: sb.appendGens(func() error { - return sb.gen.LookupFixedChar(l.Value, lower, true) + return sb.gen.LookupFixedChar(l.Value, lower, true, false) }) } } else { @@ -183,11 +183,91 @@ func (sb *SrcBuilder) DispatchPassUntilOrIgnore(a *ast.PassUntilOrIgnore) error switch l.Type { case ast.String: sb.appendGens(func() error { - return sb.gen.LookupString(l.Value, lower, upper, l.Close, true) + return sb.gen.LookupString(l.Value, lower, upper, l.Close, true, false) }) case ast.Char: sb.appendGens(func() error { - return sb.gen.LookupChar(l.Value, lower, upper, l.Close, true) + return sb.gen.LookupChar(l.Value, lower, upper, l.Close, true, false) + }) + } + } + return nil +} + +func (sb *SrcBuilder) DispatchPassAfter(a *ast.PassAfter) error { + if err := sb.gen.RegGravity(sb.prefixCur()); err != nil { + return err + } + l := a.Limit + var lower int + var upper int + lower = l.Lower + upper = l.Upper + + if lower == upper && lower > 0 { + // Fixed position check + switch l.Type { + case ast.String: + sb.appendGens(func() error { + return sb.gen.LookupFixedString(l.Value, lower, false, true) + }) + case ast.Char: + sb.appendGens(func() error { + return sb.gen.LookupFixedChar(l.Value, lower, false, true) + }) + default: + return fmt.Errorf("fatal flow: passing action integrity error, got unexpected type %s", l.Type) + } + } else { + // It is either short or limited/bounded lookup + switch l.Type { + case ast.String: + sb.appendGens(func() error { + return sb.gen.LookupString(l.Value, lower, upper, l.Close, false, true) + }) + case ast.Char: + sb.appendGens(func() error { + return sb.gen.LookupChar(l.Value, lower, upper, l.Close, false, true) + }) + default: + return fmt.Errorf("fatal flow: passing 
action integrity error, got unexpected type %s", l.Type) + } + } + return nil +} + +func (sb *SrcBuilder) DispatchPassAfterOrIgnore(a *ast.PassAfterOrIgnore) error { + if err := sb.gen.RegGravity(sb.prefixCur()); err != nil { + return err + } + l := a.Limit + var lower int + var upper int + lower = l.Lower + upper = l.Upper + + if lower == upper && lower > 0 { + // Fixed position check + switch l.Type { + case ast.String: + sb.appendGens(func() error { + return sb.gen.LookupFixedString(l.Value, lower, true, true) + }) + case ast.Char: + sb.appendGens(func() error { + return sb.gen.LookupFixedChar(l.Value, lower, true, true) + }) + } + } else { + // It is either short or limited/bounded lookup + switch l.Type { + case ast.String: + sb.appendGens(func() error { + return sb.gen.LookupString(l.Value, lower, upper, l.Close, true, true) + }) + case ast.Char: + sb.appendGens(func() error { + return sb.gen.LookupChar(l.Value, lower, upper, l.Close, true, true) }) } } @@ -199,7 +279,17 @@ func (sb *SrcBuilder) DispatchStartChar(a *ast.StartChar) error { return err } sb.appendGens(func() error { - return sb.gen.HeadChar(a.Value, false) + return sb.gen.HeadChar(a.Value, false, true) + }) + return nil +} + +func (sb *SrcBuilder) DispatchStartCharWithoutPass(a *ast.StartCharWithoutPass) error { + if err := sb.gen.RegGravity(sb.prefixCur()); err != nil { + return err + } + sb.appendGens(func() error { + return sb.gen.HeadChar(a.Value, false, false) }) return nil } @@ -209,7 +299,17 @@ func (sb *SrcBuilder) DispatchStartString(a *ast.StartString) error { return err } sb.appendGens(func() error { - return sb.gen.HeadString(a.Value, false) + return sb.gen.HeadString(a.Value, false, true) + }) + return nil +} + +func (sb *SrcBuilder) DispatchStartStringWithoutPass(a *ast.StartStringWithoutPass) error { + if err := sb.gen.RegGravity(sb.prefixCur()); err != nil { + return err + } + sb.appendGens(func() error { + return sb.gen.HeadString(a.Value, false, false) }) return nil } diff 
--git a/ldetool.go b/ldetool.go index 98f02ae..e9bd12b 100644 --- a/ldetool.go +++ b/ldetool.go @@ -1,6 +1,5 @@ package main -//go:generate antlr4 -visitor -no-visitor -listener -o internal/parser -Dlanguage=Go LDE.g4 import ( "go/parser" diff --git a/testing/common.lde b/testing/common.lde index a24fecf..9980033 100644 --- a/testing/common.lde +++ b/testing/common.lde @@ -4,3 +4,7 @@ Rule = ^' ' Unsigned(uint) ' ' Str(str); + +BeforeLookup = .."abc" Data(string); + +CheckPrefix = @"abc" Data(string); \ No newline at end of file diff --git a/testing/common_lde.go b/testing/common_lde.go index 221a277..93b8bca 100644 --- a/testing/common_lde.go +++ b/testing/common_lde.go @@ -15,6 +15,8 @@ import ( "strings" ) +var abc = "abc" + // Rule ... type Rule struct { Rest string @@ -94,3 +96,49 @@ func (p *Rule) Extract(line string) (bool, error) { p.Rest = p.Rest[len(p.Rest):] return true, nil } + +// BeforeLookup ... +type BeforeLookup struct { + Rest string + Data string +} + +// Extract ... +func (p *BeforeLookup) Extract(line string) (bool, error) { + p.Rest = line + var pos int + + // Looking for "abc" and then pass it + pos = strings.Index(p.Rest, abc) + if pos >= 0 { + p.Rest = p.Rest[pos:] + } else { + return false, nil + } + + // Take the rest as Data(string) + p.Data = p.Rest + p.Rest = p.Rest[len(p.Rest):] + return true, nil +} + +// CheckPrefix ... +type CheckPrefix struct { + Rest string + Data string +} + +// Extract ... 
+func (p *CheckPrefix) Extract(line string) (bool, error) { + p.Rest = line + + // Checks if the rest starts with `"abc"` + if !strings.HasPrefix(p.Rest, abc) { + return false, nil + } + + // Take the rest as Data(string) + p.Data = p.Rest + p.Rest = p.Rest[len(p.Rest):] + return true, nil +} diff --git a/testing/common_test.go b/testing/common_test.go index 829b6dd..119f72e 100644 --- a/testing/common_test.go +++ b/testing/common_test.go @@ -20,3 +20,29 @@ func TestCommon(t *testing.T) { require.Equal(t, uint(234), e.Unsigned) require.Equal(t, "abcdef", e.Str) } + +func TestBeforeLookup(t *testing.T) { + data := " abc123" + var e BeforeLookup + if ok, err := e.Extract(data); !ok || err != nil { + if err != nil { + t.Fatal(err) + } + require.True(t, ok) + } + require.Equal(t, "abc123", e.Data) + require.Equal(t, "", e.Rest) +} + +func TestCheckPrefix(t *testing.T) { + data := "abc123" + var e CheckPrefix + if ok, err := e.Extract(data); !ok || err != nil { + if err != nil { + t.Fatal(err) + } + require.True(t, ok) + } + require.Equal(t, "abc123", e.Data) + require.Equal(t, "", e.Rest) +} diff --git a/testing/parsing_lde.go b/testing/parsing_lde.go index 2a01099..dbe5c72 100644 --- a/testing/parsing_lde.go +++ b/testing/parsing_lde.go @@ -1,3 +1,4 @@ + /* This file was autogenerated via ----------------------------------------------------------------- diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go deleted file mode 100644 index 1592212..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -var ATNInvalidAltNumber int - -type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Used to build DFA predictors for them. - DecisionToState []DecisionState - - // grammarType is the ATN type and is used for deserializing ATNs from strings. - grammarType int - - // lexerActions is referenced by action transitions in the ATN for lexer ATNs. - lexerActions []LexerAction - - // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. - maxTokenType int - - modeNameToStartState map[string]*TokensStartState - - modeToStartState []*TokensStartState - - // ruleToStartState maps from rule index to starting state number. - ruleToStartState []*RuleStartState - - // ruleToStopState maps from rule index to stop state number. - ruleToStopState []*RuleStopState - - // ruleToTokenType maps the rule index to the resulting token type for lexer - // ATNs. For parser ATNs, it maps the rule index to the generated bypass token - // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was - // specified, and otherwise is nil. - ruleToTokenType []int - - states []ATNState -} - -func NewATN(grammarType int, maxTokenType int) *ATN { - return &ATN{ - grammarType: grammarType, - maxTokenType: maxTokenType, - modeNameToStartState: make(map[string]*TokensStartState), - } -} - -// NextTokensInContext computes the set of valid tokens that can occur starting -// in state s. If ctx is nil, the set of tokens will not include what can follow -// the rule surrounding s. In other words, the set will be restricted to tokens -// reachable staying within the rule of s. -func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { - return NewLL1Analyzer(a).Look(s, nil, ctx) -} - -// NextTokensNoContext computes the set of valid tokens that can occur starting -// in s and staying in same rule. Token.EPSILON is in set if we reach end of -// rule. 
-func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { - if s.GetNextTokenWithinRule() != nil { - return s.GetNextTokenWithinRule() - } - - s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil)) - s.GetNextTokenWithinRule().readOnly = true - - return s.GetNextTokenWithinRule() -} - -func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { - if ctx == nil { - return a.NextTokensNoContext(s) - } - - return a.NextTokensInContext(s, ctx) -} - -func (a *ATN) addState(state ATNState) { - if state != nil { - state.SetATN(a) - state.SetStateNumber(len(a.states)) - } - - a.states = append(a.states, state) -} - -func (a *ATN) removeState(state ATNState) { - a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice -} - -func (a *ATN) defineDecisionState(s DecisionState) int { - a.DecisionToState = append(a.DecisionToState, s) - s.setDecision(len(a.DecisionToState) - 1) - - return s.getDecision() -} - -func (a *ATN) getDecisionState(decision int) DecisionState { - if len(a.DecisionToState) == 0 { - return nil - } - - return a.DecisionToState[decision] -} - -// getExpectedTokens computes the set of input symbols which could follow ATN -// state number stateNumber in the specified full parse context ctx and returns -// the set of potentially valid input symbols which could follow the specified -// state in the specified context. This method considers the complete parser -// context, but does not evaluate semantic predicates (i.e. all predicates -// encountered during the calculation are assumed true). If a path in the ATN -// exists from the starting state to the RuleStopState of the outermost context -// without Matching any symbols, Token.EOF is added to the returned set. -// -// A nil ctx defaults to ParserRuleContext.EMPTY. -// -// It panics if the ATN does not contain state stateNumber. 
-func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { - if stateNumber < 0 || stateNumber >= len(a.states) { - panic("Invalid state number.") - } - - s := a.states[stateNumber] - following := a.NextTokens(s, nil) - - if !following.contains(TokenEpsilon) { - return following - } - - expected := NewIntervalSet() - - expected.addSet(following) - expected.removeOne(TokenEpsilon) - - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := a.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - - following = a.NextTokens(rt.(*RuleTransition).followState, nil) - expected.addSet(following) - expected.removeOne(TokenEpsilon) - ctx = ctx.GetParent().(RuleContext) - } - - if following.contains(TokenEpsilon) { - expected.addOne(TokenEOF) - } - - return expected -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go deleted file mode 100644 index 0535d52..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type comparable interface { - equals(other interface{}) bool -} - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. 
-type ATNConfig interface { - comparable - - hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? 
- } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - 
b.reachesIntoOuterContext = v -} - -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. -func (b *BaseATNConfig) equals(o interface{}) bool { - if b == o { - return true - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -func (b *BaseATNConfig) hash() int { - var c int - if b.context != nil { - c = b.context.hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - 
BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func (l *LexerATNConfig) hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.hash()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.hash()) - h = murmurUpdate(h, l.semanticContext.hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.hash()) - h = murmurFinish(h, 6) - return h -} - -func (l *LexerATNConfig) equals(other interface{}) bool { - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { 
- return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.equals(othert.BaseATNConfig) -} - - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go deleted file mode 100644 index d9f7475..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -type ATNConfigSet interface { - hash() int - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *Set - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Equals(other interface{}) bool - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. 
-type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *Set - - // configs is the added elements. - configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? 
- uniqueAlt int -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewSet(nil, equalATNConfigs), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing := b.configLookup.add(config).(ATNConfig) - - if existing == config { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *Set { - states := NewSet(nil, nil) - - for i := 0; i < len(b.configs); i++ { - states.add(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.length() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -func (b *BaseATNConfigSet) Equals(other interface{}) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary? 
- b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := murmurInit(1) - for _, c := range b.configs { - if c != nil { - h = murmurUpdate(h, c.hash()) - } - } - return murmurFinish(h, len(b.configs)) -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewSet(nil, equalATNConfigs) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - 
-func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - b.configLookup = NewSet(nil, nil) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber() - alts := ai.GetAlt() == bi.GetAlt() - cons := ai.GetSemanticContext().equals(bi.GetSemanticContext()) - - return nums && alts && cons -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go deleted file mode 100644 index 18b89ef..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
-// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false} - -type ATNDeserializationOptions struct { - readOnly bool - verifyATN bool - generateRuleBypassTransitions bool -} - -func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions { - o := new(ATNDeserializationOptions) - - if CopyFrom != nil { - o.readOnly = CopyFrom.readOnly - o.verifyATN = CopyFrom.verifyATN - o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions - } - - return o -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go deleted file mode 100644 index 884d39c..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "encoding/hex" - "fmt" - "strconv" - "strings" - "unicode/utf16" -) - -// This is the earliest supported serialized UUID. -// stick to serialized version for now, we don't need a UUID instance -var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E" -var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089" - -// This list contains all of the currently supported UUIDs, ordered by when -// the feature first appeared in this branch. -var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP} - -var SerializedVersion = 3 - -// This is the current serialized UUID. 
-var SerializedUUID = AddedUnicodeSMP - -type LoopEndStateIntPair struct { - item0 *LoopEndState - item1 int -} - -type BlockStartStateIntPair struct { - item0 BlockStartState - item1 int -} - -type ATNDeserializer struct { - deserializationOptions *ATNDeserializationOptions - data []rune - pos int - uuid string -} - -func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { - if options == nil { - options = ATNDeserializationOptionsdefaultOptions - } - - return &ATNDeserializer{deserializationOptions: options} -} - -func stringInSlice(a string, list []string) int { - for i, b := range list { - if b == a { - return i - } - } - - return -1 -} - -// isFeatureSupported determines if a particular serialized representation of an -// ATN supports a particular feature, identified by the UUID used for -// serializing the ATN at the time the feature was first introduced. Feature is -// the UUID marking the first time the feature was supported in the serialized -// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently -// being deserialized. It returns true if actualUuid represents a serialized ATN -// at or after the feature identified by feature was introduced, and otherwise -// false. -func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool { - idx1 := stringInSlice(feature, SupportedUUIDs) - - if idx1 < 0 { - return false - } - - idx2 := stringInSlice(actualUUID, SupportedUUIDs) - - return idx2 >= idx1 -} - -func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN { - a.reset(utf16.Decode(data)) - a.checkVersion() - a.checkUUID() - - atn := a.readATN() - - a.readStates(atn) - a.readRules(atn) - a.readModes(atn) - - sets := make([]*IntervalSet, 0) - - // First, deserialize sets with 16-bit arguments <= U+FFFF. - sets = a.readSets(atn, sets, a.readInt) - // Next, if the ATN was serialized with the Unicode SMP feature, - // deserialize sets with 32-bit arguments <= U+10FFFF. 
- if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) { - sets = a.readSets(atn, sets, a.readInt32) - } - - a.readEdges(atn, sets) - a.readDecisions(atn) - a.readLexerActions(atn) - a.markPrecedenceDecisions(atn) - a.verifyATN(atn) - - if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser { - a.generateRuleBypassTransitions(atn) - // Re-verify after modification - a.verifyATN(atn) - } - - return atn - -} - -func (a *ATNDeserializer) reset(data []rune) { - temp := make([]rune, len(data)) - - for i, c := range data { - // Don't adjust the first value since that's the version number - if i == 0 { - temp[i] = c - } else if c > 1 { - temp[i] = c - 2 - } else { - temp[i] = c + 65533 - } - } - - a.data = temp - a.pos = 0 -} - -func (a *ATNDeserializer) checkVersion() { - version := a.readInt() - - if version != SerializedVersion { - panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").") - } -} - -func (a *ATNDeserializer) checkUUID() { - uuid := a.readUUID() - - if stringInSlice(uuid, SupportedUUIDs) < 0 { - panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).") - } - - a.uuid = uuid -} - -func (a *ATNDeserializer) readATN() *ATN { - grammarType := a.readInt() - maxTokenType := a.readInt() - - return NewATN(grammarType, maxTokenType) -} - -func (a *ATNDeserializer) readStates(atn *ATN) { - loopBackStateNumbers := make([]LoopEndStateIntPair, 0) - endStateNumbers := make([]BlockStartStateIntPair, 0) - - nstates := a.readInt() - - for i := 0; i < nstates; i++ { - stype := a.readInt() - - // Ignore bad types of states - if stype == ATNStateInvalidType { - atn.addState(nil) - - continue - } - - ruleIndex := a.readInt() - - if ruleIndex == 0xFFFF { - ruleIndex = -1 - } - - s := a.stateFactory(stype, ruleIndex) - - if stype == ATNStateLoopEnd { - loopBackStateNumber := a.readInt() - - loopBackStateNumbers = 
append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) - } else if s2, ok := s.(BlockStartState); ok { - endStateNumber := a.readInt() - - endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber}) - } - - atn.addState(s) - } - - // Delay the assignment of loop back and end states until we know all the state - // instances have been initialized - for j := 0; j < len(loopBackStateNumbers); j++ { - pair := loopBackStateNumbers[j] - - pair.item0.loopBackState = atn.states[pair.item1] - } - - for j := 0; j < len(endStateNumbers); j++ { - pair := endStateNumbers[j] - - pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) - } - - numNonGreedyStates := a.readInt() - - for j := 0; j < numNonGreedyStates; j++ { - stateNumber := a.readInt() - - atn.states[stateNumber].(DecisionState).setNonGreedy(true) - } - - numPrecedenceStates := a.readInt() - - for j := 0; j < numPrecedenceStates; j++ { - stateNumber := a.readInt() - - atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true - } -} - -func (a *ATNDeserializer) readRules(atn *ATN) { - nrules := a.readInt() - - if atn.grammarType == ATNTypeLexer { - atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0) - } - - atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0) - - for i := 0; i < nrules; i++ { - s := a.readInt() - startState := atn.states[s].(*RuleStartState) - - atn.ruleToStartState[i] = startState - - if atn.grammarType == ATNTypeLexer { - tokenType := a.readInt() - - if tokenType == 0xFFFF { - tokenType = TokenEOF - } - - atn.ruleToTokenType[i] = tokenType - } - } - - atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0) - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if s2, ok := state.(*RuleStopState); ok { - atn.ruleToStopState[s2.ruleIndex] = s2 - atn.ruleToStartState[s2.ruleIndex].stopState = s2 - } - } -} - -func (a 
*ATNDeserializer) readModes(atn *ATN) { - nmodes := a.readInt() - - for i := 0; i < nmodes; i++ { - s := a.readInt() - - atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState)) - } -} - -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet { - m := a.readInt() - - for i := 0; i < m; i++ { - iset := NewIntervalSet() - - sets = append(sets, iset) - - n := a.readInt() - containsEOF := a.readInt() - - if containsEOF != 0 { - iset.addOne(-1) - } - - for j := 0; j < n; j++ { - i1 := readUnicode() - i2 := readUnicode() - - iset.addRange(i1, i2) - } - } - - return sets -} - -func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { - nedges := a.readInt() - - for i := 0; i < nedges; i++ { - var ( - src = a.readInt() - trg = a.readInt() - ttype = a.readInt() - arg1 = a.readInt() - arg2 = a.readInt() - arg3 = a.readInt() - trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) - srcState = atn.states[src] - ) - - srcState.AddTransition(trans, -1) - } - - // Edges for rule stop states can be derived, so they are not serialized - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - for j := 0; j < len(state.GetTransitions()); j++ { - var t, ok = state.GetTransitions()[j].(*RuleTransition) - - if !ok { - continue - } - - outermostPrecedenceReturn := -1 - - if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule { - if t.precedence == 0 { - outermostPrecedenceReturn = t.getTarget().GetRuleIndex() - } - } - - trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn) - - atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1) - } - } - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if s2, ok := state.(*BaseBlockStartState); ok { - // We need to know the end state to set its start state - if s2.endState == nil { - panic("IllegalState") - } - - // Block end states can only be associated to a 
single block start state - if s2.endState.startState != nil { - panic("IllegalState") - } - - s2.endState.startState = state - } - - if s2, ok := state.(*PlusLoopbackState); ok { - for j := 0; j < len(s2.GetTransitions()); j++ { - target := s2.GetTransitions()[j].getTarget() - - if t2, ok := target.(*PlusBlockStartState); ok { - t2.loopBackState = state - } - } - } else if s2, ok := state.(*StarLoopbackState); ok { - for j := 0; j < len(s2.GetTransitions()); j++ { - target := s2.GetTransitions()[j].getTarget() - - if t2, ok := target.(*StarLoopEntryState); ok { - t2.loopBackState = state - } - } - } - } -} - -func (a *ATNDeserializer) readDecisions(atn *ATN) { - ndecisions := a.readInt() - - for i := 0; i < ndecisions; i++ { - s := a.readInt() - decState := atn.states[s].(DecisionState) - - atn.DecisionToState = append(atn.DecisionToState, decState) - decState.setDecision(i) - } -} - -func (a *ATNDeserializer) readLexerActions(atn *ATN) { - if atn.grammarType == ATNTypeLexer { - count := a.readInt() - - atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil) - - for i := 0; i < count; i++ { - actionType := a.readInt() - data1 := a.readInt() - - if data1 == 0xFFFF { - data1 = -1 - } - - data2 := a.readInt() - - if data2 == 0xFFFF { - data2 = -1 - } - - lexerAction := a.lexerActionFactory(actionType, data1, data2) - - atn.lexerActions[i] = lexerAction - } - } -} - -func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { - count := len(atn.ruleToStartState) - - for i := 0; i < count; i++ { - atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 - } - - for i := 0; i < count; i++ { - a.generateRuleBypassTransition(atn, i) - } -} - -func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { - bypassStart := NewBasicBlockStartState() - - bypassStart.ruleIndex = idx - atn.addState(bypassStart) - - bypassStop := NewBlockEndState() - - bypassStop.ruleIndex = idx - atn.addState(bypassStop) - - bypassStart.endState = bypassStop - - 
atn.defineDecisionState(bypassStart.BaseDecisionState) - - bypassStop.startState = bypassStart - - var excludeTransition Transition - var endState ATNState - - if atn.ruleToStartState[idx].isPrecedenceRule { - // Wrap from the beginning of the rule to the StarLoopEntryState - endState = nil - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if a.stateIsEndStateFor(state, idx) != nil { - endState = state - excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] - - break - } - } - - if excludeTransition == nil { - panic("Couldn't identify final state of the precedence rule prefix section.") - } - } else { - endState = atn.ruleToStopState[idx] - } - - // All non-excluded transitions that currently target end state need to target - // blockEnd instead - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - for j := 0; j < len(state.GetTransitions()); j++ { - transition := state.GetTransitions()[j] - - if transition == excludeTransition { - continue - } - - if transition.getTarget() == endState { - transition.setTarget(bypassStop) - } - } - } - - // All transitions leaving the rule start state need to leave blockStart instead - ruleToStartState := atn.ruleToStartState[idx] - count := len(ruleToStartState.GetTransitions()) - - for count > 0 { - bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) - ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) - } - - // Link the new states - atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) - bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) - - MatchState := NewBasicState() - - atn.addState(MatchState) - MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) - bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) -} - -func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) 
ATNState { - if state.GetRuleIndex() != idx { - return nil - } - - if _, ok := state.(*StarLoopEntryState); !ok { - return nil - } - - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if _, ok := maybeLoopEndState.(*LoopEndState); !ok { - return nil - } - - var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { - return state - } - - return nil -} - -// markPrecedenceDecisions analyzes the StarLoopEntryState states in the -// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to -// the correct value. -func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { - for _, state := range atn.states { - if _, ok := state.(*StarLoopEntryState); !ok { - continue - } - - // We analyze the ATN to determine if a ATN decision state is the - // decision for the closure block that determines whether a - // precedence rule should continue or complete. 
- if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if s3, ok := maybeLoopEndState.(*LoopEndState); ok { - var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if s3.epsilonOnlyTransitions && ok2 { - state.(*StarLoopEntryState).precedenceRuleDecision = true - } - } - } - } -} - -func (a *ATNDeserializer) verifyATN(atn *ATN) { - if !a.deserializationOptions.verifyATN { - return - } - - // Verify assumptions - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if state == nil { - continue - } - - a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") - - switch s2 := state.(type) { - case *PlusBlockStartState: - a.checkCondition(s2.loopBackState != nil, "") - - case *StarLoopEntryState: - a.checkCondition(s2.loopBackState != nil, "") - a.checkCondition(len(s2.GetTransitions()) == 2, "") - - switch s2 := state.(type) { - case *StarBlockStartState: - var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState) - - a.checkCondition(ok2, "") - a.checkCondition(!s2.nonGreedy, "") - - case *LoopEndState: - var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState) - - a.checkCondition(ok2, "") - a.checkCondition(s3.nonGreedy, "") - - default: - panic("IllegalState") - } - - case *StarLoopbackState: - a.checkCondition(len(state.GetTransitions()) == 1, "") - - var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) - - a.checkCondition(ok2, "") - - case *LoopEndState: - a.checkCondition(s2.loopBackState != nil, "") - - case *RuleStartState: - a.checkCondition(s2.stopState != nil, "") - - case *BaseBlockStartState: - a.checkCondition(s2.endState != nil, "") - - case *BlockEndState: - a.checkCondition(s2.startState != nil, "") - - case DecisionState: - a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") - - default: - var _, ok = 
s2.(*RuleStopState) - - a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") - } - } -} - -func (a *ATNDeserializer) checkCondition(condition bool, message string) { - if !condition { - if message == "" { - message = "IllegalState" - } - - panic(message) - } -} - -func (a *ATNDeserializer) readInt() int { - v := a.data[a.pos] - - a.pos++ - - return int(v) -} - -func (a *ATNDeserializer) readInt32() int { - var low = a.readInt() - var high = a.readInt() - return low | (high << 16) -} - -//TODO -//func (a *ATNDeserializer) readLong() int64 { -// panic("Not implemented") -// var low = a.readInt32() -// var high = a.readInt32() -// return (low & 0x00000000FFFFFFFF) | (high << int32) -//} - -func createByteToHex() []string { - bth := make([]string, 256) - - for i := 0; i < 256; i++ { - bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)})) - } - - return bth -} - -var byteToHex = createByteToHex() - -func (a *ATNDeserializer) readUUID() string { - bb := make([]int, 16) - - for i := 7; i >= 0; i-- { - integer := a.readInt() - - bb[(2*i)+1] = integer & 0xFF - bb[2*i] = (integer >> 8) & 0xFF - } - - return byteToHex[bb[0]] + byteToHex[bb[1]] + - byteToHex[bb[2]] + byteToHex[bb[3]] + "-" + - byteToHex[bb[4]] + byteToHex[bb[5]] + "-" + - byteToHex[bb[6]] + byteToHex[bb[7]] + "-" + - byteToHex[bb[8]] + byteToHex[bb[9]] + "-" + - byteToHex[bb[10]] + byteToHex[bb[11]] + - byteToHex[bb[12]] + byteToHex[bb[13]] + - byteToHex[bb[14]] + byteToHex[bb[15]] -} - -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { - target := atn.states[trg] - - switch typeIndex { - case TransitionEPSILON: - return NewEpsilonTransition(target, -1) - - case TransitionRANGE: - if arg3 != 0 { - return NewRangeTransition(target, TokenEOF, arg2) - } - - return NewRangeTransition(target, arg1, arg2) - - case TransitionRULE: - return NewRuleTransition(atn.states[arg1], arg2, arg3, target) - - case TransitionPREDICATE: - 
return NewPredicateTransition(target, arg1, arg2, arg3 != 0) - - case TransitionPRECEDENCE: - return NewPrecedencePredicateTransition(target, arg1) - - case TransitionATOM: - if arg3 != 0 { - return NewAtomTransition(target, TokenEOF) - } - - return NewAtomTransition(target, arg1) - - case TransitionACTION: - return NewActionTransition(target, arg1, arg2, arg3 != 0) - - case TransitionSET: - return NewSetTransition(target, sets[arg1]) - - case TransitionNOTSET: - return NewNotSetTransition(target, sets[arg1]) - - case TransitionWILDCARD: - return NewWildcardTransition(target) - } - - panic("The specified transition type is not valid.") -} - -func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { - var s ATNState - - switch typeIndex { - case ATNStateInvalidType: - return nil - - case ATNStateBasic: - s = NewBasicState() - - case ATNStateRuleStart: - s = NewRuleStartState() - - case ATNStateBlockStart: - s = NewBasicBlockStartState() - - case ATNStatePlusBlockStart: - s = NewPlusBlockStartState() - - case ATNStateStarBlockStart: - s = NewStarBlockStartState() - - case ATNStateTokenStart: - s = NewTokensStartState() - - case ATNStateRuleStop: - s = NewRuleStopState() - - case ATNStateBlockEnd: - s = NewBlockEndState() - - case ATNStateStarLoopBack: - s = NewStarLoopbackState() - - case ATNStateStarLoopEntry: - s = NewStarLoopEntryState() - - case ATNStatePlusLoopBack: - s = NewPlusLoopbackState() - - case ATNStateLoopEnd: - s = NewLoopEndState() - - default: - panic(fmt.Sprintf("state type %d is invalid", typeIndex)) - } - - s.SetRuleIndex(ruleIndex) - - return s -} - -func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { - switch typeIndex { - case LexerActionTypeChannel: - return NewLexerChannelAction(data1) - - case LexerActionTypeCustom: - return NewLexerCustomAction(data1, data2) - - case LexerActionTypeMode: - return NewLexerModeAction(data1) - - case LexerActionTypeMore: - return LexerMoreActionINSTANCE - 
- case LexerActionTypePopMode: - return LexerPopModeActionINSTANCE - - case LexerActionTypePushMode: - return NewLexerPushModeAction(data1) - - case LexerActionTypeSkip: - return LexerSkipActionINSTANCE - - case LexerActionTypeType: - return NewLexerTypeAction(data1) - - default: - panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go deleted file mode 100644 index d5454d6..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) - -type IATNSimulator interface { - SharedContextCache() *PredictionContextCache - ATN() *ATN - DecisionToDFA() []*DFA -} - -type BaseATNSimulator struct { - atn *ATN - sharedContextCache *PredictionContextCache - decisionToDFA []*DFA -} - -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { - if b.sharedContextCache == nil { - return context - } - - visited := make(map[PredictionContext]PredictionContext) - - return getCachedBasePredictionContext(context, b.sharedContextCache, visited) -} - -func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { - return b.sharedContextCache -} - -func (b *BaseATNSimulator) ATN() *ATN { - return b.atn -} - -func (b *BaseATNSimulator) DecisionToDFA() []*DFA { - return b.decisionToDFA -} diff --git 
a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go deleted file mode 100644 index 563d5db..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -// Constants for serialization. -const ( - ATNStateInvalidType = 0 - ATNStateBasic = 1 - ATNStateRuleStart = 2 - ATNStateBlockStart = 3 - ATNStatePlusBlockStart = 4 - ATNStateStarBlockStart = 5 - ATNStateTokenStart = 6 - ATNStateRuleStop = 7 - ATNStateBlockEnd = 8 - ATNStateStarLoopBack = 9 - ATNStateStarLoopEntry = 10 - ATNStatePlusLoopBack = 11 - ATNStateLoopEnd = 12 - - ATNStateInvalidStateNumber = -1 -) - -var ATNStateInitialNumTransitions = 4 - -type ATNState interface { - GetEpsilonOnlyTransitions() bool - - GetRuleIndex() int - SetRuleIndex(int) - - GetNextTokenWithinRule() *IntervalSet - SetNextTokenWithinRule(*IntervalSet) - - GetATN() *ATN - SetATN(*ATN) - - GetStateType() int - - GetStateNumber() int - SetStateNumber(int) - - GetTransitions() []Transition - SetTransitions([]Transition) - AddTransition(Transition, int) - - String() string - hash() int -} - -type BaseATNState struct { - // NextTokenWithinRule caches lookahead during parsing. Not used during construction. - NextTokenWithinRule *IntervalSet - - // atn is the current ATN. - atn *ATN - - epsilonOnlyTransitions bool - - // ruleIndex tracks the Rule index because there are no Rule objects at runtime. - ruleIndex int - - stateNumber int - - stateType int - - // Track the transitions emanating from this ATN state. 
- transitions []Transition -} - -func NewBaseATNState() *BaseATNState { - return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} -} - -func (as *BaseATNState) GetRuleIndex() int { - return as.ruleIndex -} - -func (as *BaseATNState) SetRuleIndex(v int) { - as.ruleIndex = v -} -func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { - return as.epsilonOnlyTransitions -} - -func (as *BaseATNState) GetATN() *ATN { - return as.atn -} - -func (as *BaseATNState) SetATN(atn *ATN) { - as.atn = atn -} - -func (as *BaseATNState) GetTransitions() []Transition { - return as.transitions -} - -func (as *BaseATNState) SetTransitions(t []Transition) { - as.transitions = t -} - -func (as *BaseATNState) GetStateType() int { - return as.stateType -} - -func (as *BaseATNState) GetStateNumber() int { - return as.stateNumber -} - -func (as *BaseATNState) SetStateNumber(stateNumber int) { - as.stateNumber = stateNumber -} - -func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { - return as.NextTokenWithinRule -} - -func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { - as.NextTokenWithinRule = v -} - -func (as *BaseATNState) hash() int { - return as.stateNumber -} - -func (as *BaseATNState) String() string { - return strconv.Itoa(as.stateNumber) -} - -func (as *BaseATNState) equals(other interface{}) bool { - if ot, ok := other.(ATNState); ok { - return as.stateNumber == ot.GetStateNumber() - } - - return false -} - -func (as *BaseATNState) isNonGreedyExitState() bool { - return false -} - -func (as *BaseATNState) AddTransition(trans Transition, index int) { - if len(as.transitions) == 0 { - as.epsilonOnlyTransitions = trans.getIsEpsilon() - } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { - as.epsilonOnlyTransitions = false - } - - if index == -1 { - as.transitions = append(as.transitions, trans) - } else { - as.transitions = append(as.transitions[:index], append([]Transition{trans}, 
as.transitions[index:]...)...) - // TODO: as.transitions.splice(index, 1, trans) - } -} - -type BasicState struct { - *BaseATNState -} - -func NewBasicState() *BasicState { - b := NewBaseATNState() - - b.stateType = ATNStateBasic - - return &BasicState{BaseATNState: b} -} - -type DecisionState interface { - ATNState - - getDecision() int - setDecision(int) - - getNonGreedy() bool - setNonGreedy(bool) -} - -type BaseDecisionState struct { - *BaseATNState - decision int - nonGreedy bool -} - -func NewBaseDecisionState() *BaseDecisionState { - return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} -} - -func (s *BaseDecisionState) getDecision() int { - return s.decision -} - -func (s *BaseDecisionState) setDecision(b int) { - s.decision = b -} - -func (s *BaseDecisionState) getNonGreedy() bool { - return s.nonGreedy -} - -func (s *BaseDecisionState) setNonGreedy(b bool) { - s.nonGreedy = b -} - -type BlockStartState interface { - DecisionState - - getEndState() *BlockEndState - setEndState(*BlockEndState) -} - -// BaseBlockStartState is the start of a regular (...) block. -type BaseBlockStartState struct { - *BaseDecisionState - endState *BlockEndState -} - -func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} -} - -func (s *BaseBlockStartState) getEndState() *BlockEndState { - return s.endState -} - -func (s *BaseBlockStartState) setEndState(b *BlockEndState) { - s.endState = b -} - -type BasicBlockStartState struct { - *BaseBlockStartState -} - -func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} -} - -// BlockEndState is a terminal node of a simple (a|b|c) block. 
-type BlockEndState struct { - *BaseATNState - startState ATNState -} - -func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} -} - -// RuleStopState is the last node in the ATN for a rule, unless that rule is the -// start symbol. In that case, there is one transition to EOF. Later, we might -// encode references to all calls to this rule to compute FOLLOW sets for error -// handling. -type RuleStopState struct { - *BaseATNState -} - -func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} -} - -type RuleStartState struct { - *BaseATNState - stopState ATNState - isPrecedenceRule bool -} - -func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} -} - -// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two -// transitions: one to the loop back to start of the block, and one to exit. -type PlusLoopbackState struct { - *BaseDecisionState -} - -func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} -} - -// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a -// decision state; we don't use it for code generation. Somebody might need it, -// it is included for completeness. In reality, PlusLoopbackState is the real -// decision-making node for A+. -type PlusBlockStartState struct { - *BaseBlockStartState - loopBackState ATNState -} - -func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} -} - -// StarBlockStartState is the block that begins a closure loop. 
-type StarBlockStartState struct { - *BaseBlockStartState -} - -func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} -} - -type StarLoopbackState struct { - *BaseATNState -} - -func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} -} - -type StarLoopEntryState struct { - *BaseDecisionState - loopBackState ATNState - precedenceRuleDecision bool -} - -func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} -} - -// LoopEndState marks the end of a * or + loop. -type LoopEndState struct { - *BaseATNState - loopBackState ATNState -} - -func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} -} - -// TokensStartState is the Tokens rule start state linking to each lexer rule start state. -type TokensStartState struct { - *BaseDecisionState -} - -func NewTokensStartState() *TokensStartState { - b := NewBaseDecisionState() - - b.stateType = ATNStateTokenStart - - return &TokensStartState{BaseDecisionState: b} -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go deleted file mode 100644 index a7b4897..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -// Represent the type of recognizer an ATN applies to. -const ( - ATNTypeLexer = 0 - ATNTypeParser = 1 -) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go deleted file mode 100644 index 70c1207..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type CharStream interface { - IntStream - GetText(int, int) string - GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go deleted file mode 100644 index 330ff8f..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// TokenFactory creates CommonToken objects. -type TokenFactory interface { - Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token -} - -// CommonTokenFactory is the default TokenFactory implementation. -type CommonTokenFactory struct { - // copyText indicates whether CommonToken.setText should be called after - // constructing tokens to explicitly set the text. This is useful for cases - // where the input stream might not be able to provide arbitrary substrings of - // text from the input after the lexer creates a token (e.g. 
the - // implementation of CharStream.GetText in UnbufferedCharStream panics an - // UnsupportedOperationException). Explicitly setting the token text allows - // Token.GetText to be called at any time regardless of the input stream - // implementation. - // - // The default value is false to avoid the performance and memory overhead of - // copying text for every token unless explicitly requested. - copyText bool -} - -func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { - return &CommonTokenFactory{copyText: copyText} -} - -// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not -// explicitly copy token text when constructing tokens. -var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) - -func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token { - t := NewCommonToken(source, ttype, channel, start, stop) - - t.line = line - t.column = column - - if text != "" { - t.SetText(text) - } else if c.copyText && source.charStream != nil { - t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop))) - } - - return t -} - -func (c *CommonTokenFactory) createThin(ttype int, text string) Token { - t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) - t.SetText(text) - - return t -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go deleted file mode 100644 index c90e9b8..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "strconv" -) - -// CommonTokenStream is an implementation of TokenStream that loads tokens from -// a TokenSource on-demand and places the tokens in a buffer to provide access -// to any previous token by index. This token stream ignores the value of -// Token.getChannel. If your parser requires the token stream filter tokens to -// only those on a particular channel, such as Token.DEFAULT_CHANNEL or -// Token.HIDDEN_CHANNEL, use a filtering token stream such a CommonTokenStream. -type CommonTokenStream struct { - channel int - - // fetchedEOF indicates whether the Token.EOF token has been fetched from - // tokenSource and added to tokens. This field improves performance for the - // following cases: - // - // consume: The lookahead check in consume to preven consuming the EOF symbol is - // optimized by checking the values of fetchedEOF and p instead of calling LA. - // - // fetch: The check to prevent adding multiple EOF symbols into tokens is - // trivial with bt field. - fetchedEOF bool - - // index indexs into tokens of the current token (next token to consume). - // tokens[p] should be LT(1). It is set to -1 when the stream is first - // constructed or when SetTokenSource is called, indicating that the first token - // has not yet been fetched from the token source. For additional information, - // see the documentation of IntStream for a description of initializing methods. - index int - - // tokenSource is the TokenSource from which tokens for the bt stream are - // fetched. - tokenSource TokenSource - - // tokens is all tokens fetched from the token source. The list is considered a - // complete view of the input once fetchedEOF is set to true. 
- tokens []Token -} - -func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { - return &CommonTokenStream{ - channel: channel, - index: -1, - tokenSource: lexer, - tokens: make([]Token, 0), - } -} - -func (c *CommonTokenStream) GetAllTokens() []Token { - return c.tokens -} - -func (c *CommonTokenStream) Mark() int { - return 0 -} - -func (c *CommonTokenStream) Release(marker int) {} - -func (c *CommonTokenStream) reset() { - c.Seek(0) -} - -func (c *CommonTokenStream) Seek(index int) { - c.lazyInit() - c.index = c.adjustSeekIndex(index) -} - -func (c *CommonTokenStream) Get(index int) Token { - c.lazyInit() - - return c.tokens[index] -} - -func (c *CommonTokenStream) Consume() { - SkipEOFCheck := false - - if c.index >= 0 { - if c.fetchedEOF { - // The last token in tokens is EOF. Skip the check if p indexes any fetched. - // token except the last. - SkipEOFCheck = c.index < len(c.tokens)-1 - } else { - // No EOF token in tokens. Skip the check if p indexes a fetched token. - SkipEOFCheck = c.index < len(c.tokens) - } - } else { - // Not yet initialized - SkipEOFCheck = false - } - - if !SkipEOFCheck && c.LA(1) == TokenEOF { - panic("cannot consume EOF") - } - - if c.Sync(c.index + 1) { - c.index = c.adjustSeekIndex(c.index + 1) - } -} - -// Sync makes sure index i in tokens has a token and returns true if a token is -// located at index i and otherwise false. -func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? - - if n > 0 { - fetched := c.fetch(n) - return fetched >= n - } - - return true -} - -// fetch adds n elements to buffer and returns the actual number of elements -// added to the buffer. 
-func (c *CommonTokenStream) fetch(n int) int { - if c.fetchedEOF { - return 0 - } - - for i := 0; i < n; i++ { - t := c.tokenSource.NextToken() - - t.SetTokenIndex(len(c.tokens)) - c.tokens = append(c.tokens, t) - - if t.GetTokenType() == TokenEOF { - c.fetchedEOF = true - - return i + 1 - } - } - - return n -} - -// GetTokens gets all tokens from start to stop inclusive. -func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { - if start < 0 || stop < 0 { - return nil - } - - c.lazyInit() - - subset := make([]Token, 0) - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - for i := start; i < stop; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - if types == nil || types.contains(t.GetTokenType()) { - subset = append(subset, t) - } - } - - return subset -} - -func (c *CommonTokenStream) LA(i int) int { - return c.LT(i).GetTokenType() -} - -func (c *CommonTokenStream) lazyInit() { - if c.index == -1 { - c.setup() - } -} - -func (c *CommonTokenStream) setup() { - c.Sync(0) - c.index = c.adjustSeekIndex(0) -} - -func (c *CommonTokenStream) GetTokenSource() TokenSource { - return c.tokenSource -} - -// SetTokenSource resets the c token stream by setting its token source. -func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { - c.tokenSource = tokenSource - c.tokens = make([]Token, 0) - c.index = -1 -} - -// NextTokenOnChannel returns the index of the next token on channel given a -// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. 
-func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { - c.Sync(i) - - if i >= len(c.tokens) { - return -1 - } - - token := c.tokens[i] - - for token.GetChannel() != c.channel { - if token.GetTokenType() == TokenEOF { - return -1 - } - - i++ - c.Sync(i) - token = c.tokens[i] - } - - return i -} - -// previousTokenOnChannel returns the index of the previous token on channel -// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if -// there are no tokens on channel between i and 0. -func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { - for i >= 0 && c.tokens[i].GetChannel() != channel { - i-- - } - - return i -} - -// GetHiddenTokensToRight collects all tokens on a specified channel to the -// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL -// or EOF. If channel is -1, it finds any non-default channel token. -func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) - } - - nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) - from := tokenIndex + 1 - - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token - var to int - - if nextOnChannel == -1 { - to = len(c.tokens) - 1 - } else { - to = nextOnChannel - } - - return c.filterForChannel(from, to, channel) -} - -// GetHiddenTokensToLeft collects all tokens on channel to the left of the -// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is -// -1, it finds any non default channel token. -func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) - } - - prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) - - if prevOnChannel == tokenIndex-1 { - return nil - } - - // If there are none on channel to the left and prevOnChannel == -1 then from = 0 - from := prevOnChannel + 1 - to := tokenIndex - 1 - - return c.filterForChannel(from, to, channel) -} - -func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { - hidden := make([]Token, 0) - - for i := left; i < right+1; i++ { - t := c.tokens[i] - - if channel == -1 { - if t.GetChannel() != LexerDefaultTokenChannel { - hidden = append(hidden, t) - } - } else if t.GetChannel() == channel { - hidden = append(hidden, t) - } - } - - if len(hidden) == 0 { - return nil - } - - return hidden -} - -func (c *CommonTokenStream) GetSourceName() string { - return c.tokenSource.GetSourceName() -} - -func (c *CommonTokenStream) Size() int { - return len(c.tokens) -} - -func (c *CommonTokenStream) Index() int { - return c.index -} - -func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) -} - -func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { - if start == nil || end == nil { - return "" - } - - return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) -} - -func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { - return c.GetTextFromInterval(interval.GetSourceInterval()) -} - -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { - c.lazyInit() - c.Fill() - - if interval == nil { - interval = NewInterval(0, len(c.tokens)-1) - } - - start := interval.Start - stop := interval.Stop - - if start < 0 || stop < 0 { - return "" - } - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - s := "" - - for i := start; i < stop+1; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - s += t.GetText() - } - - return s -} - -// Fill gets all 
tokens from the lexer until EOF. -func (c *CommonTokenStream) Fill() { - c.lazyInit() - - for c.fetch(1000) == 1000 { - continue - } -} - -func (c *CommonTokenStream) adjustSeekIndex(i int) int { - return c.NextTokenOnChannel(i, c.channel) -} - -func (c *CommonTokenStream) LB(k int) Token { - if k == 0 || c.index-k < 0 { - return nil - } - - i := c.index - n := 1 - - // Find k good tokens looking backward - for n <= k { - // Skip off-channel tokens - i = c.previousTokenOnChannel(i-1, c.channel) - n++ - } - - if i < 0 { - return nil - } - - return c.tokens[i] -} - -func (c *CommonTokenStream) LT(k int) Token { - c.lazyInit() - - if k == 0 { - return nil - } - - if k < 0 { - return c.LB(-k) - } - - i := c.index - n := 1 // We know tokens[n] is valid - - // Find k good tokens - for n < k { - // Skip off-channel tokens, but make sure to not look past EOF - if c.Sync(i + 1) { - i = c.NextTokenOnChannel(i+1, c.channel) - } - - n++ - } - - return c.tokens[i] -} - -// getNumberOfOnChannelTokens counts EOF once. -func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { - var n int - - c.Fill() - - for i := 0; i < len(c.tokens); i++ { - t := c.tokens[i] - - if t.GetChannel() == c.channel { - n++ - } - - if t.GetTokenType() == TokenEOF { - break - } - } - - return n -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go deleted file mode 100644 index d6079aa..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "sort" - "sync" -) - -type DFA struct { - // atnStartState is the ATN state in which this was created - atnStartState DecisionState - - decision int - - // states is all the DFA states. 
Use Map to get the old state back; Set can only - // indicate whether it is there. - states map[int]*DFAState - statesMu sync.RWMutex - - s0 *DFAState - s0Mu sync.RWMutex - - // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. - // True if the DFA is for a precedence decision and false otherwise. - precedenceDfa bool -} - -func NewDFA(atnStartState DecisionState, decision int) *DFA { - return &DFA{ - atnStartState: atnStartState, - decision: decision, - states: make(map[int]*DFAState), - } -} - -// getPrecedenceStartState gets the start state for the current precedence and -// returns the start state corresponding to the specified precedence if a start -// state exists for the specified precedence and nil otherwise. d must be a -// precedence DFA. See also isPrecedenceDfa. -func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { - if !d.precedenceDfa { - panic("only precedence DFAs may contain a precedence start state") - } - - d.s0Mu.RLock() - defer d.s0Mu.RUnlock() - - // s0.edges is never nil for a precedence DFA - if precedence < 0 || precedence >= len(d.s0.edges) { - return nil - } - - return d.s0.edges[precedence] -} - -// setPrecedenceStartState sets the start state for the current precedence. d -// must be a precedence DFA. See also isPrecedenceDfa. -func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { - if !d.precedenceDfa { - panic("only precedence DFAs may contain a precedence start state") - } - - if precedence < 0 { - return - } - - d.s0Mu.Lock() - defer d.s0Mu.Unlock() - - // Synchronization on s0 here is ok. When the DFA is turned into a - // precedence DFA, s0 will be initialized once and not updated again. s0.edges - // is never nil for a precedence DFA. - if precedence >= len(d.s0.edges) { - d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...) - } - - d.s0.edges[precedence] = startState -} - -// setPrecedenceDfa sets whether d is a precedence DFA. 
If precedenceDfa differs -// from the current DFA configuration, then d.states is cleared, the initial -// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to -// store the start states for individual precedence values if precedenceDfa is -// true or nil otherwise, and d.precedenceDfa is updated. -func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { - if d.precedenceDfa != precedenceDfa { - d.states = make(map[int]*DFAState) - - if precedenceDfa { - precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) - - precedenceState.edges = make([]*DFAState, 0) - precedenceState.isAcceptState = false - precedenceState.requiresFullContext = false - d.s0 = precedenceState - } else { - d.s0 = nil - } - - d.precedenceDfa = precedenceDfa - } -} - -func (d *DFA) getS0() *DFAState { - d.s0Mu.RLock() - defer d.s0Mu.RUnlock() - return d.s0 -} - -func (d *DFA) setS0(s *DFAState) { - d.s0Mu.Lock() - defer d.s0Mu.Unlock() - d.s0 = s -} - -func (d *DFA) getState(hash int) (*DFAState, bool) { - d.statesMu.RLock() - defer d.statesMu.RUnlock() - s, ok := d.states[hash] - return s, ok -} - -func (d *DFA) setState(hash int, state *DFAState) { - d.statesMu.Lock() - defer d.statesMu.Unlock() - d.states[hash] = state -} - -func (d *DFA) numStates() int { - d.statesMu.RLock() - defer d.statesMu.RUnlock() - return len(d.states) -} - -type dfaStateList []*DFAState - -func (d dfaStateList) Len() int { return len(d) } -func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber } -func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] } - -// sortedStates returns the states in d sorted by their state number. 
-func (d *DFA) sortedStates() []*DFAState { - vs := make([]*DFAState, 0, len(d.states)) - - for _, v := range d.states { - vs = append(vs, v) - } - - sort.Sort(dfaStateList(vs)) - - return vs -} - -func (d *DFA) String(literalNames []string, symbolicNames []string) string { - if d.s0 == nil { - return "" - } - - return NewDFASerializer(d, literalNames, symbolicNames).String() -} - -func (d *DFA) ToLexerString() string { - if d.s0 == nil { - return "" - } - - return NewLexerDFASerializer(d).String() -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go deleted file mode 100644 index 4c0f690..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// DFASerializer is a DFA walker that knows how to dump them to serialized -// strings. 
-type DFASerializer struct { - dfa *DFA - literalNames []string - symbolicNames []string -} - -func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { - if literalNames == nil { - literalNames = make([]string, 0) - } - - if symbolicNames == nil { - symbolicNames = make([]string, 0) - } - - return &DFASerializer{ - dfa: dfa, - literalNames: literalNames, - symbolicNames: symbolicNames, - } -} - -func (d *DFASerializer) String() string { - if d.dfa.s0 == nil { - return "" - } - - buf := "" - states := d.dfa.sortedStates() - - for _, s := range states { - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += d.GetStateString(s) - buf += "-" - buf += d.getEdgeLabel(j) - buf += "->" - buf += d.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} - -func (d *DFASerializer) getEdgeLabel(i int) string { - if i == 0 { - return "EOF" - } else if d.literalNames != nil && i-1 < len(d.literalNames) { - return d.literalNames[i-1] - } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { - return d.symbolicNames[i-1] - } - - return strconv.Itoa(i - 1) -} - -func (d *DFASerializer) GetStateString(s *DFAState) string { - var a, b string - - if s.isAcceptState { - a = ":" - } - - if s.requiresFullContext { - b = "^" - } - - baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b - - if s.isAcceptState { - if s.predicates != nil { - return baseStateStr + "=>" + fmt.Sprint(s.predicates) - } - - return baseStateStr + "=>" + fmt.Sprint(s.prediction) - } - - return baseStateStr -} - -type LexerDFASerializer struct { - *DFASerializer -} - -func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { - return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} -} - -func (l *LexerDFASerializer) getEdgeLabel(i int) string { - return "'" + string(i) + "'" -} - -func (l *LexerDFASerializer) 
String() string { - if l.dfa.s0 == nil { - return "" - } - - buf := "" - states := l.dfa.sortedStates() - - for i := 0; i < len(states); i++ { - s := states[i] - - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += l.GetStateString(s) - buf += "-" - buf += l.getEdgeLabel(j) - buf += "->" - buf += l.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go deleted file mode 100644 index 38e918a..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// PredPrediction maps a predicate to a predicted alternative. -type PredPrediction struct { - alt int - pred SemanticContext -} - -func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { - return &PredPrediction{alt: alt, pred: pred} -} - -func (p *PredPrediction) String() string { - return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" -} - -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, -// Ullman p. 117 says: "The DFA uses its state to keep track of all possible -// states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the -// subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. 
We need to track the alt predicted by each state -// as well, however. More importantly, we need to maintain a stack of states, -// tracking the closure operations as they jump from rule to rule, emulating -// rule invocations (method calls). I have to add a stack to simulate the proper -// lookahead sequences for the underlying LL grammar from which the ATN was -// derived. -// -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules -// (if any) followed to arrive at that state. -// -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was -// reached via a different set of rule invocations. -type DFAState struct { - stateNumber int - configs ATNConfigSet - - // edges elements point to the target of the symbol. Shift up by 1 so (-1) - // Token.EOF maps to the first element. - edges []*DFAState - - isAcceptState bool - - // prediction is the ttype we match or alt we predict if the state is accept. - // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or - // requiresFullContext. - prediction int - - lexerActionExecutor *LexerActionExecutor - - // requiresFullContext indicates it was created during an SLL prediction that - // discovered a conflict between the configurations in the state. Future - // ParserATNSimulator.execATN invocations immediately jump doing - // full context prediction if true. - requiresFullContext bool - - // predicates is the predicates associated with the ATN configurations of the - // DFA state during SLL parsing. When we have predicates, requiresFullContext - // is false, since full context prediction evaluates predicates on-the-fly. If - // d is - // not nil, then prediction is ATN.INVALID_ALT_NUMBER. - // - // We only use these for non-requiresFullContext but conflicting states. 
That - // means we know from the context (it's $ or we don't dip into outer context) - // that it's an ambiguity not a conflict. - // - // This list is computed by - // ParserATNSimulator.predicateDFAState. - predicates []*PredPrediction -} - -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { - if configs == nil { - configs = NewBaseATNConfigSet(false) - } - - return &DFAState{configs: configs, stateNumber: stateNumber} -} - -// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. -func (d *DFAState) GetAltSet() *Set { - alts := NewSet(nil, nil) - - if d.configs != nil { - for _, c := range d.configs.GetItems() { - alts.add(c.GetAlt()) - } - } - - if alts.length() == 0 { - return nil - } - - return alts -} - -func (d *DFAState) setPrediction(v int) { - d.prediction = v -} - -// equals returns whether d equals other. Two DFAStates are equal if their ATN -// configuration sets are the same. This method is used to see if a state -// already exists. -// -// Because the number of alternatives and number of ATN configurations are -// finite, there is a finite number of DFA states that can be processed. This is -// necessary to show that the algorithm terminates. -// -// Cannot test the DFA state numbers here because in -// ParserATNSimulator.addDFAState we need to know if any other state exists that -// has d exact set of ATN configurations. The stateNumber is irrelevant. 
-func (d *DFAState) equals(other interface{}) bool { - if d == other { - return true - } else if _, ok := other.(*DFAState); !ok { - return false - } - - return d.configs.Equals(other.(*DFAState).configs) -} - -func (d *DFAState) String() string { - var s string - if d.isAcceptState { - if d.predicates != nil { - s = "=>" + fmt.Sprint(d.predicates) - } else { - s = "=>" + fmt.Sprint(d.prediction) - } - } - - return fmt.Sprintf("%d:%s%s", fmt.Sprint(d.configs), s) -} - -func (d *DFAState) hash() int { - h := murmurInit(11) - - c := 1 - if d.isAcceptState { - if d.predicates != nil { - for _, p := range d.predicates { - h = murmurUpdate(h, p.alt) - h = murmurUpdate(h, p.pred.hash()) - c += 2 - } - } else { - h = murmurUpdate(h, d.prediction) - c += 1 - } - } - - h = murmurUpdate(h, d.configs.hash()) - return murmurFinish(h, c) -} \ No newline at end of file diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go deleted file mode 100644 index 1fec43d..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// -// This implementation of {@link ANTLRErrorListener} can be used to identify -// certain potential correctness and performance problems in grammars. "reports" -// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate -// message. -// -//
    -//
  • Ambiguities: These are cases where more than one path through the -// grammar can Match the input.
  • -//
  • Weak context sensitivity: These are cases where full-context -// prediction resolved an SLL conflict to a unique alternative which equaled the -// minimum alternative of the SLL conflict.
  • -//
  • Strong (forced) context sensitivity: These are cases where the -// full-context prediction resolved an SLL conflict to a unique alternative, -// and the minimum alternative of the SLL conflict was found to not be -// a truly viable alternative. Two-stage parsing cannot be used for inputs where -// d situation occurs.
  • -//
- -type DiagnosticErrorListener struct { - *DefaultErrorListener - - exactOnly bool -} - -func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { - - n := new(DiagnosticErrorListener) - - // whether all ambiguities or only exact ambiguities are Reported. - n.exactOnly = exactOnly - return n -} - -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if d.exactOnly && !exact { - return - } - msg := "reportAmbiguity d=" + - d.getDecisionDescription(recognizer, dfa) + - ": ambigAlts=" + - d.getConflictingAlts(ambigAlts, configs).String() + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - - msg := "reportAttemptingFullContext d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - msg := "reportContextSensitivity d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { - decision := dfa.decision - ruleIndex := dfa.atnStartState.GetRuleIndex() - - ruleNames := recognizer.GetRuleNames() - if ruleIndex < 0 || ruleIndex >= len(ruleNames) { - return strconv.Itoa(decision) - } - ruleName := ruleNames[ruleIndex] - if 
ruleName == "" { - return strconv.Itoa(decision) - } - return strconv.Itoa(decision) + " (" + ruleName + ")" -} - -// -// Computes the set of conflicting or ambiguous alternatives from a -// configuration set, if that information was not already provided by the -// parser. -// -// @param ReportedAlts The set of conflicting or ambiguous alternatives, as -// Reported by the parser. -// @param configs The conflicting or ambiguous configuration set. -// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise -// returns the set of alternatives represented in {@code configs}. -// -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { - if ReportedAlts != nil { - return ReportedAlts - } - result := NewBitSet() - for _, c := range set.GetItems() { - result.add(c.GetAlt()) - } - - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go deleted file mode 100644 index 028e1a9..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "os" - "strconv" -) - -// Provides an empty default implementation of {@link ANTLRErrorListener}. The -// default implementation of each method does nothing, but can be overridden as -// necessary. 
- -type ErrorListener interface { - SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) -} - -type DefaultErrorListener struct { -} - -func NewDefaultErrorListener() *DefaultErrorListener { - return new(DefaultErrorListener) -} - -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { -} - -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { -} - -type ConsoleErrorListener struct { - *DefaultErrorListener -} - -func NewConsoleErrorListener() *ConsoleErrorListener { - return new(ConsoleErrorListener) -} - -// -// Provides a default instance of {@link ConsoleErrorListener}. -// -var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() - -// -// {@inheritDoc} -// -//

-// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format.

-// -//
-// line line:charPositionInLine msg
-// 
-// -func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go deleted file mode 100644 index 977a6e4..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go +++ /dev/null @@ -1,758 +0,0 @@ -// Copyright (c) 
2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - inErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -// -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //inErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. - // - d.lastErrorIndex = -1 - d.lastErrorStates = nil - return d -} - -//

The default implementation simply calls {@link //endErrorCondition} to -// ensure that the handler is not in error recovery mode.

-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -// -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// -// {@inheritDoc} -// -//

The default implementation simply calls {@link //endErrorCondition}.

-// -func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// -// {@inheritDoc} -// -//

The default implementation returns immediately if the handler is already -// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} -// and dispatches the Reporting task based on the runtime type of {@code e} -// according to the following table.

-// -//
    -//
  • {@link NoViableAltException}: Dispatches the call to -// {@link //ReportNoViableAlternative}
  • -//
  • {@link InputMisMatchException}: Dispatches the call to -// {@link //ReportInputMisMatch}
  • -//
  • {@link FailedPredicateException}: Dispatches the call to -// {@link //ReportFailedPredicate}
  • -//
  • All other types: calls {@link Parser//NotifyErrorListeners} to Report -// the exception
  • -//
-// -func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { - // if we've already Reported an error and have not Matched a token - // yet successfully, don't Report any errors. - if d.inErrorRecoveryMode(recognizer) { - return // don't Report spurious errors - } - d.beginErrorCondition(recognizer) - - switch t := e.(type) { - default: - fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) - // fmt.Println(e.stack) - recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) - case *NoViableAltException: - d.ReportNoViableAlternative(recognizer, t) - case *InputMisMatchException: - d.ReportInputMisMatch(recognizer, t) - case *FailedPredicateException: - d.ReportFailedPredicate(recognizer, t) - } -} - -// {@inheritDoc} -// -//

The default implementation reSynchronizes the parser by consuming tokens -// until we find one in the reSynchronization set--loosely the set of tokens -// that can follow the current rule.

-// -func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//

Implements Jim Idle's magic Sync mechanism in closures and optional -// subrules. E.g.,

-// -//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-// 
-// -// At the start of a sub rule upon error, {@link //Sync} performs single -// token deletion, if possible. If it can't do that, it bails on the current -// rule and uses the default error recovery, which consumes until the -// reSynchronization set of the current rule. -// -//

If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block -// with an empty alternative), then the expected set includes what follows -// the subrule.

-// -//

During loop iteration, it consumes until it sees a token that can start a -// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to -// stay in the loop as long as possible.

-// -//

ORIGINS

-// -//

Previous versions of ANTLR did a poor job of their recovery within loops. -// A single mismatch token or missing token would force the parser to bail -// out of the entire rules surrounding the loop. So, for rule

-// -//
-// classfunc : 'class' ID '{' member* '}'
-// 
-// -// input with an extra token between members would force the parser to -// consume until it found the next class definition rather than the next -// member definition of the current class. -// -//

This functionality cost a little bit of effort because the parser has to -// compare token set at the start of the loop and at each iteration. If for -// some reason speed is suffering for you, you can turn off d -// functionality by simply overriding d method as a blank { }.

-// -func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.inErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "" - } else { - input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) - } - } else { - input = "" - } - msg := "no viable alternative at input " + d.escapeWSAndQuote(input) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. 
-// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { - ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] - msg := "rule " + ruleName + " " + e.message - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This method is called to Report a syntax error which requires the removal -// of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. -// -//

This method is called when {@link //singleTokenDeletion} identifies -// single-token deletion as a viable recovery strategy for a mismatched -// input error.

-// -//

The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//

This method is called when {@link //singleTokenInsertion} identifies -// single-token insertion as a viable recovery strategy for a mismatched -// input error.

-// -//

The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//

The default implementation attempts to recover from the mismatched input -// by using single token insertion and deletion as described below. If the -// recovery attempt fails, d method panics an -// {@link InputMisMatchException}.

-// -//

EXTRA TOKEN (single token deletion)

-// -//

{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the -// right token, however, then assume {@code LA(1)} is some extra spurious -// token and delete it. Then consume and return the next token (which was -// the {@code LA(2)} token) as the successful result of the Match operation.

-// -//

This recovery strategy is implemented by {@link -// //singleTokenDeletion}.

-// -//

MISSING TOKEN (single token insertion)

-// -//

If current token (at {@code LA(1)}) is consistent with what could come -// after the expected {@code LA(1)} token, then assume the token is missing -// and use the parser's {@link TokenFactory} to create it on the fly. The -// "insertion" is performed by returning the created token as the successful -// result of the Match operation.

-// -//

This recovery strategy is implemented by {@link -// //singleTokenInsertion}.

-// -//

EXAMPLE

-// -//

For example, Input {@code i=(3} is clearly missing the {@code ')'}. When -// the parser returns from the nested call to {@code expr}, it will have -// call chain:

-// -//
-// stat &rarr expr &rarr atom
-// 
-// -// and it will be trying to Match the {@code ')'} at d point in the -// derivation: -// -//
-// => ID '=' '(' INT ')' ('+' atom)* ''
-// ^
-// 
-// -// The attempt to Match {@code ')'} will fail when it sees {@code ''} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -// -func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { - // SINGLE TOKEN DELETION - MatchedSymbol := d.SingleTokenDeletion(recognizer) - if MatchedSymbol != nil { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.Consume() - return MatchedSymbol - } - // SINGLE TOKEN INSERTION - if d.SingleTokenInsertion(recognizer) { - return d.GetMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) -} - -// -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//

This method determines whether or not single-token insertion is viable by -// checking if the {@code LA(1)} input symbol could be successfully Matched -// if it were instead the {@code LA(2)} symbol. If d method returns -// {@code true}, the caller is responsible for creating and inserting a -// token with the correct type to produce d behavior.

-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -// -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//

If the single-token deletion is successful, d method calls -// {@link //ReportUnwantedToken} to Report the error, followed by -// {@link Parser//consume} to actually "delete" the extraneous token. Then, -// before returning {@link //ReportMatch} is called to signal a successful -// Match.

-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -// -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. 
-// -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "" - } else { - ln := recognizer.GetLiteralNames() - if expectedTokenType > 0 && expectedTokenType < len(ln) { - tokenText = "" - } else { - tokenText = "" // TODO matches the JS impl - } - } - current := currentSymbol - lookback := recognizer.GetTokenStream().LT(-1) - if current.GetTokenType() == TokenEOF && lookback != nil { - current = lookback - } - - tf := recognizer.GetTokenFactory() - - return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) -} - -func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { - return recognizer.GetExpectedTokens() -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. -// -func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - return d.escapeWSAndQuote(s) -} - -func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - return "'" + s + "'" -} - -// Compute the error recovery set for the current rule. 
During -// rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to -// computing FIRST of what follows the rule reference in the -// enclosing rule. See LinearApproximator.FIRST(). -// This local follow set only includes tokens -// from within the rule i.e., the FIRST computation done by -// ANTLR stops at the end of a rule. -// -// EXAMPLE -// -// When you find a "no viable alt exception", the input is not -// consistent with any of the alternatives for rule r. The best -// thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. -// You don't want the exact set of viable next tokens because the -// input might just be missing a token--you might consume the -// rest of the input looking for one of the missing tokens. -// -// Consider grammar: -// -// a : '[' b ']' -// | '(' b ')' -// -// b : c '^' INT -// c : ID -// | INT -// -// -// At each rule invocation, the set of tokens that could follow -// that rule is pushed on a stack. Here are the various -// context-sensitive follow sets: -// -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' -// -// Upon erroneous input "[]", the call chain is -// -// a -> b -> c -// -// and, hence, the follow context stack is: -// -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c -// -// Notice that ')' is not included, because b would have to have -// been called from a different context in rule a for ')' to be -// included. -// -// For error recovery, we cannot consider FOLLOW(c) -// (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that -// could follow any reference in the call chain. We need to -// reSync to one of those tokens. Note that FOLLOW(c)='^' and if -// we reSync'd to that token, we'd consume until EOF. 
We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. -// In this case, for input "[]", LA(1) is ']' and in the set, so we would -// not consume anything. After printing an error, rule c would -// return normally. Rule b would not find the required '^' though. -// At this point, it gets a mismatched token error and panics an -// exception (since LA(1) is not in the viable following token -// set). The rule exception handler tries to recover, but finds -// the same recovery set and doesn't consume anything. Rule b -// exits normally returning to rule a. Now it finds the ']' (and -// with the successful Match exits errorRecovery mode). -// -// So, you can see that the parser walks up the call chain looking -// for the token that was a member of the recovery set. -// -// Errors are not generated in errorRecovery mode. -// -// ANTLR's error recovery mechanism is based upon original ideas: -// -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 -// -// Later, Josef Grosch had some good ideas: -// -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip -// -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. 
-// -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { - atn := recognizer.GetInterpreter().atn - ctx := recognizer.GetParserRuleContext() - recoverSet := NewIntervalSet() - for ctx != nil && ctx.GetInvokingState() >= 0 { - // compute what follows who invoked us - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) - recoverSet.addSet(follow) - ctx = ctx.GetParent().(ParserRuleContext) - } - recoverSet.removeOne(TokenEpsilon) - return recoverSet -} - -// Consume tokens until one Matches the given token set.// -func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { - ttype := recognizer.GetTokenStream().LA(1) - for ttype != TokenEOF && !set.contains(ttype) { - recognizer.Consume() - ttype = recognizer.GetTokenStream().LA(1) - } -} - -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors -// by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes -// that were not completed prior to encountering the error. -// -//

-// This error strategy is useful in the following scenarios.

-// -//
    -//
  • Two-stage parsing: This error strategy allows the first -// stage of two-stage parsing to immediately terminate if an error is -// encountered, and immediately fall back to the second stage. In addition to -// avoiding wasted work by attempting to recover from errors here, the empty -// implementation of {@link BailErrorStrategy//Sync} improves the performance of -// the first stage.
  • -//
  • Silent validation: When syntax errors are not being -// Reported or logged, and the parse result is simply ignored if errors occur, -// the {@link BailErrorStrategy} avoids wasting work on recovering from errors -// when the result will be ignored either way.
  • -//
-// -//

-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}

-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -// -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - context = context.GetParent().(ParserRuleContext) - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -// -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go deleted file mode 100644 index 2ef7492..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. 
In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. - t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//

If the state number is not known, b method returns -1.

- -// -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. -// -//

If the set of expected tokens is not known and could not be computed, -// b method returns {@code nil}.

-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. 
Reported by ReportNoViableAlternative() -// -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -// -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
- -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go deleted file mode 100644 index 842170c..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "io" - "os" -) - -// This is an InputStream that is loaded from a file all at once -// when you construct the object. 
- -type FileStream struct { - *InputStream - - filename string -} - -func NewFileStream(fileName string) (*FileStream, error) { - - buf := bytes.NewBuffer(nil) - - f, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer f.Close() - _, err = io.Copy(buf, f) - if err != nil { - return nil, err - } - - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) - - fs.InputStream = NewInputStream(s) - - return fs, nil - -} - -func (f *FileStream) GetSourceName() string { - return f.filename -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go deleted file mode 100644 index 5ff270f..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) 
- return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go deleted file mode 100644 index 438e0ea..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type IntStream interface { - Consume() - LA(int) int - Mark() int - Release(marker int) - Index() int - Seek(index int) - Size() int - GetSourceName() string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go deleted file mode 100644 index 510d909..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "strconv" - "strings" -) - -type Interval struct { - Start int - Stop int -} - -/* stop is not included! */ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i -} - -func (i *Interval) Contains(item int) bool { - return item >= i.Start && item < i.Stop -} - -func (i *Interval) String() string { - if i.Start == i.Stop-1 { - return strconv.Itoa(i.Start) - } - - return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) -} - -func (i *Interval) length() int { - return i.Stop - i.Start -} - -type IntervalSet struct { - intervals []*Interval - readOnly bool -} - -func NewIntervalSet() *IntervalSet { - - i := new(IntervalSet) - - i.intervals = nil - i.readOnly = false - - return i -} - -func (i *IntervalSet) first() int { - if len(i.intervals) == 0 { - return TokenInvalidType - } - - return i.intervals[0].Start -} - -func (i *IntervalSet) addOne(v int) { - i.addInterval(NewInterval(v, v+1)) -} - -func (i *IntervalSet) addRange(l, h int) { - i.addInterval(NewInterval(l, h+1)) -} - -func (i *IntervalSet) addInterval(v *Interval) { - if i.intervals == nil { - i.intervals = make([]*Interval, 0) - i.intervals = append(i.intervals, v) - } else { - // find insert pos - for k, interval := range i.intervals { - // distinct range -> insert - if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) - return - } else if v.Stop == interval.Start { - i.intervals[k].Start = v.Start - return - } else if v.Start <= interval.Stop { - i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) - - // if not applying to end, merge potential overlaps - if k < len(i.intervals)-1 { - l := i.intervals[k] - r := i.intervals[k+1] - // if r contained in l - if l.Stop >= r.Stop { - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) 
- } else if l.Stop >= r.Start { // partial overlap - i.intervals[k] = NewInterval(l.Start, r.Stop) - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) - } - } - return - } - } - // greater than any exiting - i.intervals = append(i.intervals, v) - } -} - -func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { - if other.intervals != nil { - for k := 0; k < len(other.intervals); k++ { - i2 := other.intervals[k] - i.addInterval(NewInterval(i2.Start, i2.Stop)) - } - } - return i -} - -func (i *IntervalSet) complement(start int, stop int) *IntervalSet { - result := NewIntervalSet() - result.addInterval(NewInterval(start, stop+1)) - for j := 0; j < len(i.intervals); j++ { - result.removeRange(i.intervals[j]) - } - return result -} - -func (i *IntervalSet) contains(item int) bool { - if i.intervals == nil { - return false - } - for k := 0; k < len(i.intervals); k++ { - if i.intervals[k].Contains(item) { - return true - } - } - return false -} - -func (i *IntervalSet) length() int { - len := 0 - - for _, v := range i.intervals { - len += v.length() - } - - return len -} - -func (i *IntervalSet) removeRange(v *Interval) { - if v.Start == v.Stop-1 { - i.removeOne(v.Start) - } else if i.intervals != nil { - k := 0 - for n := 0; n < len(i.intervals); n++ { - ni := i.intervals[k] - // intervals are ordered - if v.Stop <= ni.Start { - return - } else if v.Start > ni.Start && v.Stop < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - x := NewInterval(v.Stop, ni.Stop) - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) - return - } else if v.Start <= ni.Start && v.Stop >= ni.Stop { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) 
- k = k - 1 // need another pass - } else if v.Start < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - } else if v.Stop < ni.Stop { - i.intervals[k] = NewInterval(v.Stop, ni.Stop) - } - k++ - } - } -} - -func (i *IntervalSet) removeOne(v int) { - if i.intervals != nil { - for k := 0; k < len(i.intervals); k++ { - ki := i.intervals[k] - // intervals i ordered - if v < ki.Start { - return - } else if v == ki.Start && v == ki.Stop-1 { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) - return - } else if v == ki.Start { - i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) - return - } else if v == ki.Stop-1 { - i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) - return - } else if v < ki.Stop-1 { - x := NewInterval(ki.Start, v) - ki.Start = v + 1 - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) - return - } - } - } -} - -func (i *IntervalSet) String() string { - return i.StringVerbose(nil, nil, false) -} - -func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string { - - if i.intervals == nil { - return "{}" - } else if literalNames != nil || symbolicNames != nil { - return i.toTokenString(literalNames, symbolicNames) - } else if elemsAreChar { - return i.toCharString() - } - - return i.toIndexString() -} - -func (i *IntervalSet) toCharString() string { - names := make([]string, len(i.intervals)) - - for j := 0; j < len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "") - } else { - names = append(names, ("'" + string(v.Start) + "'")) - } - } else { - names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'") - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toIndexString() string { - - names := make([]string, 0) - for j := 0; j < 
len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "") - } else { - names = append(names, strconv.Itoa(v.Start)) - } - } else { - names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { - names := make([]string, 0) - for _, v := range i.intervals { - for j := v.Start; j < v.Stop; j++ { - names = append(names, i.elementName(literalNames, symbolicNames, j)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string { - if a == TokenEOF { - return "" - } else if a == TokenEpsilon { - return "" - } else { - if a < len(literalNames) && literalNames[a] != "" { - return literalNames[a] - } - - return symbolicNames[a] - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go deleted file mode 100644 index 02deaf9..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A lexer is recognizer that draws input symbols from a character stream. -// lexer grammars result in a subclass of this object. A Lexer object -// uses simplified Match() and error recovery mechanisms in the interest -// of speed. 
-/// - -type Lexer interface { - TokenSource - Recognizer - - Emit() Token - - SetChannel(int) - PushMode(int) - PopMode() int - SetType(int) - SetMode(int) -} - -type BaseLexer struct { - *BaseRecognizer - - Interpreter ILexerATNSimulator - TokenStartCharIndex int - TokenStartLine int - TokenStartColumn int - ActionType int - Virt Lexer // The most derived lexer implementation. Allows virtual method calls. - - input CharStream - factory TokenFactory - tokenFactorySourcePair *TokenSourceCharStreamPair - token Token - hitEOF bool - channel int - thetype int - modeStack IntStack - mode int - text string -} - -func NewBaseLexer(input CharStream) *BaseLexer { - - lexer := new(BaseLexer) - - lexer.BaseRecognizer = NewBaseRecognizer() - - lexer.input = input - lexer.factory = CommonTokenFactoryDEFAULT - lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} - - lexer.Virt = lexer - - lexer.Interpreter = nil // child classes must populate it - - // The goal of all lexer rules/methods is to create a token object. - // l is an instance variable as multiple rules may collaborate to - // create a single token. NextToken will return l object after - // Matching lexer rule(s). If you subclass to allow multiple token - // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not - // emit another token. - lexer.token = nil - - // What character index in the stream did the current token start at? - // Needed, for example, to get the text for current token. Set at - // the start of NextToken. - lexer.TokenStartCharIndex = -1 - - // The line on which the first character of the token resides/// - lexer.TokenStartLine = -1 - - // The character position of first character within the line/// - lexer.TokenStartColumn = -1 - - // Once we see EOF on char stream, next token will be EOF. - // If you have DONE : EOF then you see DONE EOF. 
- lexer.hitEOF = false - - // The channel number for the current token/// - lexer.channel = TokenDefaultChannel - - // The token type for the current token/// - lexer.thetype = TokenInvalidType - - lexer.modeStack = make([]int, 0) - lexer.mode = LexerDefaultMode - - // You can set the text for the current token to override what is in - // the input char buffer. Use setText() or can set l instance var. - // / - lexer.text = "" - - return lexer -} - -const ( - LexerDefaultMode = 0 - LexerMore = -2 - LexerSkip = -3 -) - -const ( - LexerDefaultTokenChannel = TokenDefaultChannel - LexerHidden = TokenHiddenChannel - LexerMinCharValue = 0x0000 - LexerMaxCharValue = 0x10FFFF -) - -func (b *BaseLexer) reset() { - // wack Lexer state variables - if b.input != nil { - b.input.Seek(0) // rewind the input - } - b.token = nil - b.thetype = TokenInvalidType - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = -1 - b.TokenStartColumn = -1 - b.TokenStartLine = -1 - b.text = "" - - b.hitEOF = false - b.mode = LexerDefaultMode - b.modeStack = make([]int, 0) - - b.Interpreter.reset() -} - -func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { - return b.Interpreter -} - -func (b *BaseLexer) GetInputStream() CharStream { - return b.input -} - -func (b *BaseLexer) GetSourceName() string { - return b.GrammarFileName -} - -func (b *BaseLexer) SetChannel(v int) { - b.channel = v -} - -func (b *BaseLexer) GetTokenFactory() TokenFactory { - return b.factory -} - -func (b *BaseLexer) setTokenFactory(f TokenFactory) { - b.factory = f -} - -func (b *BaseLexer) safeMatch() (ret int) { - defer func() { - if e := recover(); e != nil { - if re, ok := e.(RecognitionException); ok { - b.notifyListeners(re) // Report error - b.Recover(re) - ret = LexerSkip // default - } - } - }() - - return b.Interpreter.Match(b.input, b.mode) -} - -// Return a token from l source i.e., Match a token on the char stream. 
-func (b *BaseLexer) NextToken() Token { - if b.input == nil { - panic("NextToken requires a non-nil input stream.") - } - - tokenStartMarker := b.input.Mark() - - // previously in finally block - defer func() { - // make sure we release marker after Match or - // unbuffered char stream will keep buffering - b.input.Release(tokenStartMarker) - }() - - for { - if b.hitEOF { - b.EmitEOF() - return b.token - } - b.token = nil - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = b.input.Index() - b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() - b.TokenStartLine = b.Interpreter.GetLine() - b.text = "" - continueOuter := false - for { - b.thetype = TokenInvalidType - ttype := LexerSkip - - ttype = b.safeMatch() - - if b.input.LA(1) == TokenEOF { - b.hitEOF = true - } - if b.thetype == TokenInvalidType { - b.thetype = ttype - } - if b.thetype == LexerSkip { - continueOuter = true - break - } - if b.thetype != LexerMore { - break - } - } - - if continueOuter { - continue - } - if b.token == nil { - b.Virt.Emit() - } - return b.token - } - - return nil -} - -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that -// if token==nil at end of any token rule, it creates one for you -// and emits it. 
-// / -func (b *BaseLexer) Skip() { - b.thetype = LexerSkip -} - -func (b *BaseLexer) More() { - b.thetype = LexerMore -} - -func (b *BaseLexer) SetMode(m int) { - b.mode = m -} - -func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { - fmt.Println("pushMode " + strconv.Itoa(m)) - } - b.modeStack.Push(b.mode) - b.mode = m -} - -func (b *BaseLexer) PopMode() int { - if len(b.modeStack) == 0 { - panic("Empty Stack") - } - if LexerATNSimulatorDebug { - fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) - } - i, _ := b.modeStack.Pop() - b.mode = i - return b.mode -} - -func (b *BaseLexer) inputStream() CharStream { - return b.input -} - -func (b *BaseLexer) setInputStream(input CharStream) { - b.input = nil - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() - b.input = input - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} -} - -func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { - return b.tokenFactorySourcePair -} - -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / -func (b *BaseLexer) EmitToken(token Token) { - b.token = token -} - -// The standard method called to automatically emit a token at the -// outermost lexical rule. The token object should point into the -// char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. 
-// / -func (b *BaseLexer) Emit() Token { - t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) - b.EmitToken(t) - return t -} - -func (b *BaseLexer) EmitEOF() Token { - cpos := b.GetCharPositionInLine() - lpos := b.GetLine() - eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) - b.EmitToken(eof) - return eof -} - -func (b *BaseLexer) GetCharPositionInLine() int { - return b.Interpreter.GetCharPositionInLine() -} - -func (b *BaseLexer) GetLine() int { - return b.Interpreter.GetLine() -} - -func (b *BaseLexer) GetType() int { - return b.thetype -} - -func (b *BaseLexer) SetType(t int) { - b.thetype = t -} - -// What is the index of the current character of lookahead?/// -func (b *BaseLexer) GetCharIndex() int { - return b.input.Index() -} - -// Return the text Matched so far for the current token or any text override. -//Set the complete text of l token it wipes any previous changes to the text. -func (b *BaseLexer) GetText() string { - if b.text != "" { - return b.text - } - - return b.Interpreter.GetText(b.input) -} - -func (b *BaseLexer) SetText(text string) { - b.text = text -} - -func (b *BaseLexer) GetATN() *ATN { - return b.Interpreter.ATN() -} - -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. 
-// / -func (b *BaseLexer) GetAllTokens() []Token { - vl := b.Virt - tokens := make([]Token, 0) - t := vl.NextToken() - for t.GetTokenType() != TokenEOF { - tokens = append(tokens, t) - t = vl.NextToken() - } - return tokens -} - -func (b *BaseLexer) notifyListeners(e RecognitionException) { - start := b.TokenStartCharIndex - stop := b.input.Index() - text := b.input.GetTextFromInterval(NewInterval(start, stop)) - msg := "token recognition error at: '" + text + "'" - listener := b.GetErrorListenerDispatch() - listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) -} - -func (b *BaseLexer) getErrorDisplayForChar(c rune) string { - if c == TokenEOF { - return "" - } else if c == '\n' { - return "\\n" - } else if c == '\t' { - return "\\t" - } else if c == '\r' { - return "\\r" - } else { - return string(c) - } -} - -func (b *BaseLexer) getCharErrorDisplay(c rune) string { - return "'" + b.getErrorDisplayForChar(c) + "'" -} - -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope -// it all works out. You can instead use the rule invocation stack -// to do sophisticated error recovery if you are in a fragment rule. -// / -func (b *BaseLexer) Recover(re RecognitionException) { - if b.input.LA(1) != TokenEOF { - if _, ok := re.(*LexerNoViableAltException); ok { - // Skip a char and try again - b.Interpreter.Consume(b.input) - } else { - // TODO: Do we lose character or line position information? - b.input.Consume() - } - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go deleted file mode 100644 index 20df84f..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
-// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. -) - -type LexerAction interface { - getActionType() int - getIsPositionDependent() bool - execute(lexer Lexer) - hash() int - equals(other LexerAction) bool -} - -type BaseLexerAction struct { - actionType int - isPositionDependent bool -} - -func NewBaseLexerAction(action int) *BaseLexerAction { - la := new(BaseLexerAction) - - la.actionType = action - la.isPositionDependent = false - - return la -} - -func (b *BaseLexerAction) execute(lexer Lexer) { - panic("Not implemented") -} - -func (b *BaseLexerAction) getActionType() int { - return b.actionType -} - -func (b *BaseLexerAction) getIsPositionDependent() bool { - return b.isPositionDependent -} - -func (b *BaseLexerAction) hash() int { - return b.actionType -} - -func (b *BaseLexerAction) equals(other LexerAction) bool { - return b == other -} - -// -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. -// -//

The {@code Skip} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

-type LexerSkipAction struct { - *BaseLexerAction -} - -func NewLexerSkipAction() *LexerSkipAction { - la := new(LexerSkipAction) - la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) - return la -} - -// Provides a singleton instance of l parameterless lexer action. -var LexerSkipActionINSTANCE = NewLexerSkipAction() - -func (l *LexerSkipAction) execute(lexer Lexer) { - lexer.Skip() -} - -func (l *LexerSkipAction) String() string { - return "skip" -} - -// Implements the {@code type} lexer action by calling {@link Lexer//setType} -// with the assigned type. -type LexerTypeAction struct { - *BaseLexerAction - - thetype int -} - -func NewLexerTypeAction(thetype int) *LexerTypeAction { - l := new(LexerTypeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) - l.thetype = thetype - return l -} - -func (l *LexerTypeAction) execute(lexer Lexer) { - lexer.SetType(l.thetype) -} - -func (l *LexerTypeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.thetype) - return murmurFinish(h, 2) -} - -func (l *LexerTypeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerTypeAction); !ok { - return false - } else { - return l.thetype == other.(*LexerTypeAction).thetype - } -} - -func (l *LexerTypeAction) String() string { - return "actionType(" + strconv.Itoa(l.thetype) + ")" -} - -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. -type LexerPushModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerPushModeAction(mode int) *LexerPushModeAction { - - l := new(LexerPushModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) - - l.mode = mode - return l -} - -//

This action is implemented by calling {@link Lexer//pushMode} with the -// value provided by {@link //getMode}.

-func (l *LexerPushModeAction) execute(lexer Lexer) { - lexer.PushMode(l.mode) -} - -func (l *LexerPushModeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerPushModeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerPushModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerPushModeAction).mode - } -} - -func (l *LexerPushModeAction) String() string { - return "pushMode(" + strconv.Itoa(l.mode) + ")" -} - -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. -// -//

The {@code popMode} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

-type LexerPopModeAction struct { - *BaseLexerAction -} - -func NewLexerPopModeAction() *LexerPopModeAction { - - l := new(LexerPopModeAction) - - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) - - return l -} - -var LexerPopModeActionINSTANCE = NewLexerPopModeAction() - -//

This action is implemented by calling {@link Lexer//popMode}.

-func (l *LexerPopModeAction) execute(lexer Lexer) { - lexer.PopMode() -} - -func (l *LexerPopModeAction) String() string { - return "popMode" -} - -// Implements the {@code more} lexer action by calling {@link Lexer//more}. -// -//

The {@code more} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

- -type LexerMoreAction struct { - *BaseLexerAction -} - -func NewLexerMoreAction() *LexerMoreAction { - l := new(LexerMoreAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) - - return l -} - -var LexerMoreActionINSTANCE = NewLexerMoreAction() - -//

This action is implemented by calling {@link Lexer//popMode}.

-func (l *LexerMoreAction) execute(lexer Lexer) { - lexer.More() -} - -func (l *LexerMoreAction) String() string { - return "more" -} - -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with -// the assigned mode. -type LexerModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerModeAction(mode int) *LexerModeAction { - l := new(LexerModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) - l.mode = mode - return l -} - -//

This action is implemented by calling {@link Lexer//mode} with the -// value provided by {@link //getMode}.

-func (l *LexerModeAction) execute(lexer Lexer) { - lexer.SetMode(l.mode) -} - -func (l *LexerModeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerModeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerModeAction).mode - } -} - -func (l *LexerModeAction) String() string { - return "mode(" + strconv.Itoa(l.mode) + ")" -} - -// Executes a custom lexer action by calling {@link Recognizer//action} with the -// rule and action indexes assigned to the custom action. The implementation of -// a custom action is added to the generated code for the lexer in an override -// of {@link Recognizer//action} when the grammar is compiled. -// -//

This class may represent embedded actions created with the {...} -// syntax in ANTLR 4, as well as actions created for lexer commands where the -// command argument could not be evaluated when the grammar was compiled.

- -// Constructs a custom lexer action with the specified rule and action -// indexes. -// -// @param ruleIndex The rule index to use for calls to -// {@link Recognizer//action}. -// @param actionIndex The action index to use for calls to -// {@link Recognizer//action}. - -type LexerCustomAction struct { - *BaseLexerAction - ruleIndex, actionIndex int -} - -func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { - l := new(LexerCustomAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) - l.ruleIndex = ruleIndex - l.actionIndex = actionIndex - l.isPositionDependent = true - return l -} - -//

Custom actions are implemented by calling {@link Lexer//action} with the -// appropriate rule and action indexes.

-func (l *LexerCustomAction) execute(lexer Lexer) { - lexer.Action(nil, l.ruleIndex, l.actionIndex) -} - -func (l *LexerCustomAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.ruleIndex) - h = murmurUpdate(h, l.actionIndex) - return murmurFinish(h, 3) -} - -func (l *LexerCustomAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerCustomAction); !ok { - return false - } else { - return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex - } -} - -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. -type LexerChannelAction struct { - *BaseLexerAction - - channel int -} - -func NewLexerChannelAction(channel int) *LexerChannelAction { - l := new(LexerChannelAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) - l.channel = channel - return l -} - -//

This action is implemented by calling {@link Lexer//setChannel} with the -// value provided by {@link //getChannel}.

-func (l *LexerChannelAction) execute(lexer Lexer) { - lexer.SetChannel(l.channel) -} - -func (l *LexerChannelAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.channel) - return murmurFinish(h, 2) -} - -func (l *LexerChannelAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerChannelAction); !ok { - return false - } else { - return l.channel == other.(*LexerChannelAction).channel - } -} - -func (l *LexerChannelAction) String() string { - return "channel(" + strconv.Itoa(l.channel) + ")" -} - -// This implementation of {@link LexerAction} is used for tracking input offsets -// for position-dependent actions within a {@link LexerActionExecutor}. -// -//

This action is not serialized as part of the ATN, and is only required for -// position-dependent lexer actions which appear at a location other than the -// end of a rule. For more information about DFA optimizations employed for -// lexer actions, see {@link LexerActionExecutor//append} and -// {@link LexerActionExecutor//fixOffsetBeforeMatch}.

- -// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//

Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.

-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. -type LexerIndexedCustomAction struct { - *BaseLexerAction - - offset int - lexerAction LexerAction - isPositionDependent bool -} - -func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { - - l := new(LexerIndexedCustomAction) - l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) - - l.offset = offset - l.lexerAction = lexerAction - l.isPositionDependent = true - - return l -} - -//

This method calls {@link //execute} on the result of {@link //getAction} -// using the provided {@code lexer}.

-func (l *LexerIndexedCustomAction) execute(lexer Lexer) { - // assume the input stream position was properly set by the calling code - l.lexerAction.execute(lexer) -} - -func (l *LexerIndexedCustomAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.offset) - h = murmurUpdate(h, l.lexerAction.hash()) - return murmurFinish(h, 3) -} - -func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerIndexedCustomAction); !ok { - return false - } else { - return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go deleted file mode 100644 index 80b949a..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// Represents an executor for a sequence of lexer actions which traversed during -// the Matching operation of a lexer rule (token). -// -//

The executor tracks position information for position-dependent lexer actions -// efficiently, ensuring that actions appearing only at the end of the rule do -// not cause bloating of the {@link DFA} created for the lexer.

- -type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int -} - -func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { - - if lexerActions == nil { - lexerActions = make([]LexerAction, 0) - } - - l := new(LexerActionExecutor) - - l.lexerActions = lexerActions - - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) - for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.hash()) - } - - return l -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { - if lexerActionExecutor == nil { - return NewLexerActionExecutor([]LexerAction{lexerAction}) - } - - return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) -} - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//

Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end -// of a lexer rule, even when the lexer rule Matches a variable number of -// characters.

-// -//

Prior to traversing a Match transition in the ATN, the current offset -// from the token start index is assigned to all position-dependent lexer -// actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of -// lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.

-// -//

If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.

-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { - var updatedLexerActions []LexerAction - for i := 0; i < len(l.lexerActions); i++ { - _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) - if l.lexerActions[i].getIsPositionDependent() && !ok { - if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } - } - - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) - } - } - if updatedLexerActions == nil { - return l - } - - return NewLexerActionExecutor(updatedLexerActions) -} - -// Execute the actions encapsulated by l executor within the context of a -// particular {@link Lexer}. -// -//

This method calls {@link IntStream//seek} to set the position of the -// {@code input} {@link CharStream} prior to calling -// {@link LexerAction//execute} on a position-dependent action. Before the -// method returns, the input position will be restored to the same position -// it was in when the method was invoked.

-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When l method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { - requiresSeek := false - stopIndex := input.Index() - - defer func() { - if requiresSeek { - input.Seek(stopIndex) - } - }() - - for i := 0; i < len(l.lexerActions); i++ { - lexerAction := l.lexerActions[i] - if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { - offset := la.offset - input.Seek(startIndex + offset) - lexerAction = la.lexerAction - requiresSeek = (startIndex + offset) != stopIndex - } else if lexerAction.getIsPositionDependent() { - input.Seek(stopIndex) - requiresSeek = false - } - lexerAction.execute(lexer) - } -} - -func (l *LexerActionExecutor) hash() int { - if l == nil { - return 61 - } - return l.cachedHash -} - -func (l *LexerActionExecutor) equals(other interface{}) bool { - if l == other { - return true - } else if _, ok := other.(*LexerActionExecutor); !ok { - return false - } else { - return l.cachedHash == other.(*LexerActionExecutor).cachedHash && - &l.lexerActions == &other.(*LexerActionExecutor).lexerActions - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go deleted file mode 100644 index 131364f..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go +++ /dev/null @@ -1,658 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
-// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - - LexerATNSimulatorMinDFAEdge = 0 - LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN - - LexerATNSimulatorMatchCalls = 0 -) - -type ILexerATNSimulator interface { - IATNSimulator - - reset() - Match(input CharStream, mode int) int - GetCharPositionInLine() int - GetLine() int - GetText(input CharStream) string - Consume(input CharStream) -} - -type LexerATNSimulator struct { - *BaseATNSimulator - - recog Lexer - predictionMode int - mergeCache DoubleDict - startIndex int - Line int - CharPositionInLine int - mode int - prevAccept *SimState - MatchCalls int -} - -func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - l.decisionToDFA = decisionToDFA - l.recog = recog - // The current token's starting index into the character stream. - // Shared across DFA to ATN simulation in case the ATN fails and the - // DFA did not have a previous accept state. In l case, we use the - // ATN-generated exception object. 
- l.startIndex = -1 - // line number 1..n within the input/// - l.Line = 1 - // The index of the character relative to the beginning of the line - // 0..n-1/// - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode - // Used during DFA/ATN exec to record the most recent accept configuration - // info - l.prevAccept = NewSimState() - // done - return l -} - -func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { - l.CharPositionInLine = simulator.CharPositionInLine - l.Line = simulator.Line - l.mode = simulator.mode - l.startIndex = simulator.startIndex -} - -func (l *LexerATNSimulator) Match(input CharStream, mode int) int { - l.MatchCalls++ - l.mode = mode - mark := input.Mark() - - defer func() { - input.Release(mark) - }() - - l.startIndex = input.Index() - l.prevAccept.reset() - - dfa := l.decisionToDFA[mode] - - if dfa.s0 == nil { - return l.MatchATN(input) - } - - return l.execATN(input, dfa.s0) -} - -func (l *LexerATNSimulator) reset() { - l.prevAccept.reset() - l.startIndex = -1 - l.Line = 1 - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode -} - -func (l *LexerATNSimulator) MatchATN(input CharStream) int { - startState := l.atn.modeToStartState[l.mode] - - if LexerATNSimulatorDebug { - fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) - } - oldMode := l.mode - s0Closure := l.computeStartState(input, startState) - suppressEdge := s0Closure.hasSemanticContext - s0Closure.hasSemanticContext = false - - next := l.addDFAState(s0Closure) - - if !suppressEdge { - l.decisionToDFA[l.mode].setS0(next) - } - - predict := l.execATN(input, next) - - if LexerATNSimulatorDebug { - fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) - } - return predict -} - -func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - - if LexerATNSimulatorDebug { - fmt.Println("start state closure=" + ds0.configs.String()) - } - if ds0.isAcceptState { - // allow zero-length tokens - 
l.captureSimState(l.prevAccept, input, ds0) - } - t := input.LA(1) - s := ds0 // s is current/from DFA state - - for { // while more work - if LexerATNSimulatorDebug { - fmt.Println("execATN loop starting closure: " + s.configs.String()) - } - - // As we move src->trg, src->trg, we keep track of the previous trg to - // avoid looking up the DFA state again, which is expensive. - // If the previous target was already part of the DFA, we might - // be able to avoid doing a reach operation upon t. If s!=nil, - // it means that semantic predicates didn't prevent us from - // creating a DFA state. Once we know s!=nil, we check to see if - // the DFA state has an edge already for t. If so, we can just reuse - // it's configuration set there's no point in re-computing it. - // This is kind of like doing DFA simulation within the ATN - // simulation because DFA simulation is really just a way to avoid - // computing reach/closure sets. Technically, once we know that - // we have a previously added DFA state, we could jump over to - // the DFA simulator. But, that would mean popping back and forth - // a lot and making things more complicated algorithmically. - // This optimization makes a lot of sense for loops within DFA. - // A character will take us back to an existing DFA state - // that already has lots of edges out of it. e.g., .* in comments. - target := l.getExistingTargetState(s, t) - if target == nil { - target = l.computeTargetState(input, s, t) - // print("Computed:" + str(target)) - } - if target == ATNSimulatorError { - break - } - // If l is a consumable input element, make sure to consume before - // capturing the accept state so the input index, line, and char - // position accurately reflect the state of the interpreter at the - // end of the token. 
- if t != TokenEOF { - l.Consume(input) - } - if target.isAcceptState { - l.captureSimState(l.prevAccept, input, target) - if t == TokenEOF { - break - } - } - t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state - } - - return l.failOrAccept(l.prevAccept, input, s.configs, t) -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// l method returns {@code nil}. -// -// @param s The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for l edge is not -// already cached -func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { - if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { - return nil - } - - target := s.edges[t-LexerATNSimulatorMinDFAEdge] - if LexerATNSimulatorDebug && target != nil { - fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) - } - return target -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. 
-func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { - reach := NewOrderedATNConfigSet() - - // if we don't find an existing DFA state - // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) - - if len(reach.configs) == 0 { // we got nowhere on t from s - if !reach.hasSemanticContext { - // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. - l.addDFAEdge(s, t, ATNSimulatorError, nil) - } - // stop when we can't Match any more char - return ATNSimulatorError - } - // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) -} - -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { - if l.prevAccept.dfaState != nil { - lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor - l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) - return prevAccept.dfaState.prediction - } - - // if no accept and EOF is first char, return EOF - if t == TokenEOF && input.Index() == l.startIndex { - return TokenEOF - } - - panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) -} - -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. 
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { - // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule - SkipAlt := ATNInvalidAltNumber - - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { - continue - } - - if LexerATNSimulatorDebug { - - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) - } - - for _, trans := range cfg.GetState().GetTransitions() { - target := l.getReachableTarget(trans, t) - if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor - if lexerActionExecutor != nil { - lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) - } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) - if l.closure(input, config, reach, - currentAltReachedAcceptState, true, treatEOFAsEpsilon) { - // any remaining configs for l alt have a lower priority - // than the one that just reached an accept state. 
- SkipAlt = cfg.GetAlt() - } - } - } - } -} - -func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { - fmt.Printf("ACTION %s\n", lexerActionExecutor) - } - // seek to after last char in token - input.Seek(index) - l.Line = line - l.CharPositionInLine = charPos - if lexerActionExecutor != nil && l.recog != nil { - lexerActionExecutor.execute(l.recog, input, startIndex) - } -} - -func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { - if trans.Matches(t, 0, LexerMaxCharValue) { - return trans.getTarget() - } - - return nil -} - -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { - configs := NewOrderedATNConfigSet() - for i := 0; i < len(p.GetTransitions()); i++ { - target := p.GetTransitions()[i].getTarget() - cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) - l.closure(input, cfg, configs, false, false, false) - } - - return configs -} - -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept -// state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. -// -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. 
-func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") - } - - _, ok := config.state.(*RuleStopState) - if ok { - - if LexerATNSimulatorDebug { - if l.recog != nil { - fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) - } else { - fmt.Printf("closure at rule stop %s\n", config) - } - } - - if config.context == nil || config.context.hasEmptyPath() { - if config.context == nil || config.context.isEmpty() { - configs.Add(config, nil) - return true - } - - configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) - currentAltReachedAcceptState = true - } - if config.context != nil && !config.context.isEmpty() { - for i := 0; i < config.context.length(); i++ { - if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { - newContext := config.context.GetParent(i) // "pop" return state - returnState := l.atn.states[config.context.getReturnState(i)] - cfg := NewLexerATNConfig2(config, returnState, newContext) - currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - } - return currentAltReachedAcceptState - } - // optimization - if !config.state.GetEpsilonOnlyTransitions() { - if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { - configs.Add(config, nil) - } - } - for j := 0; j < len(config.state.GetTransitions()); j++ { - trans := config.state.GetTransitions()[j] - cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) - if cfg != nil { - currentAltReachedAcceptState = l.closure(input, cfg, configs, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - return 
currentAltReachedAcceptState -} - -// side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { - - var cfg *LexerATNConfig - - if trans.getSerializationType() == TransitionRULE { - - rt := trans.(*RuleTransition) - newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) - cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) - - } else if trans.getSerializationType() == TransitionPRECEDENCE { - panic("Precedence predicates are not supported in lexers.") - } else if trans.getSerializationType() == TransitionPREDICATE { - // Track traversing semantic predicates. If we traverse, - // we cannot add a DFA state for l "reach" computation - // because the DFA would not test the predicate again in the - // future. Rather than creating collections of semantic predicates - // like v3 and testing them on prediction, v4 will test them on the - // fly all the time using the ATN not the DFA. This is slower but - // semantically it's not used that often. One of the key elements to - // l predicate mechanism is not adding DFA states that see - // predicates immediately afterwards in the ATN. For example, - - // a : ID {p1}? | ID {p2}? - - // should create the start state for rule 'a' (to save start state - // competition), but should not create target of ID state. The - // collection of ATN states the following ID references includes - // states reached by traversing predicates. Since l is when we - // test them, we cannot cash the DFA state target of ID. 
- - pt := trans.(*PredicateTransition) - - if LexerATNSimulatorDebug { - fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) - } - configs.SetHasSemanticContext(true) - if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionACTION { - if config.context == nil || config.context.hasEmptyPath() { - // execute actions anywhere in the start rule for a token. - // - // TODO: if the entry rule is invoked recursively, some - // actions may be executed during the recursive call. The - // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be - // split into two contexts - one with just the empty path - // and another with everything but the empty path. - // Unfortunately, the current algorithm does not allow - // getEpsilonTarget to return two configurations, so - // additional modifications are needed before we can support - // the split operation. - lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) - cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) - } else { - // ignore actions in referenced rules - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionEPSILON { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } else if trans.getSerializationType() == TransitionATOM || - trans.getSerializationType() == TransitionRANGE || - trans.getSerializationType() == TransitionSET { - if treatEOFAsEpsilon { - if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } - } - return cfg -} - -// Evaluate a predicate specified in the lexer. -// -//

If {@code speculative} is {@code true}, l method was called before -// {@link //consume} for the Matched character. This method should call -// {@link //consume} before evaluating the predicate to ensure position -// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, -// and {@link Lexer//getcolumn}, properly reflect the current -// lexer state. This method should restore {@code input} and the simulator -// to the original state before returning (i.e. undo the actions made by the -// call to {@link //consume}.

-// -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / -func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { - // assume true if no recognizer was provided - if l.recog == nil { - return true - } - if !speculative { - return l.recog.Sempred(nil, ruleIndex, predIndex) - } - savedcolumn := l.CharPositionInLine - savedLine := l.Line - index := input.Index() - marker := input.Mark() - - defer func() { - l.CharPositionInLine = savedcolumn - l.Line = savedLine - input.Seek(index) - input.Release(marker) - }() - - l.Consume(input) - return l.recog.Sempred(nil, ruleIndex, predIndex) -} - -func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { - settings.index = input.Index() - settings.line = l.Line - settings.column = l.CharPositionInLine - settings.dfaState = dfaState -} - -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { - if to == nil && cfgs != nil { - // leading to l call, ATNConfigSet.hasSemanticContext is used as a - // marker indicating dynamic predicate evaluation makes l edge - // dependent on the specific input sequence, so the static edge in the - // DFA should be omitted. The target DFAState is still created since - // execATN has the ability to reSynchronize with the DFA state cache - // following the predicate evaluation step. - // - // TJP notes: next time through the DFA, we see a pred again and eval. - // If that gets us to a previously created (but dangling) DFA - // state, we can continue in pure DFA mode from there. 
- // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - - to = l.addDFAState(cfgs) - - if suppressEdge { - return to - } - } - // add the edge - if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { - // Only track edges within the DFA bounds - return to - } - if LexerATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) - } - if from.edges == nil { - // make room for tokens 1..n and -1 masquerading as index 0 - from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1) - } - from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect - - return to -} - -// Add a NewDFA state if there isn't one with l set of -// configurations already. This method also detects the first -// configuration containing an ATN rule stop state. Later, when -// traversing the DFA, we will know which rule to accept. -func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState { - - proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { - - _, ok := cfg.GetState().(*RuleStopState) - - if ok { - firstConfigWithRuleStopState = cfg - break - } - } - if firstConfigWithRuleStopState != nil { - proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor - proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) - } - hash := proposed.hash() - dfa := l.decisionToDFA[l.mode] - existing, ok := dfa.getState(hash) - if ok { - return existing - } - newState := proposed - newState.stateNumber = dfa.numStates() - configs.SetReadOnly(true) - newState.configs = configs - dfa.setState(hash, newState) - return newState -} - -func (l *LexerATNSimulator) getDFA(mode int) *DFA { - return l.decisionToDFA[mode] -} - -// Get the text Matched so far for the current token. 
-func (l *LexerATNSimulator) GetText(input CharStream) string { - // index is first lookahead char, don't include. - return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) -} - -func (l *LexerATNSimulator) Consume(input CharStream) { - curChar := input.LA(1) - if curChar == int('\n') { - l.Line++ - l.CharPositionInLine = 0 - } else { - l.CharPositionInLine++ - } - input.Consume() -} - -func (l *LexerATNSimulator) GetCharPositionInLine() int { - return l.CharPositionInLine -} - -func (l *LexerATNSimulator) GetLine() int { - return l.Line -} - -func (l *LexerATNSimulator) GetTokenName(tt int) string { - if tt == -1 { - return "EOF" - } - - return "'" + string(tt) + "'" -} - -func resetSimState(sim *SimState) { - sim.index = -1 - sim.line = 0 - sim.column = -1 - sim.dfaState = nil -} - -type SimState struct { - index int - line int - column int - dfaState *DFAState -} - -func NewSimState() *SimState { - s := new(SimState) - resetSimState(s) - return s -} - -func (s *SimState) reset() { - resetSimState(s) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go deleted file mode 100644 index f5afd09..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type LL1Analyzer struct { - atn *ATN -} - -func NewLL1Analyzer(atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la -} - -//* Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. 
-/// -const ( - LL1AnalyzerHitPred = TokenInvalidType -) - -//* -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. If the closure from transition -// i leads to a semantic predicate before Matching a symbol, the -// element at index i of the result will be {@code nil}. -// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. -func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { - if s == nil { - return nil - } - count := len(s.GetTransitions()) - look := make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - lookBusy := NewSet(nil, nil) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { - look[alt] = nil - } - } - return look -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and the end of the rule containing -// {@code s} is reached, {@link Token//EPSILON} is added to the result set. -// If {@code ctx} is not {@code nil} and the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -/// -func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { - r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext - if ctx != nil { - lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) - } - la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true) - return r -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and {@code stopState} or the end of the -// rule containing {@code s} is reached, {@link Token//EPSILON} is added to -// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is -// {@code true} and {@code stopState} or the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code nil} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code NewSet} for la argument. -// @param calledRuleStack A set used for preventing left recursion in the -// ATN from causing a stack overflow. Outside code should pass -// {@code NewBitSet()} for la argument. -// @param seeThruPreds {@code true} to true semantic predicates as -// implicitly {@code true} and "see through them", otherwise {@code false} -// to treat semantic predicates as opaque and add {@link //HitPred} to the -// result if one is encountered. -// @param addEOF Add {@link Token//EOF} to the result if the end of the -// outermost context is reached. This parameter has no effect if {@code ctx} -// is {@code nil}. 
- -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { - - returnState := la.atn.states[ctx.getReturnState(i)] - - removed := calledRuleStack.contains(returnState.GetRuleIndex()) - - defer func() { - if removed { - calledRuleStack.add(returnState.GetRuleIndex()) - } - }() - - calledRuleStack.remove(returnState.GetRuleIndex()) - la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} - -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - - c := NewBaseATNConfig6(s, 0, ctx) - - if lookBusy.contains(c) { - return - } - - lookBusy.add(c) - - if s == stopState { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - } - - _, ok := s.(*RuleStopState) - - if ok { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - - if ctx != BasePredictionContextEMPTY { - // run thru all possible stack tops in ctx - for i := 0; i < ctx.length(); i++ { - returnState := la.atn.states[ctx.getReturnState(i)] - la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) - } - return - } - } - - n := len(s.GetTransitions()) - - for i := 0; i < n; i++ { - t := s.GetTransitions()[i] - - if t1, ok := t.(*RuleTransition); ok { - if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { - continue - } - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) - } else if t2, ok := t.(AbstractPredicateTransition); ok { - if seeThruPreds { - la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, 
calledRuleStack, seeThruPreds, addEOF) - } else { - look.addOne(LL1AnalyzerHitPred) - } - } else if t.getIsEpsilon() { - la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else if _, ok := t.(*WildcardTransition); ok { - look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) - } else { - set := t.getLabel() - if set != nil { - if _, ok := t.(*NotSetTransition); ok { - set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) - } - look.addSet(set) - } - } - } -} - -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - - defer func() { - calledRuleStack.remove(t1.getTarget().GetRuleIndex()) - }() - - calledRuleStack.add(t1.getTarget().GetRuleIndex()) - la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go deleted file mode 100644 index fb60258..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "fmt" - "strconv" -) - -type Parser interface { - Recognizer - - GetInterpreter() *ParserATNSimulator - - GetTokenStream() TokenStream - GetTokenFactory() TokenFactory - GetParserRuleContext() ParserRuleContext - SetParserRuleContext(ParserRuleContext) - Consume() Token - GetParseListeners() []ParseTreeListener - - GetErrorHandler() ErrorStrategy - SetErrorHandler(ErrorStrategy) - GetInputStream() IntStream - GetCurrentToken() Token - GetExpectedTokens() *IntervalSet - NotifyErrorListeners(string, Token, RecognitionException) - IsExpectedToken(int) bool - GetPrecedence() int - GetRuleInvocationStack(ParserRuleContext) []string -} - -type BaseParser struct { - *BaseRecognizer - - Interpreter *ParserATNSimulator - BuildParseTrees bool - - input TokenStream - errHandler ErrorStrategy - precedenceStack IntStack - ctx ParserRuleContext - - tracer *TraceListener - parseListeners []ParseTreeListener - _SyntaxErrors int -} - -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// -func NewBaseParser(input TokenStream) *BaseParser { - - p := new(BaseParser) - - p.BaseRecognizer = NewBaseRecognizer() - - // The input stream. - p.input = nil - // The error handling strategy for the parser. The default value is a new - // instance of {@link DefaultErrorStrategy}. - p.errHandler = NewDefaultErrorStrategy() - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. - // p.is always non-nil during the parsing process. - p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during - // the parsing process. The default value is {@code true}. - p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. 
The listener itself is - // implemented as a parser listener so p.field is not directly used by - // other parser methods. - p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive - // events during the parse. - p.parseListeners = nil - // The number of syntax errors Reported during parsing. p.value is - // incremented each time {@link //NotifyErrorListeners} is called. - p._SyntaxErrors = 0 - p.SetInputStream(input) - - return p -} - -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with -// bypass alternatives. -// -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -// -var bypassAltsAtnCache = make(map[string]int) - -// reset the parser's state// -func (p *BaseParser) reset() { - if p.input != nil { - p.input.Seek(0) - } - p.errHandler.reset(p) - p.ctx = nil - p._SyntaxErrors = 0 - p.SetTrace(nil) - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - if p.Interpreter != nil { - p.Interpreter.reset() - } -} - -func (p *BaseParser) GetErrorHandler() ErrorStrategy { - return p.errHandler -} - -func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { - p.errHandler = e -} - -// Match current input symbol against {@code ttype}. If the symbol type -// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are -// called to complete the Match process. -// -//

If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

-// -// @param ttype the token type to Match -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -func (p *BaseParser) Match(ttype int) Token { - - t := p.GetCurrentToken() - - if t.GetTokenType() == ttype { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - - return t -} - -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. -// -//

If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

-// -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// a wildcard and the error strategy could not recover from the mismatched -// symbol - -func (p *BaseParser) MatchWildcard() Token { - t := p.GetCurrentToken() - if t.GetTokenType() > 0 { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - return t -} - -func (p *BaseParser) GetParserRuleContext() ParserRuleContext { - return p.ctx -} - -func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { - p.ctx = v -} - -func (p *BaseParser) GetParseListeners() []ParseTreeListener { - if p.parseListeners == nil { - return make([]ParseTreeListener, 0) - } - return p.parseListeners -} - -// Registers {@code listener} to receive events during the parsing process. -// -//

To support output-preserving grammar transformations (including but not -// limited to left-recursion removal, automated left-factoring, and -// optimized code generation), calls to listener methods during the parse -// may differ substantially from calls made by -// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In -// particular, rule entry and exit events may occur in a different order -// during the parse than after the parser. In addition, calls to certain -// rule entry methods may be omitted.

-// -//

With the following specific exceptions, calls to listener events are -// deterministic, i.e. for identical input the calls to listener -// methods will be the same.

-// -//
    -//
  • Alterations to the grammar used to generate code may change the -// behavior of the listener calls.
  • -//
  • Alterations to the command line options passed to ANTLR 4 when -// generating the parser may change the behavior of the listener calls.
  • -//
  • Changing the version of the ANTLR Tool used to generate the parser -// may change the behavior of the listener calls.
  • -//
-// -// @param listener the listener to add -// -// @panics nilPointerException if {@code} listener is {@code nil} -// -func (p *BaseParser) AddParseListener(listener ParseTreeListener) { - if listener == nil { - panic("listener") - } - if p.parseListeners == nil { - p.parseListeners = make([]ParseTreeListener, 0) - } - p.parseListeners = append(p.parseListeners, listener) -} - -// -// Remove {@code listener} from the list of parse listeners. -// -//

If {@code listener} is {@code nil} or has not been added as a parse -// listener, p.method does nothing.

-// @param listener the listener to remove -// -func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { - - if p.parseListeners != nil { - - idx := -1 - for i, v := range p.parseListeners { - if v == listener { - idx = i - break - } - } - - if idx == -1 { - return - } - - // remove the listener from the slice - p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) - - if len(p.parseListeners) == 0 { - p.parseListeners = nil - } - } -} - -// Remove all parse listeners. -func (p *BaseParser) removeParseListeners() { - p.parseListeners = nil -} - -// Notify any parse listeners of an enter rule event. -func (p *BaseParser) TriggerEnterRuleEvent() { - if p.parseListeners != nil { - ctx := p.ctx - for _, listener := range p.parseListeners { - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) - } - } -} - -// -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -// -func (p *BaseParser) TriggerExitRuleEvent() { - if p.parseListeners != nil { - // reverse order walk of listeners - ctx := p.ctx - l := len(p.parseListeners) - 1 - - for i := range p.parseListeners { - listener := p.parseListeners[l-i] - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) - } - } -} - -func (p *BaseParser) GetInterpreter() *ParserATNSimulator { - return p.Interpreter -} - -func (p *BaseParser) GetATN() *ATN { - return p.Interpreter.atn -} - -func (p *BaseParser) GetTokenFactory() TokenFactory { - return p.input.GetTokenSource().GetTokenFactory() -} - -// Tell our token source and error strategy about a Newway to create tokens.// -func (p *BaseParser) setTokenFactory(factory TokenFactory) { - p.input.GetTokenSource().setTokenFactory(factory) -} - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. 
-// -func (p *BaseParser) GetATNWithBypassAlts() { - - // TODO - panic("Not implemented!") - - // serializedAtn := p.getSerializedATN() - // if (serializedAtn == nil) { - // panic("The current parser does not support an ATN with bypass alternatives.") - // } - // result := p.bypassAltsAtnCache[serializedAtn] - // if (result == nil) { - // deserializationOptions := NewATNDeserializationOptions(nil) - // deserializationOptions.generateRuleBypassTransitions = true - // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) - // p.bypassAltsAtnCache[serializedAtn] = result - // } - // return result -} - -// The preferred method of getting a tree pattern. For example, here's a -// sample use: -// -//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-// 
- -func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { - - panic("NewParseTreePatternMatcher not implemented!") - // - // if (lexer == nil) { - // if (p.GetTokenStream() != nil) { - // tokenSource := p.GetTokenStream().GetTokenSource() - // if _, ok := tokenSource.(ILexer); ok { - // lexer = tokenSource - // } - // } - // } - // if (lexer == nil) { - // panic("Parser can't discover a lexer to use") - // } - - // m := NewParseTreePatternMatcher(lexer, p) - // return m.compile(pattern, patternRuleIndex) -} - -func (p *BaseParser) GetInputStream() IntStream { - return p.GetTokenStream() -} - -func (p *BaseParser) SetInputStream(input TokenStream) { - p.SetTokenStream(input) -} - -func (p *BaseParser) GetTokenStream() TokenStream { - return p.input -} - -// Set the token stream and reset the parser.// -func (p *BaseParser) SetTokenStream(input TokenStream) { - p.input = nil - p.reset() - p.input = input -} - -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref e.g., x=ID. 
-// -func (p *BaseParser) GetCurrentToken() Token { - return p.input.LT(1) -} - -func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { - if offendingToken == nil { - offendingToken = p.GetCurrentToken() - } - p._SyntaxErrors++ - line := offendingToken.GetLine() - column := offendingToken.GetColumn() - listener := p.GetErrorListenerDispatch() - listener.SyntaxError(p, offendingToken, line, column, msg, err) -} - -func (p *BaseParser) Consume() Token { - o := p.GetCurrentToken() - if o.GetTokenType() != TokenEOF { - p.GetInputStream().Consume() - } - hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 - if p.BuildParseTrees || hasListener { - if p.errHandler.inErrorRecoveryMode(p) { - node := p.ctx.AddErrorNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitErrorNode(node) - } - } - - } else { - node := p.ctx.AddTokenNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitTerminal(node) - } - } - } - // node.invokingState = p.state - } - - return o -} - -func (p *BaseParser) addContextToParseTree() { - // add current context to parent if we have a parent - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) - } -} - -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { - p.SetState(state) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.BuildParseTrees { - p.addContextToParseTree() - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() - } -} - -func (p *BaseParser) ExitRule() { - p.ctx.SetStop(p.input.LT(-1)) - // trigger event on ctx, before it reverts to parent - if p.parseListeners != nil { - p.TriggerExitRuleEvent() - } - p.SetState(p.ctx.GetInvokingState()) - if p.ctx.GetParent() != nil { - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } else { - p.ctx = nil - } -} - -func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { - 
localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx - // that is previous child of parse tree - if p.BuildParseTrees && p.ctx != localctx { - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() - p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) - } - } - p.ctx = localctx -} - -// Get the precedence level for the top-most precedence rule. -// -// @return The precedence level for the top-most precedence rule, or -1 if -// the parser context is not nested within a precedence rule. - -func (p *BaseParser) GetPrecedence() int { - if len(p.precedenceStack) == 0 { - return -1 - } - - return p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { - p.SetState(state) - p.precedenceStack.Push(precedence) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -// -// Like {@link //EnterRule} but for recursive rules. 
- -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { - previous := p.ctx - previous.SetParent(localctx) - previous.SetInvokingState(state) - previous.SetStop(p.input.LT(-1)) - - p.ctx = localctx - p.ctx.SetStart(previous.GetStart()) - if p.BuildParseTrees { - p.ctx.AddChild(previous) - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() - p.ctx.SetStop(p.input.LT(-1)) - retCtx := p.ctx // save current ctx (return value) - // unroll so ctx is as it was before call to recursive method - if p.parseListeners != nil { - for p.ctx != parentCtx { - p.TriggerExitRuleEvent() - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } - } else { - p.ctx = parentCtx - } - // hook into tree - retCtx.SetParent(parentCtx) - if p.BuildParseTrees && parentCtx != nil { - // add return ctx into invoking rule's tree - parentCtx.AddChild(retCtx) - } -} - -func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { - ctx := p.ctx - for ctx != nil { - if ctx.GetRuleIndex() == ruleIndex { - return ctx - } - ctx = ctx.GetParent().(ParserRuleContext) - } - return nil -} - -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { - return precedence >= p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) inContext(context ParserRuleContext) bool { - // TODO: useful in parser? - return false -} - -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is -// implemented such that the complete context-sensitive follow set does not -// need to be explicitly constructed. -// -//
-// return getExpectedTokens().contains(symbol)
-// 
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -// -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. 
- -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.numStates() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. -// -func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go deleted file mode 100644 index 128b9a9..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go +++ /dev/null @@ -1,1473 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorListATNDecisions = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. 
- // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - var s0 *DFAState - if dfa.precedenceDfa { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0 - } - - if s0 == nil { - if outerContext == nil { - outerContext = RuleContextEmpty - } - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - // If p is not a precedence DFA, we check the ATN start state - // to determine if p ATN start state is the decision for the - // closure block that determines whether a precedence rule - // should continue or complete. 
- - t2 := dfa.atnStartState - t, ok := t2.(*StarLoopEntryState) - if !dfa.precedenceDfa && ok { - if t.precedenceRuleDecision { - dfa.setPrecedenceDfa(true) - } - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) - - if dfa.precedenceDfa { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.s0 = s0 - } - } - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) 
-// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -// -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. 
- e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - if alts.length() == 0 { - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - } else if alts.length() == 1 { - return alts.minValue() - } else { - // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. 
- p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - - panic("Should not have reached p state") -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - edges := previousD.edges - if edges == nil { - return nil - } - - return edges[t+1] -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. 
- -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. 
- nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. 
- e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } else if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. 
- - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. - - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, nil, reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if ParserATNSimulatorDebug { - fmt.Println("in computeReachSet, starting closure: " + closure.String()) - } - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. 
- // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var SkippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - _, ok := c.GetState().(*RuleStopState) - - if ok { - if fullCtx || t == TokenEOF { - if SkippedStopStates == nil { - SkippedStopStates = make([]*BaseATNConfig, 0) - } - SkippedStopStates = append(SkippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for j := 0; j < len(c.GetState().GetTransitions()); j++ { - trans := c.GetState().GetTransitions()[j] - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - // Now figure out where the reach operation can take us... - var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. - // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if SkippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. 
- reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewSet(nil, nil) - treatEOFAsEpsilon := t == TokenEOF - for k := 0; k < len(intermediate.configs); k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. 
This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if SkippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(SkippedStopStates); l++ { - reach.Add(SkippedStopStates[l], p.mergeCache) - } - } - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//

When {@code lookToEndOfRule} is true, p method uses -// {@link ATN//NextTokens} for each configuration in {@code configs} which is -// not already in a rule stop state to see if a rule stop state is reachable -// from the configuration via epsilon-only transitions.

-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -// -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - - _, ok := config.GetState().(*RuleStopState) - - if ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewSet(nil, nil) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. 
The transformation -// process applies the following changes to the start state's configuration -// set. -// -//
    -//
  1. Evaluate the precedence predicates for each configuration using -// {@link SemanticContext//evalPrecedence}.
  2. -//
  3. Remove all configurations which predict an alternative greater than -// 1, for which another configuration that predicts alternative 1 is in the -// same ATN state with the same prediction context. This transformation is -// valid for the following reasons: -//
      -//
    • The closure block cannot contain any epsilon transitions which bypass -// the body of the closure, so all states reachable via alternative 1 are -// part of the precedence alternatives of the transformed left-recursive -// rule.
    • -//
    • The "primary" portion of a left recursive rule cannot contain an -// epsilon transition, so the only way an alternative other than 1 can exist -// in a state that is also reachable via alternative 1 is by nesting calls -// to the left-recursive rule, with the outer calls not being at the -// preferred precedence level.
    • -//
    -//
  4. -//
-// -//

-// The prediction context must be considered by p filter to address -// situations like the following. -//

-// -//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-// 
-//
-//

-// If the above grammar, the ATN state immediately before the token -// reference {@code 'a'} in {@code letterA} is reachable from the left edge -// of both the primary and closure blocks of the left-recursive rule -// {@code statement}. The prediction context associated with each of these -// configurations distinguishes between them, and prevents the alternative -// which stepped out to {@code prog} (and then back in to {@code statement} -// from being eliminated by the filter. -//

-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -// -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). 
- if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i < nalts+1; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} 
state was reached during ATN simulation. -// -//

-// The default implementation of p method uses the following -// algorithm to identify an ATN configuration which successfully parsed the -// decision entry rule. Choosing such an alternative ensures that the -// {@link ParserRuleContext} returned by the calling rule will be complete -// and valid, and the syntax error will be Reported later at a more -// localized location.

-// -//
    -//
  • If a syntactically valid path or paths reach the end of the decision rule and -// they are semantically valid if predicated, return the min associated alt.
  • -//
  • Else, if a semantically invalid but syntactically valid path exist -// or paths exist, return the minimum associated alt. -//
  • -//
  • Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
  • -//
-// -//

-// In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in -// the parser. Specifically, p could occur if the only configuration -// capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing p alternative within -// {@link //AdaptivePredict} instead of panicing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific -// predicate which is preventing the parser from successfully parsing the -// decision rule, which helps developers identify and correct logic errors -// in semantic predicates. -//

-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -// -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. 
-// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. 
-// -func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - - if ParserATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") - fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - _, ok := config.GetState().(*RuleStopState) - if ok { - // We hit rule end. 
If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - if !t.getIsEpsilon() && closureBusy.add(c) != c { - // avoid infinite recursion for EOF* and EOF+ - continue - } - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. 
- - if closureBusy.add(c) != c { - // avoid infinite recursion for right-recursive rules - continue - } - - if p.dfa != nil && p.dfa.precedenceDfa { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - - return "" -} - -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { - - switch t.getSerializationType() { - case TransitionRULE: - return p.ruleTransition(config, t.(*RuleTransition)) - case TransitionPRECEDENCE: - return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionPREDICATE: - return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionACTION: - return p.actionTransition(config, t.(*ActionTransition)) - case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) - case TransitionATOM: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - 
case TransitionRANGE: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - case TransitionSET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - default: - return nil - } -} - -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) - } - return NewBaseATNConfig4(config, t.getTarget()) -} - -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + - strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && inContext { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. 
- currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + - ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && ((pt.isCtxDependent && inContext) || !pt.isCtxDependent) { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. 
- currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) - } - returnState := t.followState - newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) -} - -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModeGetAlts(altsets) -} - -// Sam pointed out a problem with the previous definition, v3, of -// ambiguous states. If we have another state associated with conflicting -// alternatives, we should keep going. For example, the following grammar -// -// s : (ID | ID ID?) '' -// -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. -// The key is that we have a single state that has config's only associated -// with a single alternative, 2, and crucially the state transitions -// among the configurations are all non-epsilon transitions. 
That means -// we don't consider any conflicts that include alternative 2. So, we -// ignore the conflict between alts 1 and 2. We ignore a set of -// conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. -// -// It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: -// -// a : A | A | A B -// -// After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned -// with states associated with the conflicting alternatives. Here alt -// 3 is not associated with the conflicting configs, but since we can continue -// looking for input reasonably, I don't declare the state done. We -// ignore a set of conflicting alts when we have an alternative -// that we still need to pursue. 
-// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { - var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { - conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) - } else { - conflictingAlts = configs.GetConflictingAlts() - } - return conflictingAlts -} - -func (p *ParserATNSimulator) GetTokenName(t int) string { - if t == TokenEOF { - return "EOF" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil { - if t >= len(p.parser.GetLiteralNames()) { - fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ",")) - // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect - } else { - return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } - } - - return strconv.Itoa(t) -} - -func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { - return p.GetTokenName(input.LA(1)) -} - -// Used for debugging in AdaptivePredict around execATN but I cut -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. 
-// -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { - - panic("Not implemented") - - // fmt.Println("dead end configs: ") - // var decs = nvae.deadEndConfigs - // - // for i:=0; i0) { - // var t = c.state.GetTransitions()[0] - // if t2, ok := t.(*AtomTransition); ok { - // trans = "Atom "+ p.GetTokenName(t2.label) - // } else if t3, ok := t.(SetTransition); ok { - // _, ok := t.(*NotSetTransition) - // - // var s string - // if (ok){ - // s = "~" - // } - // - // trans = s + "Set " + t3.set - // } - // } - // fmt.Errorf(c.String(p.parser, true) + ":" + trans) - // } -} - -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { - return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) -} - -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { - alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { - if alt == ATNInvalidAltNumber { - alt = c.GetAlt() // found first alt - } else if c.GetAlt() != alt { - return ATNInvalidAltNumber - } - } - return alt -} - -// -// Add an edge to the DFA, if possible. This method calls -// {@link //addDFAState} to ensure the {@code to} state is present in the -// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the -// range of edges that can be represented in the DFA tables, p method -// returns without adding the edge to the DFA. -// -//

If {@code to} is {@code nil}, p method returns {@code nil}. -// Otherwise, p method returns the {@link DFAState} returned by calling -// {@link //addDFAState} for the {@code to} state.

-// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} -// -func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) - } - if to == nil { - return nil - } - to = p.addDFAState(dfa, to) // used existing if possible not incoming - if from == nil || t < -1 || t > p.atn.maxTokenType { - return to - } - if from.edges == nil { - from.edges = make([]*DFAState, p.atn.maxTokenType+1+1) - } - from.edges[t+1] = to // connect - - if ParserATNSimulatorDebug { - var names []string - if p.parser != nil { - names = p.parser.GetLiteralNames() - } - - fmt.Println("DFA=\n" + dfa.String(names, nil)) - } - return to -} - -// -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. -// -//

If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and -// does not change the DFA.

-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -// -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - hash := d.hash() - existing, ok := dfa.getState(hash) - if ok { - return existing - } - d.stateNumber = dfa.numStates() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.setState(hash, d) - if ParserATNSimulatorDebug { - fmt.Println("adding NewDFA state: " + d.String()) - } - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p 
*ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go deleted file mode 100644 index 49cd10c..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "reflect" - "strconv" -) - -type ParserRuleContext interface { - RuleContext - - SetException(RecognitionException) - - AddTokenNode(token Token) *TerminalNodeImpl - AddErrorNode(badToken Token) *ErrorNodeImpl - - EnterRule(listener ParseTreeListener) - ExitRule(listener ParseTreeListener) - - SetStart(Token) - GetStart() Token - - SetStop(Token) - GetStop() Token - - AddChild(child RuleContext) RuleContext - RemoveLastChild() -} - -type BaseParserRuleContext struct { - *BaseRuleContext - - start, stop Token - exception RecognitionException - children []Tree -} - -func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { - prc := new(BaseParserRuleContext) - - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = -1 - // * If we are debugging or building a parse tree for a Visitor, - // we need to track all of the tokens and rule invocations associated - // with prc rule's context. This is empty for parsing w/o tree constr. - // operation because we don't the need to track the details about - // how we parse prc rule. - // / - prc.children = nil - prc.start = nil - prc.stop = nil - // The exception that forced prc rule to return. If the rule successfully - // completed, prc is {@code nil}. 
- prc.exception = nil - - return prc -} - -func (prc *BaseParserRuleContext) SetException(e RecognitionException) { - prc.exception = e -} - -func (prc *BaseParserRuleContext) GetChildren() []Tree { - return prc.children -} - -func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { - // from RuleContext - prc.parentCtx = ctx.parentCtx - prc.invokingState = ctx.invokingState - prc.children = nil - prc.start = ctx.start - prc.stop = ctx.stop -} - -func (prc *BaseParserRuleContext) GetText() string { - if prc.GetChildCount() == 0 { - return "" - } - - var s string - for _, child := range prc.children { - s += child.(ParseTree).GetText() - } - - return s -} - -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { -} - -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { -} - -// * Does not set parent link other add methods do that/// -func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. 
-// / -func (prc *BaseParserRuleContext) RemoveLastChild() { - if prc.children != nil && len(prc.children) > 0 { - prc.children = prc.children[0 : len(prc.children)-1] - } -} - -func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { - - node := NewTerminalNodeImpl(token) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node - -} - -func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { - node := NewErrorNodeImpl(badToken) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node -} - -func (prc *BaseParserRuleContext) GetChild(i int) Tree { - if prc.children != nil && len(prc.children) >= i { - return prc.children[i] - } - - return nil -} - -func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { - if childType == nil { - return prc.GetChild(i).(RuleContext) - } - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if reflect.TypeOf(child) == childType { - if i == 0 { - return child.(RuleContext) - } - - i-- - } - } - - return nil -} - -func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { - return TreesStringTree(prc, ruleNames, recog) -} - -func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { - return prc -} - -func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { - return visitor.VisitChildren(prc) -} - -func (prc *BaseParserRuleContext) SetStart(t Token) { - prc.start = t -} - -func (prc *BaseParserRuleContext) GetStart() Token { - return prc.start -} - -func (prc *BaseParserRuleContext) SetStop(t Token) { - prc.stop = t -} - -func (prc *BaseParserRuleContext) GetStop() Token { - return prc.stop -} - -func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if c2, ok := child.(TerminalNode); ok { - if c2.GetSymbol().GetTokenType() == ttype { - if i == 0 { - 
return c2 - } - - i-- - } - } - } - return nil -} - -func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { - if prc.children == nil { - return make([]TerminalNode, 0) - } - - tokens := make([]TerminalNode, 0) - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if tchild, ok := child.(TerminalNode); ok { - if tchild.GetSymbol().GetTokenType() == ttype { - tokens = append(tokens, tchild) - } - } - } - - return tokens -} - -func (prc *BaseParserRuleContext) GetPayload() interface{} { - return prc -} - -func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { - if prc.children == nil || i < 0 || i >= len(prc.children) { - return nil - } - - j := -1 // what element have we found with ctxType? - for _, o := range prc.children { - - childType := reflect.TypeOf(o) - - if childType.Implements(ctxType) { - j++ - if j == i { - return o.(RuleContext) - } - } - } - return nil -} - -// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do -// check for convertibility - -func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { - return prc.getChild(ctxType, i) -} - -func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { - if prc.children == nil { - return make([]RuleContext, 0) - } - - contexts := make([]RuleContext, 0) - - for _, child := range prc.children { - childType := reflect.TypeOf(child) - - if childType.ConvertibleTo(ctxType) { - contexts = append(contexts, child.(RuleContext)) - } - } - return contexts -} - -func (prc *BaseParserRuleContext) GetChildCount() int { - if prc.children == nil { - return 0 - } - - return len(prc.children) -} - -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { - if prc.start == nil || prc.stop == nil { - return TreeInvalidInterval - } - - return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) -} - -//need to manage 
circular dependencies, so export now - -// Print out a whole tree, not just a node, in LISP format -// (root child1 .. childN). Print just a node if b is a leaf. -// - -func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { - - var p ParserRuleContext = prc - s := "[" - for p != nil && p != stop { - if ruleNames == nil { - if !p.IsEmpty() { - s += strconv.Itoa(p.GetInvokingState()) - } - } else { - ri := p.GetRuleIndex() - var ruleName string - if ri >= 0 && ri < len(ruleNames) { - ruleName = ruleNames[ri] - } else { - ruleName = strconv.Itoa(ri) - } - s += ruleName - } - if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { - s += " " - } - pi := p.GetParent() - if pi != nil { - p = pi.(ParserRuleContext) - } else { - p = nil - } - } - s += "]" - return s -} - -var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) - -type InterpreterRuleContext interface { - ParserRuleContext -} - -type BaseInterpreterRuleContext struct { - *BaseParserRuleContext -} - -func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { - - prc := new(BaseInterpreterRuleContext) - - prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = ruleIndex - - return prc -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go deleted file mode 100644 index 99acb33..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go +++ /dev/null @@ -1,756 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. 
-// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. -// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - hash() int - GetParent(int) PredictionContext - getReturnState(int) int - equals(PredictionContext) bool - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -func calculateEmptyHash() int { - h := murmurInit(1) - return murmurFinish(h, 0) -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. 
-// -func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(37) - - if parent != nil { - s.cachedHash = calculateHash(parent, returnState) - } else { - s.cachedHash = calculateEmptyHash() - } - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { - if b == other { - return true - } else if _, ok := 
other.(*BaseSingletonPredictionContext); !ok { - return false - } else if b.hash() != other.hash() { - return false // can't be same if hash is different - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != other.getReturnState(0) { - return false - } else if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) hash() int { - h := murmurInit(1) - - if b.parentCtx == nil { - return murmurFinish(h, 0) - } - - h = murmurUpdate(h, b.parentCtx.hash()) - h = murmurUpdate(h, b.returnState) - return murmurFinish(h, 2) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) equals(other PredictionContext) bool { - return e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates 
[]int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(37) - - for i := range parents { - c.cachedHash += calculateHash(parents[i], returnStates[i]) - } - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -func (a *ArrayPredictionContext) equals(other PredictionContext) bool { - if _, ok := other.(*ArrayPredictionContext); !ok { - return false - } else if a.cachedHash != other.hash() { - return false // can't be same if hash is different - } else { - otherP := other.(*ArrayPredictionContext) - return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents - } -} - -func (a *ArrayPredictionContext) hash() int { - h := murmurInit(1) - - for _, p := range a.parents { - h = murmurUpdate(h, p.hash()) - } - - for _, r := range a.returnStates { - h = murmurUpdate(h, r) - } - - return murmurFinish(h, 2 * len(a.parents)) -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := 
"[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = RuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. (if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - // share same graph if both same - if a == b { - return a - } - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - // convert singleton so both are arrays to normalize - if _, ok := a.(*BaseSingletonPredictionContext); ok { - a = 
NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } - if _, ok := b.(*BaseSingletonPredictionContext); ok { - b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } - return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) -} - -// -// Merge two {@link SingletonBasePredictionContext} instances. -// -//

Stack tops equal, parents merge is same return left graph.
-//

-// -//

Same stack top, parents differ merge parents giving array node, then -// remainders of those graphs. A Newroot node is created to point to the -// merged parents.
-//

-// -//

Different stack tops pointing to same parent. Make array node for the -// root where both element in the root point to the same (original) -// parent.
-//

-// -//

Different stack tops pointing to different parents. Make array node for -// the root where each element points to the corresponding original -// parent.
-//

-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.hash(), a.hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. 
dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) - } - return apc -} - -// -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//

Local-Context Merges

-// -//

These local-context merge operations are used when {@code rootIsWildcard} -// is true.

-// -//

{@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//

-// -//

{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is -// {@code //EMPTY} return left graph.
-//

-// -//

Special case of last merge if local context.
-//

-// -//

Full-Context Merges

-// -//

These full-context merge operations are used when {@code rootIsWildcard} -// is false.

-// -//

-// -//

Must keep all contexts {@link //EMPTY} in array is a special value (and -// nil parent).
-//

-// -//

-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// -// Merge two {@link ArrayBasePredictionContext} instances. -// -//

Different tops, different parents.
-//

-// -//

Shared top, same parents.
-//

-// -//

Shared top, different parents.
-//

-// -//

Shared top, all shared parents.
-//

-// -//

Equal tops, merge parents and reduce top to -// {@link SingletonBasePredictionContext}.
-//

-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.hash(), a.hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - 
mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - if M == a { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), a) - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), b) - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), M) - } - return M -} - -// -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. 
-// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go deleted file mode 100644 index 15718f9..0000000 --- 
a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //

- // When using this prediction mode, the parser will either return a correct - // parse tree (i.e. the same parse tree that would be returned with the - // {@link //LL} prediction mode), or it will Report a syntax error. If a - // syntax error is encountered when using the {@link //SLL} prediction mode, - // it may be due to either an actual syntax error in the input or indicate - // that the particular combination of grammar and input requires the more - // powerful {@link //LL} prediction abilities to complete successfully.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //

- // When using this prediction mode, the parser will make correct decisions - // for all syntactically-correct grammar and input combinations. However, in - // cases where the grammar is truly ambiguous this prediction mode might not - // Report a precise answer for exactly which alternatives are - // ambiguous.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //

- // This prediction mode may be used for diagnosing ambiguities during - // grammar development. Due to the performance overhead of calculating sets - // of ambiguous alternatives, this prediction mode should be avoided when - // the exact results are not necessary.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeLLExactAmbigDetection = 2 -) - -// -// Computes the SLL prediction termination condition. -// -//

-// This method computes the SLL prediction termination condition for both of -// the following cases.

-// -//
    -//
  • The usual SLL+LL fallback upon SLL conflict
  • -//
  • Pure SLL without LL fallback
  • -//
-// -//

COMBINED SLL+LL PARSING

-// -//

When LL-fallback is enabled upon SLL conflict, correct predictions are -// ensured regardless of how the termination condition is computed by this -// method. Due to the substantially higher cost of LL prediction, the -// prediction should only fall back to LL when the additional lookahead -// cannot lead to a unique SLL prediction.

-// -//

Assuming combined SLL+LL parsing, an SLL configuration set with only -// conflicting subsets should fall back to full LL, even if the -// configuration sets don't resolve to the same alternative (e.g. -// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting -// configuration, SLL could continue with the hopes that more lookahead will -// resolve via one of those non-conflicting configurations.

-// -//

Here's the prediction termination rule them: SLL (for SLL+LL parsing) -// stops when it sees only conflicting configuration subsets. In contrast, -// full LL keeps going when there is uncertainty.

-// -//

HEURISTIC

-// -//

As a heuristic, we stop prediction when we see any conflicting subset -// unless we see a state that only has one alternative associated with it. -// The single-alt-state thing lets prediction continue upon rules like -// (otherwise, it would admit defeat too soon):

-// -//

{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }

-// -//

When the ATN simulation reaches the state before {@code ''}, it has a -// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally -// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop -// processing this node because alternative to has another way to continue, -// via {@code [6|2|[]]}.

-// -//

It also let's us continue for this rule:

-// -//

{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }

-// -//

After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not stop -// working on this state. In the previous example, we're concerned with -// states associated with the conflicting alternatives. Here alt 3 is not -// associated with the conflicting configs, but since we can continue -// looking for input reasonably, don't declare the state done.

-// -//

PURE SLL PARSING

-// -//

To handle pure SLL parsing, all we have to do is make sure that we -// combine stack contexts for configurations that differ only by semantic -// predicate. From there, we can do the usual SLL termination heuristic.

-// -//

PREDICATES IN SLL+LL PARSING

-// -//

SLL decisions don't evaluate predicates until after they reach DFA stop -// states because they need to create the DFA cache that works in all -// semantic situations. In contrast, full LL evaluates predicates collected -// during start state computation so it can ignore predicates thereafter. -// This means that SLL termination detection can totally ignore semantic -// predicates.

-// -//

Implementation-wise, {@link ATNConfigSet} combines stack contexts but not -// semantic predicate contexts so we might see two configurations like the -// following.

-// -//

{@code (s, 1, x, {}), (s, 1, x', {p})}

-// -//

Before testing these configurations against others, we have to merge -// {@code x} and {@code x'} (without modifying the existing configurations). -// For example, we test {@code (x+x')==x''} when looking for conflicts in -// the following configurations.

-// -//

{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

-// -//

If the configuration set has predicates (as indicated by -// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of -// the configurations to strip out all of the predicates so that a standard -// {@link ATNConfigSet} will merge everything ignoring predicates.

-// -func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. 
Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// -// Full LL prediction termination. -// -//

Can we stop looking ahead during ATN simulation or is there some -// uncertainty as to which alternative we will ultimately pick, after -// consuming more input? Even if there are partial conflicts, we might know -// that everything is going to resolve to the same minimum alternative. That -// means we can stop since no more lookahead will change that fact. On the -// other hand, there might be multiple conflicts that resolve to different -// minimums. That means we need more look ahead to decide which of those -// alternatives we should predict.

-// -//

The basic idea is to split the set of configurations {@code C}, into -// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with -// non-conflicting configurations. Two configurations conflict if they have -// identical {@link ATNConfig//state} and {@link ATNConfig//context} values -// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} -// and {@code (s, j, ctx, _)} for {@code i!=j}.

-// -//

Reduce these configuration subsets to the set of possible alternatives. -// You can compute the alternative subsets in one pass as follows:

-// -//

{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in -// {@code C} holding {@code s} and {@code ctx} fixed.

-// -//

Or in pseudo-code, for each configuration {@code c} in {@code C}:

-// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-// -//

The values in {@code map} are the set of {@code A_s,ctx} sets.

-// -//

If {@code |A_s,ctx|=1} then there is no conflict associated with -// {@code s} and {@code ctx}.

-// -//

Reduce the subsets to singletons by choosing a minimum of each subset. If -// the union of these alternative subsets is a singleton, then no amount of -// more lookahead will help us. We will always pick that alternative. If, -// however, there is more than one alternative, then we are uncertain which -// alternative to predict and must continue looking for resolution. We may -// or may not discover an ambiguity in the future, even if there are no -// conflicting subsets this round.

-// -//

The biggest sin is to terminate early because it means we've made a -// decision but were uncertain as to the eventual outcome. We haven't used -// enough lookahead. On the other hand, announcing a conflict too late is no -// big deal you will still have the conflict. It's just inefficient. It -// might even look until the end of file.

-// -//

No special consideration for semantic predicates is required because -// predicates are evaluated on-the-fly for full LL prediction, ensuring that -// no configuration contains a semantic context during the termination -// check.

-// -//

CONFLICTING CONFIGS

-// -//

Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict -// when {@code i!=j} but {@code x=x'}. Because we merge all -// {@code (s, i, _)} configurations together, that means that there are at -// most {@code n} configurations associated with state {@code s} for -// {@code n} possible alternatives in the decision. The merged stacks -// complicate the comparison of configuration contexts {@code x} and -// {@code x'}. Sam checks to see if one is a subset of the other by calling -// merge and checking to see if the merged result is either {@code x} or -// {@code x'}. If the {@code x} associated with lowest alternative {@code i} -// is the superset, then {@code i} is the only possible prediction since the -// others resolve to {@code min(i)} as well. However, if {@code x} is -// associated with {@code j>i} then at least one stack configuration for -// {@code j} is not in conflict with alternative {@code i}. The algorithm -// should keep going, looking for more lookahead due to the uncertainty.

-// -//

For simplicity, I'm doing a equality check between {@code x} and -// {@code x'} that lets the algorithm continue to consume lookahead longer -// than necessary. The reason I like the equality is of course the -// simplicity but also because that is the test you need to detect the -// alternatives that are actually in conflict.

-// -//

CONTINUE/STOP RULE

-// -//

Continue if union of resolved alternative sets from non-conflicting and -// conflicting alternative subsets has more than one alternative. We are -// uncertain about which alternative to predict.

-// -//

The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which -// alternatives are still in the running for the amount of input we've -// consumed at this point. The conflicting sets let us to strip away -// configurations that won't lead to more states because we resolve -// conflicts to the configuration with a minimum alternate for the -// conflicting set.

-// -//

CASES

-// -//
    -// -//
  • no conflicts and more than 1 alternative in set => continue
  • -// -//
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, -// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set -// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = -// {@code {1,3}} => continue -//
  • -// -//
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, -// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set -// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = -// {@code {1}} => stop and predict 1
  • -// -//
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, -// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U -// {@code {1}} = {@code {1}} => stop and predict 1, can announce -// ambiguity {@code {1,2}}
  • -// -//
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, -// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U -// {@code {2}} = {@code {1,2}} => continue
  • -// -//
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, -// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U -// {@code {3}} = {@code {1,3}} => continue
  • -// -//
-// -//

EXACT AMBIGUITY DETECTION

-// -//

If all states Report the same conflicting set of alternatives, then we -// know we have the exact ambiguity set.

-// -//

|A_i|>1 and -// A_i = A_j for all i, j.

-// -//

In other words, we continue examining lookahead until all {@code A_i} -// have more than one alternative and all {@code A_i} are the same. If -// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate -// because the resolved set is {@code {1}}. To determine what the real -// ambiguity is, we have to know whether the ambiguity is between one and -// two or one and three so we keep going. We can only stop prediction when -// we need exact ambiguity detection when the sets look like -// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

-// -func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -// -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// -// Determines if every alternative subset in {@code altsets} is equivalent. 
-// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -// -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -// -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -// -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// -// This func gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-// -func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := make(map[int]*BitSet) - - for _, c := range configs.GetItems() { - key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() - - alts, ok := configToAlts[key] - if !ok { - alts = NewBitSet() - configToAlts[key] = alts - } - alts.add(c.GetAlt()) - } - - values := make([]*BitSet, 0, 10) - for _, v := range configToAlts { - values = append(values, v) - } - return values -} - -// -// Get a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-// 
-// -func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go deleted file mode 100644 index 188b9d9..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "fmt" - "strings" - - "strconv" -) - -type Recognizer interface { - GetLiteralNames() []string - GetSymbolicNames() []string - GetRuleNames() []string - - Sempred(RuleContext, int, int) bool - Precpred(RuleContext, int) bool - - GetState() int - SetState(int) - Action(RuleContext, int, int) - AddErrorListener(ErrorListener) - RemoveErrorListeners() - GetATN() *ATN - GetErrorListenerDispatch() ErrorListener -} - -type BaseRecognizer struct { - listeners []ErrorListener - state int - - RuleNames []string - LiteralNames []string - SymbolicNames []string - GrammarFileName string -} - -func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec -} - -var tokenTypeMapCache = make(map[string]int) -var ruleIndexMapCache = make(map[string]int) - -func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.7.1" - if runtimeVersion != toolVersion { - fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) - } -} - -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { - panic("action not implemented on Recognizer!") -} - -func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { - b.listeners = append(b.listeners, listener) -} - -func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) -} - -func (b *BaseRecognizer) GetRuleNames() []string { - return b.RuleNames -} - -func (b *BaseRecognizer) GetTokenNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetSymbolicNames() []string { - return b.SymbolicNames -} - -func (b *BaseRecognizer) GetLiteralNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetState() int { - return b.state -} - -func (b *BaseRecognizer) SetState(v int) { - b.state = v -} - -//func (b *Recognizer) GetTokenTypeMap() { -// var 
tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current recognizer does not provide a list of token names.") -// } -// var result = tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. -// -//

Used for XPath and tree pattern compilation.

-// -func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map result = tokenTypeMapCache.Get(vocabulary) -// if (result == null) { -// result = new HashMap() -// for (int i = 0; i < GetATN().maxTokenType; i++) { -// String literalName = vocabulary.getLiteralName(i) -// if (literalName != null) { -// result.put(literalName, i) -// } -// -// String symbolicName = vocabulary.GetSymbolicName(i) -// if (symbolicName != null) { -// result.put(symbolicName, i) -// } -// } -// -// result.put("EOF", Token.EOF) -// result = Collections.unmodifiableMap(result) -// tokenTypeMapCache.put(vocabulary, result) -// } -// -// return result -// } -//} - -// What is the error header, normally line/character position information?// -func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { - line := e.GetOffendingToken().GetLine() - column := e.GetOffendingToken().GetColumn() - return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. 
Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. -// -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. -// -func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - - return "'" + s + "'" -} - -func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { - return NewProxyErrorListener(b.listeners) -} - -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { - return true -} - -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { - return true -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go deleted file mode 100644 index 600cf8c..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. 
If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go deleted file mode 100644 index 49205a1..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. -// -//

I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.

-// - -type SemanticContext interface { - comparable - - evaluate(parser Recognizer, outerContext RuleContext) bool - evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext - - hash() int - String() string -} - -func SemanticContextandContext(a, b SemanticContext) SemanticContext { - if a == nil || a == SemanticContextNone { - return b - } - if b == nil || b == SemanticContextNone { - return a - } - result := NewAND(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -func SemanticContextorContext(a, b SemanticContext) SemanticContext { - if a == nil { - return b - } - if b == nil { - return a - } - if a == SemanticContextNone || b == SemanticContextNone { - return SemanticContextNone - } - result := NewOR(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -type Predicate struct { - ruleIndex int - predIndex int - isCtxDependent bool -} - -func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { - p := new(Predicate) - - p.ruleIndex = ruleIndex - p.predIndex = predIndex - p.isCtxDependent = isCtxDependent // e.g., $i ref in pred - return p -} - -//The default {@link SemanticContext}, which is semantically equivalent to -//a predicate of the form {@code {true}?}. 
- -var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) - -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - return p -} - -func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - - var localctx RuleContext - - if p.isCtxDependent { - localctx = outerContext - } - - return parser.Sempred(localctx, p.ruleIndex, p.predIndex) -} - -func (p *Predicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*Predicate); !ok { - return false - } else { - return p.ruleIndex == other.(*Predicate).ruleIndex && - p.predIndex == other.(*Predicate).predIndex && - p.isCtxDependent == other.(*Predicate).isCtxDependent - } -} - -func (p *Predicate) hash() int { - return p.ruleIndex*43 + p.predIndex*47 -} - -func (p *Predicate) String() string { - return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" -} - -type PrecedencePredicate struct { - precedence int -} - -func NewPrecedencePredicate(precedence int) *PrecedencePredicate { - - p := new(PrecedencePredicate) - p.precedence = precedence - - return p -} - -func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - return parser.Precpred(outerContext, p.precedence) -} - -func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - if parser.Precpred(outerContext, p.precedence) { - return SemanticContextNone - } - - return nil -} - -func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { - return p.precedence - other.precedence -} - -func (p *PrecedencePredicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*PrecedencePredicate); !ok { - return false - } else { - return p.precedence == other.(*PrecedencePredicate).precedence - } -} - -func (p *PrecedencePredicate) hash() int { - return p.precedence * 51 -} - -func (p 
*PrecedencePredicate) String() string { - return "{" + strconv.Itoa(p.precedence) + ">=prec}?" -} - -func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { - result := make([]*PrecedencePredicate, 0) - - for _, v := range set.values() { - if c2, ok := v.(*PrecedencePredicate); ok { - result = append(result, c2) - } - } - - return result -} - -// A semantic context which is true whenever none of the contained contexts -// is false.` - -type AND struct { - opnds []SemanticContext -} - -func NewAND(a, b SemanticContext) *AND { - - operands := NewSet(nil, nil) - if aa, ok := a.(*AND); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*AND); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence < reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - and := new(AND) - and.opnds = opnds - - return and -} - -func (a *AND) equals(other interface{}) bool { - if a == other { - return true - } else if _, ok := other.(*AND); !ok { - return false - } else { - for i, v := range other.(*AND).opnds { - if !a.opnds[i].equals(v) { - return false - } - } - return true - } -} - -// -// {@inheritDoc} -// -//

-// The evaluation of predicates by a context is short-circuiting, but -// unordered.

-// -func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(a.opnds); i++ { - if !a.opnds[i].evaluate(parser, outerContext) { - return false - } - } - return true -} - -func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - - for i := 0; i < len(a.opnds); i++ { - context := a.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == nil { - // The AND context is false if any element is false - return nil - } else if evaluated != SemanticContextNone { - // Reduce the result by Skipping true elements - operands = append(operands, evaluated) - } - } - if !differs { - return a - } - - if len(operands) == 0 { - // all elements were true, so the AND context is true - return SemanticContextNone - } - - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextandContext(result, o) - } - } - - return result -} - -func (a *AND) hash() int { - h := murmurInit(37) // Init with a value different from OR - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *OR) hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *AND) String() string { - s := "" - - for _, o := range a.opnds { - s += "&& " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} - -// -// A semantic context which is true whenever at least one of the contained -// contexts is true. 
-// - -type OR struct { - opnds []SemanticContext -} - -func NewOR(a, b SemanticContext) *OR { - - operands := NewSet(nil, nil) - if aa, ok := a.(*OR); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*OR); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence > reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - o := new(OR) - o.opnds = opnds - - return o -} - -func (o *OR) equals(other interface{}) bool { - if o == other { - return true - } else if _, ok := other.(*OR); !ok { - return false - } else { - for i, v := range other.(*OR).opnds { - if !o.opnds[i].equals(v) { - return false - } - } - return true - } -} - -//

-// The evaluation of predicates by o context is short-circuiting, but -// unordered.

-// -func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(o.opnds); i++ { - if o.opnds[i].evaluate(parser, outerContext) { - return true - } - } - return false -} - -func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - for i := 0; i < len(o.opnds); i++ { - context := o.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == SemanticContextNone { - // The OR context is true if any element is true - return SemanticContextNone - } else if evaluated != nil { - // Reduce the result by Skipping false elements - operands = append(operands, evaluated) - } - } - if !differs { - return o - } - if len(operands) == 0 { - // all elements were false, so the OR context is false - return nil - } - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextorContext(result, o) - } - } - - return result -} - -func (o *OR) String() string { - s := "" - - for _, o := range o.opnds { - s += "|| " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go deleted file mode 100644 index 2d8e990..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "strconv" - "strings" -) - -type TokenSourceCharStreamPair struct { - tokenSource TokenSource - charStream CharStream -} - -// A token has properties: text, type, line, character position in the line -// (so we can ignore tabs), token channel, index, and source from which -// we obtained this token. - -type Token interface { - GetSource() *TokenSourceCharStreamPair - GetTokenType() int - GetChannel() int - GetStart() int - GetStop() int - GetLine() int - GetColumn() int - - GetText() string - SetText(s string) - - GetTokenIndex() int - SetTokenIndex(v int) - - GetTokenSource() TokenSource - GetInputStream() CharStream -} - -type BaseToken struct { - source *TokenSourceCharStreamPair - tokenType int // token type of the token - channel int // The parser ignores everything not on DEFAULT_CHANNEL - start int // optional return -1 if not implemented. - stop int // optional return -1 if not implemented. - tokenIndex int // from 0..n-1 of the token object in the input stream - line int // line=1..n of the 1st character - column int // beginning of the line at which it occurs, 0..n-1 - text string // text of the token. - readOnly bool -} - -const ( - TokenInvalidType = 0 - - // During lookahead operations, this "token" signifies we hit rule end ATN state - // and did not follow it despite needing to. - TokenEpsilon = -2 - - TokenMinUserTokenType = 1 - - TokenEOF = -1 - - // All tokens go to the parser (unless Skip() is called in that rule) - // on a particular "channel". The parser tunes to a particular channel - // so that whitespace etc... can go to the parser on a "hidden" channel. - - TokenDefaultChannel = 0 - - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. 
- - TokenHiddenChannel = 1 -) - -func (b *BaseToken) GetChannel() int { - return b.channel -} - -func (b *BaseToken) GetStart() int { - return b.start -} - -func (b *BaseToken) GetStop() int { - return b.stop -} - -func (b *BaseToken) GetLine() int { - return b.line -} - -func (b *BaseToken) GetColumn() int { - return b.column -} - -func (b *BaseToken) GetTokenType() int { - return b.tokenType -} - -func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { - return b.source -} - -func (b *BaseToken) GetTokenIndex() int { - return b.tokenIndex -} - -func (b *BaseToken) SetTokenIndex(v int) { - b.tokenIndex = v -} - -func (b *BaseToken) GetTokenSource() TokenSource { - return b.source.tokenSource -} - -func (b *BaseToken) GetInputStream() CharStream { - return b.source.charStream -} - -type CommonToken struct { - *BaseToken -} - -func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - - t := new(CommonToken) - - t.BaseToken = new(BaseToken) - - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 - if t.source.tokenSource != nil { - t.line = source.tokenSource.GetLine() - t.column = source.tokenSource.GetCharPositionInLine() - } else { - t.column = -1 - } - return t -} - -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. - -//CommonToken.EMPTY_SOURCE = [ nil, nil ] - -// Constructs a New{@link CommonToken} as a copy of another {@link Token}. -// -//

-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //GetText}, and {@link //source} -// will be constructed from the result of {@link Token//GetTokenSource} and -// {@link Token//GetInputStream}.

-// -// @param oldToken The token to copy. -// -func (c *CommonToken) clone() *CommonToken { - t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) - t.tokenIndex = c.GetTokenIndex() - t.line = c.GetLine() - t.column = c.GetColumn() - t.text = c.GetText() - return t -} - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go deleted file mode 100644 index e023978..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -type TokenSource interface { - NextToken() Token - Skip() - More() - GetLine() int - GetCharPositionInLine() int - GetInputStream() CharStream - GetSourceName() string - setTokenFactory(factory TokenFactory) - GetTokenFactory() TokenFactory -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go deleted file mode 100644 index df92c81..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type TokenStream interface { - IntStream - - LT(k int) Token - - Get(index int) Token - GetTokenSource() TokenSource - SetTokenSource(TokenSource) - - GetAllText() string - GetTextFromInterval(*Interval) string - GetTextFromRuleContext(RuleContext) string - GetTextFromTokens(Token, Token) string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go deleted file mode 100644 index e6fff99..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import "fmt" - -type TraceListener struct { - parser *BaseParser -} - -func NewTraceListener(parser *BaseParser) *TraceListener { - tl := new(TraceListener) - tl.parser = parser - return tl -} - -func (t *TraceListener) VisitErrorNode(_ ErrorNode) { -} - -func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) { - fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) -} - -func (t *TraceListener) VisitTerminal(node TerminalNode) { - fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()]) -} - -func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) { - fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go deleted file mode 100644 index eb7e57e..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// atom, set, epsilon, action, predicate, rule transitions. -// -//

This is a one way link. It emanates from a state (usually via a list of -// transitions) and has a target state.

-// -//

Since we never have to change the ATN transitions once we construct it, -// the states. We'll use the term Edge for the DFA to distinguish them from -// ATN transitions.

- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? 
- TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
- t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, 
t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - return "'" + string(t.start) + "'..'" + string(t.stop) + "'" -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string { - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - 
t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) 
String() string { - return "." -} - -type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - - precedence int -} - -func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t -} - -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { - return NewPrecedencePredicate(t.precedence) -} - -func (t *PrecedencePredicateTransition) String() string { - return fmt.Sprint(t.precedence) + " >= _p" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go deleted file mode 100644 index ad0eabf..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. 
-/// - -var TreeInvalidInterval = NewInterval(-1, -2) - -type Tree interface { - GetParent() Tree - SetParent(Tree) - GetPayload() interface{} - GetChild(i int) Tree - GetChildCount() int - GetChildren() []Tree -} - -type SyntaxTree interface { - Tree - - GetSourceInterval() *Interval -} - -type ParseTree interface { - SyntaxTree - - Accept(Visitor ParseTreeVisitor) interface{} - GetText() string - - ToStringTree([]string, Recognizer) string -} - -type RuleNode interface { - ParseTree - - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext -} - -type TerminalNode interface { - ParseTree - - GetSymbol() Token -} - -type ErrorNode interface { - TerminalNode - - errorNode() -} - -type ParseTreeVisitor interface { - Visit(tree ParseTree) interface{} - VisitChildren(node RuleNode) interface{} - VisitTerminal(node TerminalNode) interface{} - VisitErrorNode(node ErrorNode) interface{} -} - -type BaseParseTreeVisitor struct{} - -var _ ParseTreeVisitor = &BaseParseTreeVisitor{} - -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } - -// TODO -//func (this ParseTreeVisitor) Visit(ctx) { -// if (Utils.isArray(ctx)) { -// self := this -// return ctx.map(function(child) { return VisitAtom(self, child)}) -// } else { -// return VisitAtom(this, ctx) -// } -//} -// -//func VisitAtom(Visitor, ctx) { -// if (ctx.parser == nil) { //is terminal -// return -// } -// -// name := ctx.parser.ruleNames[ctx.ruleIndex] -// funcName := "Visit" + Utils.titleCase(name) -// -// return Visitor[funcName](ctx) -//} - -type ParseTreeListener interface { - VisitTerminal(node TerminalNode) - VisitErrorNode(node ErrorNode) - EnterEveryRule(ctx ParserRuleContext) - ExitEveryRule(ctx 
ParserRuleContext) -} - -type BaseParseTreeListener struct{} - -var _ ParseTreeListener = &BaseParseTreeListener{} - -func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} - -type TerminalNodeImpl struct { - parentCtx RuleContext - - symbol Token -} - -var _ TerminalNode = &TerminalNodeImpl{} - -func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { - tn := new(TerminalNodeImpl) - - tn.parentCtx = nil - tn.symbol = symbol - - return tn -} - -func (t *TerminalNodeImpl) GetChild(i int) Tree { - return nil -} - -func (t *TerminalNodeImpl) GetChildren() []Tree { - return nil -} - -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { - panic("Cannot set children on terminal node") -} - -func (t *TerminalNodeImpl) GetSymbol() Token { - return t.symbol -} - -func (t *TerminalNodeImpl) GetParent() Tree { - return t.parentCtx -} - -func (t *TerminalNodeImpl) SetParent(tree Tree) { - t.parentCtx = tree.(RuleContext) -} - -func (t *TerminalNodeImpl) GetPayload() interface{} { - return t.symbol -} - -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { - if t.symbol == nil { - return TreeInvalidInterval - } - tokenIndex := t.symbol.GetTokenIndex() - return NewInterval(tokenIndex, tokenIndex) -} - -func (t *TerminalNodeImpl) GetChildCount() int { - return 0 -} - -func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitTerminal(t) -} - -func (t *TerminalNodeImpl) GetText() string { - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) String() string { - if t.symbol.GetTokenType() == TokenEOF { - return "" - } - - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { - return t.String() -} - -// Represents a token that was consumed during reSynchronization 
-// rather than during a valid Match operation. For example, -// we will create this kind of a node during single token insertion -// and deletion as well as during "consume until error recovery set" -// upon no viable alternative exceptions. - -type ErrorNodeImpl struct { - *TerminalNodeImpl -} - -var _ ErrorNode = &ErrorNodeImpl{} - -func NewErrorNodeImpl(token Token) *ErrorNodeImpl { - en := new(ErrorNodeImpl) - en.TerminalNodeImpl = NewTerminalNodeImpl(token) - return en -} - -func (e *ErrorNodeImpl) errorNode() {} - -func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitErrorNode(e) -} - -type ParseTreeWalker struct { -} - -func NewParseTreeWalker() *ParseTreeWalker { - return new(ParseTreeWalker) -} - -func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { - switch tt := t.(type) { - case ErrorNode: - listener.VisitErrorNode(tt) - case TerminalNode: - listener.VisitTerminal(tt) - default: - p.EnterRule(listener, t.(RuleNode)) - for i := 0; i < t.GetChildCount(); i++ { - child := t.GetChild(i) - p.Walk(listener, child) - } - p.ExitRule(listener, t.(RuleNode)) - } -} - -// -// The discovery of a rule node, involves sending two events: the generic -// {@link ParseTreeListener//EnterEveryRule} and a -// {@link RuleContext}-specific event. First we trigger the generic and then -// the rule specific. We to them in reverse order upon finishing the node. 
-// -func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) -} - -func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) -} - -var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go deleted file mode 100644 index 80144ec..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -/** A set of utility routines useful for all kinds of ANTLR trees. */ - -// Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. 
-func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { - - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - s := TreesGetNodeText(tree, ruleNames, nil) - - s = EscapeWhitespace(s, false) - c := tree.GetChildCount() - if c == 0 { - return s - } - res := "(" + s + " " - if c > 0 { - s = TreesStringTree(tree.GetChild(0), ruleNames, nil) - res += s - } - for i := 1; i < c; i++ { - s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) - } - res += ")" - return res -} - -func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - if ruleNames != nil { - switch t2 := t.(type) { - case RuleNode: - t3 := t2.GetRuleContext() - altNumber := t3.GetAltNumber() - - if altNumber != ATNInvalidAltNumber { - return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) - } - return ruleNames[t3.GetRuleIndex()] - case ErrorNode: - return fmt.Sprint(t2) - case TerminalNode: - if t2.GetSymbol() != nil { - return t2.GetSymbol().GetText() - } - } - } - - // no recog for rule names - payload := t.GetPayload() - if p2, ok := payload.(Token); ok { - return p2.GetText() - } - - return fmt.Sprint(t.GetPayload()) -} - -// Return ordered list of all children of this node -func TreesGetChildren(t Tree) []Tree { - list := make([]Tree, 0) - for i := 0; i < t.GetChildCount(); i++ { - list = append(list, t.GetChild(i)) - } - return list -} - -// Return a list of all ancestors of this node. The first node of -// list is the root and the last is the parent of this node. -// -func TreesgetAncestors(t Tree) []Tree { - ancestors := make([]Tree, 0) - t = t.GetParent() - for t != nil { - f := []Tree{t} - ancestors = append(f, ancestors...) 
- t = t.GetParent() - } - return ancestors -} - -func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { - return TreesfindAllNodes(t, ttype, true) -} - -func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { - return TreesfindAllNodes(t, ruleIndex, false) -} - -func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { - nodes := make([]ParseTree, 0) - treesFindAllNodes(t, index, findTokens, &nodes) - return nodes -} - -func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { - // check this node (the root) first - - t2, ok := t.(TerminalNode) - t3, ok2 := t.(ParserRuleContext) - - if findTokens && ok { - if t2.GetSymbol().GetTokenType() == index { - *nodes = append(*nodes, t2) - } - } else if !findTokens && ok2 { - if t3.GetRuleIndex() == index { - *nodes = append(*nodes, t3) - } - } - // check children - for i := 0; i < t.GetChildCount(); i++ { - treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) - } -} - -func TreesDescendants(t ParseTree) []ParseTree { - nodes := []ParseTree{t} - for i := 0; i < t.GetChildCount(); i++ { - nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) - } - return nodes -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go deleted file mode 100644 index bba2ffa..0000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "bytes" - "errors" - "fmt" - "sort" - "strconv" - "strings" -) - -func intMin(a, b int) int { - if a < b { - return a - } - return b -} - -func intMax(a, b int) int { - if a > b { - return a - } - return b -} - -// A simple integer stack - -type IntStack []int - -var ErrEmptyStack = errors.New("Stack is empty") - -func (s *IntStack) Pop() (int, error) { - l := len(*s) - 1 - if l < 0 { - return 0, ErrEmptyStack - } - v := (*s)[l] - *s = (*s)[0:l] - return v, nil -} - -func (s *IntStack) Push(e int) { - *s = append(*s, e) -} - -type Set struct { - data map[int][]interface{} - hashcodeFunction func(interface{}) int - equalsFunction func(interface{}, interface{}) bool -} - -func NewSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(interface{}, interface{}) bool) *Set { - - s := new(Set) - - s.data = make(map[int][]interface{}) - - if hashcodeFunction != nil { - s.hashcodeFunction = hashcodeFunction - } else { - s.hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - s.equalsFunction = standardEqualsFunction - } else { - s.equalsFunction = equalsFunction - } - - return s -} - -func standardEqualsFunction(a interface{}, b interface{}) bool { - - ac, oka := a.(comparable) - bc, okb := b.(comparable) - - if !oka || !okb { - panic("Not Comparable") - } - - return ac.equals(bc) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.hash() - } - - panic("Not Hasher") -} - -type hasher interface { - hash() int -} - -func (s *Set) length() int { - return len(s.data) -} - -func (s *Set) add(value interface{}) interface{} { - - key := s.hashcodeFunction(value) - - values := s.data[key] - - if s.data[key] != nil { - for i := 0; i < len(values); i++ { - if s.equalsFunction(value, values[i]) { - return values[i] - } - } - - s.data[key] = append(s.data[key], value) - return value - } - - v := make([]interface{}, 1, 10) - v[0] = value - s.data[key] = v - - return value -} - 
-func (s *Set) contains(value interface{}) bool { - - key := s.hashcodeFunction(value) - - values := s.data[key] - - if s.data[key] != nil { - for i := 0; i < len(values); i++ { - if s.equalsFunction(value, values[i]) { - return true - } - } - } - return false -} - -func (s *Set) values() []interface{} { - var l []interface{} - - for _, v := range s.data { - l = append(l, v...) - } - - return l -} - -func (s *Set) String() string { - r := "" - - for _, av := range s.data { - for _, v := range av { - r += fmt.Sprint(v) - } - } - - return r -} - -type BitSet struct { - data map[int]bool -} - -func NewBitSet() *BitSet { - b := new(BitSet) - b.data = make(map[int]bool) - return b -} - -func (b *BitSet) add(value int) { - b.data[value] = true -} - -func (b *BitSet) clear(index int) { - delete(b.data, index) -} - -func (b *BitSet) or(set *BitSet) { - for k := range set.data { - b.add(k) - } -} - -func (b *BitSet) remove(value int) { - delete(b.data, value) -} - -func (b *BitSet) contains(value int) bool { - return b.data[value] -} - -func (b *BitSet) values() []int { - ks := make([]int, len(b.data)) - i := 0 - for k := range b.data { - ks[i] = k - i++ - } - sort.Ints(ks) - return ks -} - -func (b *BitSet) minValue() int { - min := 2147483647 - - for k := range b.data { - if k < min { - min = k - } - } - - return min -} - -func (b *BitSet) equals(other interface{}) bool { - otherBitSet, ok := other.(*BitSet) - if !ok { - return false - } - - if len(b.data) != len(otherBitSet.data) { - return false - } - - for k, v := range b.data { - if otherBitSet.data[k] != v { - return false - } - } - - return true -} - -func (b *BitSet) length() int { - return len(b.data) -} - -func (b *BitSet) String() string { - vals := b.values() - valsS := make([]string, len(vals)) - - for i, val := range vals { - valsS[i] = strconv.Itoa(val) - } - return "{" + strings.Join(valsS, ", ") + "}" -} - -type AltDict struct { - data map[string]interface{} -} - -func NewAltDict() *AltDict { - d := 
new(AltDict) - d.data = make(map[string]interface{}) - return d -} - -func (a *AltDict) Get(key string) interface{} { - key = "k-" + key - return a.data[key] -} - -func (a *AltDict) put(key string, value interface{}) { - key = "k-" + key - a.data[key] = value -} - -func (a *AltDict) values() []interface{} { - vs := make([]interface{}, len(a.data)) - i := 0 - for _, v := range a.data { - vs[i] = v - i++ - } - return vs -} - -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - -func EscapeWhitespace(s string, escapeSpaces bool) string { - - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - if escapeSpaces { - s = strings.Replace(s, " ", "\u00B7", -1) - } - return s -} - -func TerminalNodeToStringArray(sa []TerminalNode) []string { - st := make([]string, len(sa)) - - for i, s := range sa { - st[i] = fmt.Sprintf("%v", s) - } - - return st -} - -func PrintArrayJavaStyle(sa []string) string { - var buffer bytes.Buffer - - buffer.WriteString("[") - - for i, s := range sa { - buffer.WriteString(s) - if i != len(sa)-1 { - buffer.WriteString(", ") - } - } - - buffer.WriteString("]") - - return buffer.String() -} - -// The following routines were lifted from bits.rotate* available in Go 1.9. - -const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64 - -// rotateLeft returns the value of x rotated left by (k mod UintSize) bits. -// To rotate x right by k bits, call RotateLeft(x, -k). 
-func rotateLeft(x uint, k int) uint { - if uintSize == 32 { - return uint(rotateLeft32(uint32(x), k)) - } - return uint(rotateLeft64(uint64(x), k)) -} - -// rotateLeft32 returns the value of x rotated left by (k mod 32) bits. -func rotateLeft32(x uint32, k int) uint32 { - const n = 32 - s := uint(k) & (n - 1) - return x<>(n-s) -} - -// rotateLeft64 returns the value of x rotated left by (k mod 64) bits. -func rotateLeft64(x uint64, k int) uint64 { - const n = 64 - s := uint(k) & (n - 1) - return x<>(n-s) -} - - -// murmur hash -const ( - c1_32 uint = 0xCC9E2D51 - c2_32 uint = 0x1B873593 - n1_32 uint = 0xE6546B64 -) - -func murmurInit(seed int) int { - return seed -} - -func murmurUpdate(h1 int, k1 int) int { - var k1u uint - k1u = uint(k1) * c1_32 - k1u = rotateLeft(k1u, 15) - k1u *= c2_32 - - var h1u = uint(h1) ^ k1u - k1u = rotateLeft(k1u, 13) - h1u = h1u*5 + 0xe6546b64 - return int(h1u) -} - -func murmurFinish(h1 int, numberOfWords int) int { - var h1u uint = uint(h1) - h1u ^= uint(numberOfWords * 4) - h1u ^= h1u >> 16 - h1u *= uint(0x85ebca6b) - h1u ^= h1u >> 13 - h1u *= 0xc2b2ae35 - h1u ^= h1u >> 16 - - return int(h1u) -}