diff --git a/compiler/docgen.nim b/compiler/docgen.nim
index 57d5e7b013503..6fa86a185a5b8 100644
--- a/compiler/docgen.nim
+++ b/compiler/docgen.nim
@@ -407,6 +407,9 @@ proc nodeToHighlightedHtml(d: PDoc; n: PNode; result: var Rope; renderFlags: TRe
of tkStrLit..tkTripleStrLit:
dispA(d.conf, result, "<span class=\"StringLit\">$1</span>",
"\\spanStringLit{$1}", [escLit])
+ of tkStrNumLit:
+ dispA(d.conf, result, "<span class=\"StringLit\">$1</span>",
+ "\\spanStringLit{$1}", [escLit])
of tkCharLit:
dispA(d.conf, result, "<span class=\"CharLit\">$1</span>", "\\spanCharLit{$1}",
[escLit])
diff --git a/compiler/lexer.nim b/compiler/lexer.nim
index 729ba34352ceb..c0ba217b5496a 100644
--- a/compiler/lexer.nim
+++ b/compiler/lexer.nim
@@ -58,7 +58,8 @@ type
tkFloatLit = "tkFloatLit", tkFloat32Lit = "tkFloat32Lit",
tkFloat64Lit = "tkFloat64Lit", tkFloat128Lit = "tkFloat128Lit",
tkStrLit = "tkStrLit", tkRStrLit = "tkRStrLit", tkTripleStrLit = "tkTripleStrLit",
- tkGStrLit = "tkGStrLit", tkGTripleStrLit = "tkGTripleStrLit", tkCharLit = "tkCharLit",
+ tkGStrLit = "tkGStrLit", tkGTripleStrLit = "tkGTripleStrLit", tkCharLit = "tkCharLit",
+ tkStrNumLit = "tkStrNumLit",
tkParLe = "(", tkParRi = ")", tkBracketLe = "[",
tkBracketRi = "]", tkCurlyLe = "{", tkCurlyRi = "}",
@@ -74,11 +75,17 @@ type
TokTypes* = set[TokType]
+when defined(nimsuggest):
+ # tokens that should not be considered for previousToken
+ const weakTokens = {tkComma, tkSemiColon, tkColon,
+ tkParRi, tkParDotRi, tkBracketRi, tkBracketDotRi,
+ tkCurlyRi}
+
const
- weakTokens = {tkComma, tkSemiColon, tkColon,
- tkParRi, tkParDotRi, tkBracketRi, tkBracketDotRi,
- tkCurlyRi} # \
- # tokens that should not be considered for previousToken
+ # when a minus (-) is found in front of a numeric literal, if the previous
+ # token is one of these then it is a negative numeric literal
+ negationPrefixes = {tkComma, tkColon, tkParLe, tkBracketLe, tkSemiColon,
+ tkBracketDotLe, tkCurlyDotLe, tkParDotLe}
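+ # e.g. in `[-1, -2]` or `f(-1)` the '-' begins a negative literal even though
+ # no whitespace precedes it; see the '-' branch in rawGetTok below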
tokKeywordLow* = succ(tkSymbol)
tokKeywordHigh* = pred(tkIntLit)
@@ -119,6 +126,7 @@ type
cache*: IdentCache
when defined(nimsuggest):
previousToken: TLineInfo
+ previousTokType*: TokType
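+ # previousTokType lets rawGetTok decide whether a '-' starts a negative
+ # literal and whether a '\'' starts a custom numeric-literal suffix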
config*: ConfigRef
proc getLineInfo*(L: Lexer, tok: Token): TLineInfo {.inline.} =
@@ -296,8 +304,8 @@ proc getNumber(L: var Lexer, result: var Token) =
if L.buf[pos] == '_':
if L.buf[pos+1] notin chars:
lexMessage(L, errGenerated,
- "only single underscores may occur in a token and token may not " &
- "end with an underscore: e.g. '1__1' and '1_' are invalid")
+ "only single underscores may occur in a number and a number may " &
+ "not end with an underscore: e.g. '1__1' and '1_' are invalid")
break
tok.literal.add('_')
inc(pos)
@@ -332,22 +340,37 @@ proc getNumber(L: var Lexer, result: var Token) =
L.bufpos = msgPos
lexMessage(L, msgKind, msg % t.literal)
+ proc cmpSuffix(L: Lexer, prev: int, endOfSuffix: int, chs: openArray[char]): bool {.inline.} =
+ if (endOfSuffix - prev - 1) != chs.len:
+ return false
+ for i in 0.. BiggestInt(uint16.high))
of tkInt32Lit: (xi > BiggestInt(uint32.high))
else: false
-
if outOfRange:
#echo "out of range num: ", result.iNumber, " vs ", xi
lexMessageLitNum(L, "number out of range: '$1'", startpos)
-
+
+ # make negative when a sign starts the literal
+ if isNegative:
+ case result.tokType:
+ of floatTypes:
+ result.fNumber = -result.fNumber
+ of signedIntTypes:
+ result.iNumber = -result.iNumber
+ else:
+ lexMessageLitNum(L, "cannot assign a negative value to an unsigned type: '$1'", startpos)
else:
+ # place the base10 number into result.iNumber or result.fNumber
+ # the parsing routines already handle the isNegative case
case result.tokType
of floatTypes:
result.fNumber = parseFloat(result.literal)
@@ -571,6 +618,7 @@ proc getNumber(L: var Lexer, result: var Token) =
if outOfRange: lexMessageLitNum(L, "number out of range: '$1'", startpos)
+
# Promote int literal to int64? Not always necessary, but more consistent
if result.tokType == tkIntLit:
if result.iNumber > high(int32):
@@ -733,9 +781,8 @@ proc getEscapedChar(L: var Lexer, tok: var Token) =
proc handleCRLF(L: var Lexer, pos: int): int =
template registerLine =
- let col = L.getColNumber(pos)
-
when not defined(nimpretty):
+ let col = L.getColNumber(pos)
if col > MaxLineLength:
lexMessagePos(L, hintLineTooLong, pos)
@@ -859,6 +906,12 @@ proc getSymbol(L: var Lexer, tok: var Token) =
break
inc(pos)
suspicious = true
+ of '\'':
+ if pos==L.bufpos: # leading single quote only allowed at start
+ h = h !& ord(c)
+ inc(pos)
+ else:
+ break
else: break
tokenEnd(tok, pos-1)
h = !$h
@@ -1072,6 +1125,7 @@ proc scanComment(L: var Lexer, tok: var Token) =
tok.commentOffsetB = L.offsetBase + pos - 1
proc skip(L: var Lexer, tok: var Token) =
+ # advance the lexer past whitespace and comments while accounting for indentation
var pos = L.bufpos
tokenBegin(tok, pos)
tok.strongSpaceA = 0
@@ -1150,6 +1204,7 @@ proc rawGetTok*(L: var Lexer, tok: var Token) =
if tok.tokType notin weakTokens:
L.previousToken.line = tok.line.uint16
L.previousToken.col = tok.col.int16
+ L.previousTokType = tok.tokType
fillToken(tok)
if L.indentAhead >= 0:
@@ -1270,14 +1325,26 @@ proc rawGetTok*(L: var Lexer, tok: var Token) =
# tkTripleStrLit -> tkGTripleStrLit
inc(tok.tokType, 2)
of '\'':
- tok.tokType = tkCharLit
- getCharacter(L, tok)
- tok.tokType = tkCharLit
+ if (L.previousTokType == tkStrNumLit) and (tok.strongSpaceA == 0):
+ # a tkStrNumLit immediately before this apostrophe (no whitespace in between) means the apostrophe starts a custom numeric-literal suffix
+ getSymbol(L, tok) # example: -12'big
+ elif L.previousTokType == tkAccent:
+ getSymbol(L, tok) # example: `'big`
+ else:
+ tok.tokType = tkCharLit
+ getCharacter(L, tok)
+ tok.tokType = tkCharLit
of '0'..'9':
getNumber(L, tok)
- let c = L.buf[L.bufpos]
- if c in SymChars+{'_'}:
+ if L.buf[L.bufpos] in SymChars+{'_'}:
lexMessage(L, errGenerated, "invalid token: no whitespace between number and identifier")
+ of '-':
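+ # a '-' immediately followed by a digit is lexed as part of a negative
+ # literal when whitespace or one of the negationPrefixes tokens precedes it:
+ # `4 -1` and `(-1)` yield a negative literal; `4-1` and `4 - 1` an operator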
+ if ((tok.strongSpaceA > 0) or (L.previousTokType in negationPrefixes)) and (L.buf[L.bufpos + 1] in '0'..'9'):
+ getNumber(L, tok)
+ if L.buf[L.bufpos] in SymChars+{'_'}:
+ lexMessage(L, errGenerated, "invalid token: no whitespace between number and identifier")
+ else:
+ getOperator(L, tok)
else:
if c in OpChars:
getOperator(L, tok)
diff --git a/compiler/parser.nim b/compiler/parser.nim
index fe857c81bb47d..f62ca3ddfabc3 100644
--- a/compiler/parser.nim
+++ b/compiler/parser.nim
@@ -89,6 +89,10 @@ proc parseExprStmt(p: var Parser): PNode
proc parseBlock(p: var Parser): PNode
proc primary(p: var Parser, mode: PrimaryMode): PNode
proc simpleExprAux(p: var Parser, limit: int, mode: PrimaryMode): PNode
+proc identOrLiteral(p: var Parser, mode: PrimaryMode): PNode
+proc dotExpr(p: var Parser, a: PNode): PNode
+proc parseGStrLit(p: var Parser, a: PNode): PNode
+
# implementation
@@ -362,6 +366,10 @@ proc parseSymbol(p: var Parser, mode = smNormal): PNode =
parMessage(p, errIdentifierExpected, p.tok)
break
eat(p, tkAccent)
+ of tkStrNumLit:
+ result = identOrLiteral(p, pmNormal)
+ result = dotExpr(p, result)
+ result = parseGStrLit(p, result)
else:
parMessage(p, errIdentifierExpected, p.tok)
# BUGFIX: We must consume a token here to prevent endless loops!
@@ -635,6 +643,9 @@ proc identOrLiteral(p: var Parser, mode: PrimaryMode): PNode =
#| | castExpr
#| tupleConstr = '(' optInd (exprColonEqExpr comma?)* optPar ')'
#| arrayConstr = '[' optInd (exprColonEqExpr comma?)* optPar ']'
+ #
+ # when a tkStrNumLit is encountered, getTok is not called here: the token
+ # itself stands in for the implied tkDot of the suffix call.
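+ # e.g. -1'wrap becomes nkDotExpr(nkStrLit "-1", nkIdent "'wrap"), i.e. the
+ # call `'wrap`("-1")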
case p.tok.tokType
of tkSymbol, tkBuiltInMagics, tkOut:
result = newIdentNodeP(p.tok.ident, p)
@@ -701,6 +712,8 @@ proc identOrLiteral(p: var Parser, mode: PrimaryMode): PNode =
of tkStrLit:
result = newStrNodeP(nkStrLit, p.tok.literal, p)
getTok(p)
+ of tkStrNumLit:
+ result = newStrNodeP(nkStrLit, p.tok.literal, p)
of tkRStrLit:
result = newStrNodeP(nkRStrLit, p.tok.literal, p)
getTok(p)
@@ -764,14 +777,17 @@ proc commandExpr(p: var Parser; r: PNode; mode: PrimaryMode): PNode =
result.add commandParam(p, isFirstParam, mode)
proc primarySuffix(p: var Parser, r: PNode,
- baseIndent: int, mode: PrimaryMode): PNode =
+ baseIndent: int, mode: PrimaryMode,
+ baseTokType: TokType): PNode =
#| primarySuffix = '(' (exprColonEqExpr comma?)* ')'
#| | '.' optInd symbol generalizedLit?
#| | '[' optInd exprColonEqExprList optPar ']'
#| | '{' optInd exprColonEqExprList optPar '}'
#| | &( '`'|IDENT|literal|'cast'|'addr'|'type') expr # command syntax
result = r
-
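+ # a tkStrNumLit base carries an implied dot: attach the suffix identifier
+ # as a dotted call (and any generalized string literal) before the loop below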
+ if baseTokType == tkStrNumLit:
+ result = dotExpr(p, result)
+ result = parseGStrLit(p, result)
# progress guaranteed
while p.tok.indent < 0 or
(p.tok.tokType == tkDot and p.tok.indent >= baseIndent):
@@ -1097,8 +1113,8 @@ proc isExprStart(p: Parser): bool =
case p.tok.tokType
of tkSymbol, tkAccent, tkOpr, tkNot, tkNil, tkCast, tkIf, tkFor,
tkProc, tkFunc, tkIterator, tkBind, tkBuiltInMagics,
- tkParLe, tkBracketLe, tkCurlyLe, tkIntLit..tkCharLit, tkVar, tkRef, tkPtr,
- tkTuple, tkObject, tkWhen, tkCase, tkOut:
+ tkParLe, tkBracketLe, tkCurlyLe, tkIntLit..tkCharLit, tkStrNumLit, tkVar,
+ tkRef, tkPtr, tkTuple, tkObject, tkWhen, tkCase, tkOut:
result = true
else: result = false
@@ -1218,8 +1234,9 @@ proc primary(p: var Parser, mode: PrimaryMode): PNode =
if isSigil:
#XXX prefix operators
let baseInd = p.lex.currLineIndent
+ let baseTokType = p.tok.tokType
result.add(primary(p, pmSkipSuffix))
- result = primarySuffix(p, result, baseInd, mode)
+ result = primarySuffix(p, result, baseInd, mode, baseTokType)
else:
result.add(primary(p, pmNormal))
return
@@ -1266,9 +1283,10 @@ proc primary(p: var Parser, mode: PrimaryMode): PNode =
of tkDistinct: result = parseTypeDescKAux(p, nkDistinctTy, mode)
else:
let baseInd = p.lex.currLineIndent
+ let baseTokType = p.tok.tokType
result = identOrLiteral(p, mode)
if mode != pmSkipSuffix:
- result = primarySuffix(p, result, baseInd, mode)
+ result = primarySuffix(p, result, baseInd, mode, baseTokType)
proc binaryNot(p: var Parser; a: PNode): PNode =
if p.tok.tokType == tkNot:
diff --git a/compiler/renderer.nim b/compiler/renderer.nim
index 13ff6941fe2f5..151733efeb7f1 100644
--- a/compiler/renderer.nim
+++ b/compiler/renderer.nim
@@ -1188,9 +1188,14 @@ proc gsub(g: var TSrcGen, n: PNode, c: TContext) =
gcomma(g, n, c)
put(g, tkBracketRi, "]")
of nkDotExpr:
- gsub(g, n, 0)
- put(g, tkDot, ".")
- gsub(g, n, 1)
+ if n[1].kind == nkIdent and n[1].ident.s[0] == '\'': # TODO: add a reusable helper function isUserLiteral
+ assert n[0].kind == nkStrLit
+ put(g, tkStrNumLit, n[0].strVal)
+ gsub(g, n, 1)
+ else:
+ gsub(g, n, 0)
+ put(g, tkDot, ".")
+ gsub(g, n, 1)
of nkBind:
putWithSpace(g, tkBind, "bind")
gsub(g, n, 0)
diff --git a/compiler/semtypes.nim b/compiler/semtypes.nim
index dcab9a8842aa3..0fce7b417128b 100644
--- a/compiler/semtypes.nim
+++ b/compiler/semtypes.nim
@@ -299,7 +299,7 @@ proc semArrayIndex(c: PContext, n: PNode): PType =
result = makeRangeWithStaticExpr(c, e.typ.n)
elif e.kind in {nkIntLit..nkUInt64Lit}:
if e.intVal < 0:
- localError(c.config, n[1].info,
+ localError(c.config, n.info,
"Array length can't be negative, but was " & $e.intVal)
result = makeRangeType(c, 0, e.intVal-1, n.info, e.typ)
elif e.kind == nkSym and e.typ.kind == tyStatic:
diff --git a/doc/manual.rst b/doc/manual.rst
index 1bb47f28b7eb9..e1a675ef64c13 100644
--- a/doc/manual.rst
+++ b/doc/manual.rst
@@ -491,10 +491,10 @@ type is used for Unicode characters, it can represent any Unicode character.
`Rune` is declared in the `unicode module <unicode.html>`_.
-Numerical constants
--------------------
+Numeric Literals
+----------------
-Numerical constants are of a single type and have the form::
+Numeric literals have the form::
hexdigit = digit | 'A'..'F' | 'a'..'f'
octdigit = '0'..'7'
@@ -530,7 +530,7 @@ Numerical constants are of a single type and have the form::
| (FLOAT_LIT | DEC_LIT | OCT_LIT | BIN_LIT) ['\''] FLOAT64_SUFFIX
-As can be seen in the productions, numerical constants can contain underscores
+As can be seen in the productions, numeric literals can contain underscores
for readability. Integer and floating-point literals may be given in decimal (no
prefix), binary (prefix `0b`), octal (prefix `0o`), and hexadecimal
(prefix `0x`) notation.
@@ -546,7 +546,7 @@ is optional if it is not ambiguous (only hexadecimal floating-point literals
with a type suffix can be ambiguous).
-The type suffixes are:
+The pre-defined type suffixes are:
================= =========================
Type Suffix Resulting type of literal
@@ -566,6 +566,19 @@ The type suffixes are:
`'f64` float64
================= =========================
+If the suffix is not predefined, then the suffix is assumed to be a call
+to a proc, template, macro or other callable identifier that is passed the
+string containing the literal. Because the callable's name contains the
+apostrophe, it has to be quoted with backticks when it is declared.
+
+.. code-block:: nim
+ import strutils
+ type u4 = distinct uint8 # a 4-bit unsigned integer aka "nibble"
+ proc `'u4`(n: string): u4 =
+ result = (parseInt(n) and 0x0F).u4
+
+ var x = 5'u4
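+
+When a minus sign directly precedes the digits and is lexed as part of the
+literal, it is included in the string passed to the callable. A small
+illustration (the `'str` proc below exists only for demonstration):
+
+.. code-block:: nim
+ proc `'str`(n: string): string = n # returns the literal text unchanged
+ doAssert -3'str == "-3" # the callable receives "-3", sign included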
+
Floating-point literals may also be in binary, octal or hexadecimal
notation:
`0B0_10001110100_0000101001000111101011101111111011000101001101001001'f64`
diff --git a/tests/lexer/mlexerutils.nim b/tests/lexer/mlexerutils.nim
new file mode 100644
index 0000000000000..eae7a0006938a
--- /dev/null
+++ b/tests/lexer/mlexerutils.nim
@@ -0,0 +1,9 @@
+import macros
+
+macro lispReprStr*(a: untyped): untyped = newLit(a.lispRepr)
+
+macro assertAST*(expected: string, struct: untyped): untyped =
+ var ast = newLit(struct.treeRepr)
+ result = quote do:
+ if `ast` != `expected`:
+ doAssert false, "\nGot:\n" & `ast`.indent(2) & "\nExpected:\n" & `expected`.indent(2)
\ No newline at end of file
diff --git a/tests/lexer/tminushandling.nim b/tests/lexer/tminushandling.nim
new file mode 100644
index 0000000000000..e39fe3af8f919
--- /dev/null
+++ b/tests/lexer/tminushandling.nim
@@ -0,0 +1,69 @@
+discard """
+ targets: "c cpp js"
+"""
+
+# Test numeric literals and handling of minus symbol
+
+import std/[macros, strutils]
+import mlexerutils
+
+const one = 1
+const minusOne = `-`(one)
+
+# border cases that *should* generate compiler errors:
+assertAST dedent """
+ StmtList
+ Asgn
+ Ident "x"
+ Command
+ IntLit 4
+ IntLit -1""":
+ x = 4 -1
+assertAST dedent """
+ StmtList
+ VarSection
+ IdentDefs
+ Ident "x"
+ Ident "uint"
+ IntLit -1""":
+ var x: uint = -1
+template bad() =
+ x = 4 -1
+doAssert not compiles(bad())
+
+template main =
+ block: # check when a minus (-) is a negative sign for a literal
+ doAssert -1 == minusOne:
+ "unable to parse a spaced-prefixed negative int"
+ doAssert lispReprStr(-1) == """(IntLit -1)"""
+ doAssert -1.0'f64 == minusOne.float64
+ doAssert lispReprStr(-1.000'f64) == """(Float64Lit -1.0)"""
+ doAssert lispReprStr( -1.000'f64) == """(Float64Lit -1.0)"""
+ doAssert [-1].contains(minusOne):
+ "unable to handle negatives after square bracket"
+ doAssert lispReprStr([-1]) == """(Bracket (IntLit -1))"""
+ doAssert (-1, 2)[0] == minusOne:
+ "unable to handle negatives after parenthesis"
+ doAssert lispReprStr((-1, 2)) == """(Par (IntLit -1) (IntLit 2))"""
+ proc x(): int =
+ var a = 1;-1 # the -1 should act as the return value
+ doAssert x() == minusOne:
+ "unable to handle negatives after semi-colon"
+
+ block: # check when a minus (-) is an unary op
+ doAssert -one == minusOne:
+ "unable to a negative prior to identifier"
+
+ block: # check when a minus (-) is a subtraction op
+ doAssert 4-1 == 3:
+ "unable to handle subtraction sans surrounding spaces with a numeric literal"
+ doAssert 4-one == 3:
+ "unable to handle subtraction sans surrounding spaces with an identifier"
+ doAssert 4 - 1 == 3:
+ "unable to handle subtraction with surrounding spaces with a numeric literal"
+ doAssert 4 - one == 3:
+ "unable to handle subtraction with surrounding spaces with an identifier"
+
+
+static: main()
+main()
diff --git a/tests/lexer/tstrnumlits.nim b/tests/lexer/tstrnumlits.nim
new file mode 100644
index 0000000000000..768ff045c29bc
--- /dev/null
+++ b/tests/lexer/tstrnumlits.nim
@@ -0,0 +1,149 @@
+discard """
+ targets: "c cpp js"
+"""
+
+# Test tkStrNumLit
+
+import std/[macros, strutils]
+import mlexerutils
+
+# AST checks
+
+assertAST dedent """
+ StmtList
+ ProcDef
+ AccQuoted
+ Ident "\'wrap"
+ Empty
+ Empty
+ FormalParams
+ Ident "string"
+ IdentDefs
+ Ident "number"
+ Ident "string"
+ Empty
+ Empty
+ Empty
+ StmtList
+ Asgn
+ Ident "result"
+ Infix
+ Ident "&"
+ Infix
+ Ident "&"
+ StrLit "[["
+ Ident "number"
+ StrLit "]]"""":
+ proc `'wrap`(number: string): string =
+ result = "[[" & number & "]]"
+
+assertAST dedent """
+ StmtList
+ DotExpr
+ StrLit "-38383839292839283928392839283928392839283.928493849385935898243e-50000"
+ Ident "\'wrap"""":
+ -38383839292839283928392839283928392839283.928493849385935898243e-50000'wrap
+
+proc `'wrap`(number: string): string = "[[" & number & "]]"
+doAssert lispReprStr(-1'wrap) == """(DotExpr (StrLit "-1") (Ident "\'wrap"))"""
+
+template main =
+ block: # basic suffix usage
+ template `'twrap`(number: string): untyped =
+ number.`'wrap`
+ proc extraContext(): string =
+ 22.40'wrap
+ proc `*`(left, right: string): string =
+ result = left & "times" & right
+ proc `+`(left, right: string): string =
+ result = left & "plus" & right
+
+ doAssert 1'wrap == "[[1]]"
+ doAssert -1'wrap == "[[-1]]":
+ "unable to resolve a negative integer-suffix pattern"
+ doAssert 12345.67890'wrap == "[[12345.67890]]"
+ doAssert 1'wrap*1'wrap == "[[1]]times[[1]]":
+ "unable to resolve an operator between two suffixed numeric literals"
+ doAssert 1'wrap+ -1'wrap == "[[1]]plus[[-1]]": # will generate a compiler warning about inconsistent spacing
+ "unable to resolve a negative suffixed numeric literal following an operator"
+ doAssert 1'wrap + -1'wrap == "[[1]]plus[[-1]]"
+ doAssert 1'twrap == "[[1]]"
+ doAssert extraContext() == "[[22.40]]":
+ "unable to return a suffixed numeric literal by an implicit return"
+ doAssert 0x5a3a'wrap == "[[0x5a3a]]"
+ doAssert 0o5732'wrap == "[[0o5732]]"
+ doAssert 0b0101111010101'wrap == "[[0b0101111010101]]"
+ doAssert -38383839292839283928392839283928392839283.928493849385935898243e-50000'wrap == "[[-38383839292839283928392839283928392839283.928493849385935898243e-50000]]"
+ doAssert 1234.56'wrap == "[[1234.56]]":
+ "unable to properly account for context with suffixed numeric literals"
+
+ block: # verify that the i64, f32, etc builtin suffixes still parse correctly
+ const expectedF32: float32 = 123.125
+ proc `'f9`(number: string): string = # proc starts with 'f' just like 'f32'
+ "[[" & number & "]]"
+ proc `'f32a`(number: string): string = # looks even more like 'f32'
+ "[[" & number & "]]"
+ proc `'d9`(number: string): string = # proc starts with 'd' just like the d suffix
+ "[[" & number & "]]"
+ proc `'i9`(number: string): string = # proc starts with 'i' just like 'i64'
+ "[[" & number & "]]"
+ proc `'u9`(number: string): string = # proc starts with 'u' just like 'u8'
+ "[[" & number & "]]"
+
+ doAssert 123.125f32 == expectedF32:
+ "failing to support non-quoted legacy f32 floating point suffix"
+ doAssert 123.125'f32 == expectedF32
+ doAssert 123.125e0'f32 == expectedF32
+ doAssert 1234.56'wrap == 1234.56'f9
+ doAssert 1234.56'wrap == 1234.56'f32a
+ doAssert 1234.56'wrap == 1234.56'd9
+ doAssert 1234.56'wrap == 1234.56'i9
+ doAssert 1234.56'wrap == 1234.56'u9
+ doAssert lispReprStr(1234.56'u9) == """(DotExpr (StrLit "1234.56") (Ident "\'u9"))""":
+ "failed to properly build AST for suffix that starts with u"
+ doAssert -128'i8 == (-128).int8
+
+ block: # case checks
+ doAssert 1E2 == 100:
+ "lexer not handling upper-case exponent"
+ doAssert 1.0E2 == 100.0
+ doAssert 1e2 == 100
+ doAssert 0xdeadBEEF'wrap == "[[0xdeadBEEF]]":
+ "lexer not maintaining original case"
+ doAssert 0.1E12'wrap == "[[0.1E12]]"
+ doAssert 0.0e12'wrap == "[[0.0e12]]"
+ doAssert 0.0e+12'wrap == "[[0.0e+12]]"
+ doAssert 0.0e-12'wrap == "[[0.0e-12]]"
+ doAssert 0e-12'wrap == "[[0e-12]]"
+
+ block: # macro and template usage
+ template `'foo`(a: string): untyped = (a, 2)
+ doAssert -12'foo == ("-12", 2)
+ template `'fooplus`(a: string, b: int): untyped = (a, b)
+ doAssert -12'fooplus(2) == ("-12", 2)
+ template `'fooplusopt`(a: string, b: int = 99): untyped = (a, b)
+ doAssert -12'fooplusopt(2) == ("-12", 2)
+ doAssert -12'fooplusopt() == ("-12", 99)
+ doAssert -12'fooplusopt == ("-12", 99)
+ macro `'bar`(a: static string): untyped =
+ var infix = newNimNode(nnkInfix)
+ infix.add newIdentNode("&")
+ infix.add newLit("got ")
+ infix.add newLit(a.repr)
+ result = newNimNode(nnkStmtList)
+ result.add infix
+ doAssert -12'bar == "got \"-12\""
+ macro deb(a): untyped = newLit(a.repr)
+ doAssert deb(-12'bar) == "-12'bar"
+ # macro metawrap(): untyped =
+ # func wrap1(a: string): string = "{" & a & "}"
+ # func `'wrap2`(a: string): string = "{" & a & "}"
+ # result = quote do:
+ # let a1 = wrap1"-128"
+ # let a2 = -128'wrap2
+ # metawrap()
+ # doAssert a1 == "{-128}"
+ # doAssert a2 == "{-128}"
+
+static: main()
+main()