diff --git a/Makefile b/Makefile index 5d400ffff6..9ee6945f13 100644 --- a/Makefile +++ b/Makefile @@ -208,6 +208,7 @@ TEST_PACKAGES = \ crypto/des \ crypto/dsa \ crypto/elliptic/internal/fiat \ + crypto/hmac \ crypto/internal/subtle \ crypto/md5 \ crypto/rc4 \ @@ -236,9 +237,11 @@ TEST_PACKAGES = \ os \ path \ reflect \ + strconv \ testing \ testing/iotest \ text/scanner \ + text/template/parse \ unicode \ unicode/utf16 \ unicode/utf8 \ diff --git a/compileopts/target.go b/compileopts/target.go index ba19cae08e..e3f51e8cec 100644 --- a/compileopts/target.go +++ b/compileopts/target.go @@ -296,7 +296,7 @@ func defaultTarget(goos, goarch, triple string) (*TargetSpec, error) { // systems so we need separate assembly files. suffix = "_windows" } - spec.ExtraFiles = append(spec.ExtraFiles, "src/runtime/gc_"+goarch+suffix+".S") + spec.ExtraFiles = append(spec.ExtraFiles, "src/runtime/asm_"+goarch+suffix+".S") spec.ExtraFiles = append(spec.ExtraFiles, "src/internal/task/task_stack_"+goarch+suffix+".S") } if goarch != runtime.GOARCH { diff --git a/compiler/calls.go b/compiler/calls.go index 4c5b1fb477..f3510c2010 100644 --- a/compiler/calls.go +++ b/compiler/calls.go @@ -33,8 +33,9 @@ const ( paramIsDeferenceableOrNull = 1 << iota ) -// createCall creates a new call to runtime. with the given arguments. -func (b *builder) createRuntimeCall(fnName string, args []llvm.Value, name string) llvm.Value { +// createRuntimeCallCommon creates a runtime call. Use createRuntimeCall or +// createRuntimeInvoke instead. 
+func (b *builder) createRuntimeCallCommon(fnName string, args []llvm.Value, name string, isInvoke bool) llvm.Value { fn := b.program.ImportedPackage("runtime").Members[fnName].(*ssa.Function) llvmFn := b.getFunction(fn) if llvmFn.IsNil() { @@ -42,9 +43,27 @@ func (b *builder) createRuntimeCall(fnName string, args []llvm.Value, name strin } args = append(args, llvm.Undef(b.i8ptrType)) // unused context parameter args = append(args, llvm.ConstPointerNull(b.i8ptrType)) // coroutine handle + if isInvoke { + return b.createInvoke(llvmFn, args, name) + } return b.createCall(llvmFn, args, name) } +// createRuntimeCall creates a new call to runtime.<fnName> with the given +// arguments. +func (b *builder) createRuntimeCall(fnName string, args []llvm.Value, name string) llvm.Value { + return b.createRuntimeCallCommon(fnName, args, name, false) +} + +// createRuntimeInvoke creates a new call to runtime.<fnName> with the given +// arguments. If the runtime call panics, control flow is diverted to the +// landing pad block. +// Note that "invoke" here is meant in the LLVM sense (a call that can +// panic/throw), not in the Go sense (an interface method call). +func (b *builder) createRuntimeInvoke(fnName string, args []llvm.Value, name string) llvm.Value { + return b.createRuntimeCallCommon(fnName, args, name, true) +} + // createCall creates a call to the given function with the arguments possibly // expanded. func (b *builder) createCall(fn llvm.Value, args []llvm.Value, name string) llvm.Value { @@ -56,6 +75,15 @@ func (b *builder) createCall(fn llvm.Value, args []llvm.Value, name string) llvm return b.CreateCall(fn, expanded, name) } +// createInvoke is like createCall but continues execution at the landing pad if
// the call resulted in a panic. 
+func (b *builder) createInvoke(fn llvm.Value, args []llvm.Value, name string) llvm.Value { + if b.hasDeferFrame() { + b.createInvokeCheckpoint() + } + return b.createCall(fn, args, name) +} + // Expand an argument type to a list that can be used in a function call // parameter list. func (c *compilerContext) expandFormalParamType(t llvm.Type, name string, goType types.Type) []paramInfo { diff --git a/compiler/compiler.go b/compiler/compiler.go index c9f34cdaa6..7e8ac96f82 100644 --- a/compiler/compiler.go +++ b/compiler/compiler.go @@ -23,7 +23,7 @@ import ( // Version of the compiler pacakge. Must be incremented each time the compiler // package changes in a way that affects the generated LLVM module. // This version is independent of the TinyGo version number. -const Version = 25 // last change: add "target-cpu" and "target-features" attributes +const Version = 26 // last change: implement recover func init() { llvm.InitializeAllTargets() @@ -142,6 +142,8 @@ type builder struct { currentBlock *ssa.BasicBlock phis []phiNode deferPtr llvm.Value + deferFrame llvm.Value + landingpad llvm.BasicBlock difunc llvm.Metadata dilocals map[*types.Var]llvm.Metadata allDeferFuncs []interface{} @@ -985,6 +987,12 @@ func (b *builder) createFunction() { } } + if b.hasDeferFrame() { + // Create the landing pad block, where execution continues after a + // panic. 
+ b.createLandingPad() + } + // Resolve phi nodes for _, phi := range b.phis { block := phi.ssa.Block() @@ -1113,9 +1121,12 @@ func (b *builder) createInstruction(instr ssa.Instruction) { b.createMapUpdate(mapType.Key(), m, key, value, instr.Pos()) case *ssa.Panic: value := b.getValue(instr.X) - b.createRuntimeCall("_panic", []llvm.Value{value}, "") + b.createRuntimeInvoke("_panic", []llvm.Value{value}, "") b.CreateUnreachable() case *ssa.Return: + if b.hasDeferFrame() { + b.createRuntimeCall("destroyDeferFrame", []llvm.Value{b.deferFrame}, "") + } if len(instr.Results) == 0 { b.CreateRetVoid() } else if len(instr.Results) == 1 { @@ -1304,7 +1315,13 @@ func (b *builder) createBuiltin(argTypes []types.Type, argValues []llvm.Value, c cplx := argValues[0] return b.CreateExtractValue(cplx, 0, "real"), nil case "recover": - return b.createRuntimeCall("_recover", nil, ""), nil + useParentFrame := uint64(0) + if b.hasDeferFrame() { + // recover() should return the panic value of the parent function, + // not of the current function. + useParentFrame = 1 + } + return b.createRuntimeCall("_recover", []llvm.Value{llvm.ConstInt(b.ctx.Int1Type(), useParentFrame, false)}, ""), nil case "ssa:wrapnilchk": // TODO: do an actual nil check? 
return argValues[0], nil @@ -1391,6 +1408,12 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error) return b.createVolatileLoad(instr) case strings.HasPrefix(name, "runtime/volatile.Store"): return b.createVolatileStore(instr) + case name == "runtime.supportsRecover": + supportsRecover := uint64(0) + if b.supportsRecover() { + supportsRecover = 1 + } + return llvm.ConstInt(b.ctx.Int1Type(), supportsRecover, false), nil case strings.HasPrefix(name, "sync/atomic."): val, ok := b.createAtomicOp(instr) if ok { @@ -1456,7 +1479,7 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error) params = append(params, llvm.Undef(b.i8ptrType)) } - return b.createCall(callee, params, ""), nil + return b.createInvoke(callee, params, ""), nil } // getValue returns the LLVM value of a constant, function value, global, or diff --git a/compiler/defer.go b/compiler/defer.go index 2a367e468a..ab12acf9e9 100644 --- a/compiler/defer.go +++ b/compiler/defer.go @@ -22,6 +22,32 @@ import ( "tinygo.org/x/go-llvm" ) +// supportsRecover returns whether the compiler supports the recover() builtin +// for the current architecture. +func (b *builder) supportsRecover() bool { + switch b.archFamily() { + case "wasm32": + // Probably needs to be implemented using the exception handling + // proposal of WebAssembly: + // https://github.com/WebAssembly/exception-handling + return false + case "avr", "riscv64", "xtensa": + // TODO: add support for these architectures + return false + default: + return true + } +} + +// hasDeferFrame returns whether the current function needs to catch panics and +// run defers. +func (b *builder) hasDeferFrame() bool { + if b.fn.Recover == nil { + return false + } + return b.supportsRecover() +} + // deferInitFunc sets up this function for future deferred calls. It must be // called from within the entry block when this function contains deferred // calls. 
@@ -37,6 +63,123 @@ func (b *builder) deferInitFunc() { deferType := llvm.PointerType(b.getLLVMRuntimeType("_defer"), 0) b.deferPtr = b.CreateAlloca(deferType, "deferPtr") b.CreateStore(llvm.ConstPointerNull(deferType), b.deferPtr) + + if b.hasDeferFrame() { + // Set up the defer frame with the current stack pointer. + // This assumes that the stack pointer doesn't move outside of the + // function prologue/epilogue (an invariant maintained by TinyGo but + // possibly broken by the C alloca function). + // The frame pointer is _not_ saved, because it is marked as clobbered + // in the setjmp-like inline assembly. + deferFrameType := b.getLLVMType(b.program.ImportedPackage("internal/task").Members["DeferFrame"].Type()) + b.deferFrame = b.CreateAlloca(deferFrameType, "deferframe.buf") + stackPointer := b.readStackPointer() + b.createRuntimeCall("setupDeferFrame", []llvm.Value{b.deferFrame, stackPointer}, "") + + // Create the landing pad block, which is where control transfers after + // a panic. + b.landingpad = llvm.AddBasicBlock(b.llvmFn, "lpad") + } +} + +// createLandingPad fills in the landing pad block. This block runs the deferred +// functions and returns (by jumping to the recover block). If the function is +// still panicking after the defers are run, the panic will be re-raised in +// destroyDeferFrame. +func (b *builder) createLandingPad() { + b.SetInsertPointAtEnd(b.landingpad) + + // Add debug info, if needed. + // The location used is the closing bracket of the function. + if b.Debug { + pos := b.program.Fset.Position(b.fn.Syntax().End()) + b.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), b.difunc, llvm.Metadata{}) + } + + b.createRunDefers() + + // Continue at the 'recover' block, which returns to the parent in an + // appropriate way. + b.CreateBr(b.blockEntries[b.fn.Recover]) +} + +// createInvokeCheckpoint saves the function state at the given point, to +// continue at the landing pad if a panic happened. 
This is implemented using a +// setjmp-like construct. +func (b *builder) createInvokeCheckpoint() { + // Construct inline assembly equivalents of setjmp. + // The assembly works as follows: + // * All registers (both callee-saved and caller saved) are clobbered + // after the inline assembly returns. + // * The assembly stores the address just past the end of the assembly + // into the jump buffer. + // * The return value (eax, rax, r0, etc) is set to zero in the inline + // assembly but set to an unspecified non-zero value when jumping using + // a longjmp. + asmType := llvm.FunctionType(b.uintptrType, []llvm.Type{b.deferFrame.Type()}, false) + var asmString, constraints string + switch b.archFamily() { + case "i386": + asmString = ` +xorl %eax, %eax +movl $$1f, 4(%ebx) +1:` + constraints = "={eax},{ebx},~{ebx},~{ecx},~{edx},~{esi},~{edi},~{ebp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{fpsr},~{fpcr},~{flags},~{dirflag},~{memory}" + // This doesn't include the floating point stack because TinyGo uses + // newer floating point instructions. + case "x86_64": + asmString = ` +xorq %rax, %rax +movq $$1f, 8(%rbx) +1:` + constraints = "={rax},{rbx},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{fpsr},~{fpcr},~{flags},~{dirflag},~{memory}" + // This list doesn't include AVX/AVX512 registers because TinyGo + // doesn't currently enable support for AVX instructions. + case "arm": + // Note: the following assembly takes into account that the PC is + // always 4 bytes ahead on ARM. 
The PC that is stored always points + // to the instruction just after the assembly fragment so that + // tinygo_longjmp lands at the correct instruction. + if b.isThumb() { + // Instructions are 2 bytes in size. + asmString = ` +movs r0, #0 +mov r2, pc +str r2, [r1, #4]` + } else { + // Instructions are 4 bytes in size. + asmString = ` +str pc, [r1, #4] +movs r0, #0` + } + constraints = "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}" + case "aarch64": + asmString = ` +adr x2, 1f +str x2, [x1, #8] +mov x0, #0 +1: +` + constraints = "={x0},{x1},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{nzcv},~{ffr},~{vg},~{memory}" + // TODO: SVE registers, which we don't use in TinyGo at the moment. + case "riscv32": + asmString = ` +la a2, 1f +sw a2, 4(a1) +li a0, 0 +1:` + constraints = "={a0},{a1},~{a1},~{a2},~{a3},~{a4},~{a5},~{a6},~{a7},~{s0},~{s1},~{s2},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9},~{s10},~{s11},~{t0},~{t1},~{t2},~{t3},~{t4},~{t5},~{t6},~{ra},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{memory}" + default: + // This case should have been handled by b.supportsRecover(). 
+ b.addError(b.fn.Pos(), "unknown architecture for defer: "+b.archFamily()) + } + asm := llvm.InlineAsm(asmType, asmString, constraints, false, false, 0) + result := b.CreateCall(asm, []llvm.Value{b.deferFrame}, "setjmp") + isZero := b.CreateICmp(llvm.IntEQ, result, llvm.ConstInt(b.uintptrType, 0, false), "setjmp.result") + continueBB := b.insertBasicBlock("") + b.CreateCondBr(isZero, continueBB, b.landingpad) + b.SetInsertPointAtEnd(continueBB) + b.blockExits[b.currentBlock] = continueBB } // isInLoop checks if there is a path from a basic block to itself. @@ -202,30 +345,31 @@ func (b *builder) createDefer(instr *ssa.Defer) { } } - // Make a struct out of the collected values to put in the defer frame. - deferFrameType := b.ctx.StructType(valueTypes, false) - deferFrame := llvm.ConstNull(deferFrameType) + // Make a struct out of the collected values to put in the deferred call + // struct. + deferredCallType := b.ctx.StructType(valueTypes, false) + deferredCall := llvm.ConstNull(deferredCallType) for i, value := range values { - deferFrame = b.CreateInsertValue(deferFrame, value, i, "") + deferredCall = b.CreateInsertValue(deferredCall, value, i, "") } // Put this struct in an allocation. var alloca llvm.Value if !isInLoop(instr.Block()) { // This can safely use a stack allocation. - alloca = llvmutil.CreateEntryBlockAlloca(b.Builder, deferFrameType, "defer.alloca") + alloca = llvmutil.CreateEntryBlockAlloca(b.Builder, deferredCallType, "defer.alloca") } else { // This may be hit a variable number of times, so use a heap allocation. 
- size := b.targetData.TypeAllocSize(deferFrameType) + size := b.targetData.TypeAllocSize(deferredCallType) sizeValue := llvm.ConstInt(b.uintptrType, size, false) nilPtr := llvm.ConstNull(b.i8ptrType) allocCall := b.createRuntimeCall("alloc", []llvm.Value{sizeValue, nilPtr}, "defer.alloc.call") - alloca = b.CreateBitCast(allocCall, llvm.PointerType(deferFrameType, 0), "defer.alloc") + alloca = b.CreateBitCast(allocCall, llvm.PointerType(deferredCallType, 0), "defer.alloc") } if b.NeedsStackObjects { b.trackPointer(alloca) } - b.CreateStore(deferFrame, alloca) + b.CreateStore(deferredCall, alloca) // Push it on top of the linked list by replacing deferPtr. allocaCast := b.CreateBitCast(alloca, next.Type(), "defer.alloca.cast") @@ -296,7 +440,7 @@ func (b *builder) createRunDefers() { valueTypes := []llvm.Type{b.uintptrType, llvm.PointerType(b.getLLVMRuntimeType("_defer"), 0)} if !callback.IsInvoke() { - //Expect funcValue to be passed through the defer frame. + //Expect funcValue to be passed through the deferred call. valueTypes = append(valueTypes, b.getFuncType(callback.Signature())) } else { //Expect typecode @@ -307,14 +451,14 @@ func (b *builder) createRunDefers() { valueTypes = append(valueTypes, b.getLLVMType(arg.Type())) } - deferFrameType := b.ctx.StructType(valueTypes, false) - deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame") + deferredCallType := b.ctx.StructType(valueTypes, false) + deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall") // Extract the params from the struct (including receiver). 
forwardParams := []llvm.Value{} zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false) for i := 2; i < len(valueTypes); i++ { - gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "gep") + gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "gep") forwardParam := b.CreateLoad(gep, "param") forwardParams = append(forwardParams, forwardParam) } @@ -357,14 +501,14 @@ func (b *builder) createRunDefers() { for _, param := range getParams(callback.Signature) { valueTypes = append(valueTypes, b.getLLVMType(param.Type())) } - deferFrameType := b.ctx.StructType(valueTypes, false) - deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame") + deferredCallType := b.ctx.StructType(valueTypes, false) + deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall") // Extract the params from the struct. forwardParams := []llvm.Value{} zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false) for i := range getParams(callback.Signature) { - gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep") + gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep") forwardParam := b.CreateLoad(gep, "param") forwardParams = append(forwardParams, forwardParam) } @@ -381,7 +525,7 @@ func (b *builder) createRunDefers() { } // Call real function. - b.createCall(b.getFunction(callback), forwardParams, "") + b.createInvoke(b.getFunction(callback), forwardParams, "") case *ssa.MakeClosure: // Get the real defer struct type and cast to it. 
@@ -392,14 +536,14 @@ func (b *builder) createRunDefers() { valueTypes = append(valueTypes, b.getLLVMType(params.At(i).Type())) } valueTypes = append(valueTypes, b.i8ptrType) // closure - deferFrameType := b.ctx.StructType(valueTypes, false) - deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame") + deferredCallType := b.ctx.StructType(valueTypes, false) + deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall") // Extract the params from the struct. forwardParams := []llvm.Value{} zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false) for i := 2; i < len(valueTypes); i++ { - gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "") + gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "") forwardParam := b.CreateLoad(gep, "param") forwardParams = append(forwardParams, forwardParam) } @@ -421,14 +565,14 @@ func (b *builder) createRunDefers() { valueTypes = append(valueTypes, b.getLLVMType(params.At(i).Type())) } - deferFrameType := b.ctx.StructType(valueTypes, false) - deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame") + deferredCallType := b.ctx.StructType(valueTypes, false) + deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall") // Extract the params from the struct. 
	var argValues []llvm.Value zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false) for i := 0; i < params.Len(); i++ { - gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep") + gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep") forwardParam := b.CreateLoad(gep, "param") argValues = append(argValues, forwardParam) } diff --git a/compiler/llvm.go b/compiler/llvm.go index 6bd29bb25b..0f15680da3 100644 --- a/compiler/llvm.go +++ b/compiler/llvm.go @@ -5,6 +5,7 @@ import ( "go/token" "go/types" "math/big" + "strings" "github.com/tinygo-org/tinygo/compiler/llvmutil" "tinygo.org/x/go-llvm" ) @@ -270,3 +271,43 @@ func (c *compilerContext) getPointerBitmap(typ llvm.Type, pos token.Pos) *big.In panic("unknown LLVM type") } } + +// archFamily returns the architecture from the LLVM triple but with some +// architecture names ("armv6", "thumbv7m", etc) merged into a single +// architecture name ("arm"). +func (c *compilerContext) archFamily() string { + arch := strings.Split(c.Triple, "-")[0] + if strings.HasPrefix(arch, "arm") || strings.HasPrefix(arch, "thumb") { + return "arm" + } + return arch +} + +// isThumb returns whether we're in ARM or in Thumb mode. It panics if the +// features string is not one for an ARM architecture. +func (c *compilerContext) isThumb() bool { + var isThumb, isNotThumb bool + for _, feature := range strings.Split(c.Features, ",") { + if feature == "+thumb-mode" { + isThumb = true + } + if feature == "-thumb-mode" { + isNotThumb = true + } + } + if isThumb == isNotThumb { + panic("unexpected feature flags") + } + return isThumb +} + +// readStackPointer emits an LLVM intrinsic call that returns the current stack
// pointer as an *i8. 
+func (b *builder) readStackPointer() llvm.Value { + stacksave := b.mod.NamedFunction("llvm.stacksave") + if stacksave.IsNil() { + fnType := llvm.FunctionType(b.i8ptrType, nil, false) + stacksave = llvm.AddFunction(b.mod, "llvm.stacksave", fnType) + } + return b.CreateCall(stacksave, nil, "") +} diff --git a/compiler/testdata/channel.ll b/compiler/testdata/channel.ll index 04bfa4af11..f7ff48e18b 100644 --- a/compiler/testdata/channel.ll +++ b/compiler/testdata/channel.ll @@ -5,9 +5,11 @@ target triple = "wasm32-unknown-wasi" %runtime.channel = type { i32, i32, i8, %runtime.channelBlockedList*, i32, i32, i32, i8* } %runtime.channelBlockedList = type { %runtime.channelBlockedList*, %"internal/task.Task"*, %runtime.chanSelectState*, { %runtime.channelBlockedList*, i32, i32 } } -%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.state" } +%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.state", %"internal/task.DeferFrame"* } %"internal/task.state" = type { i32, i8*, %"internal/task.stackState", i1 } %"internal/task.stackState" = type { i32, i32 } +%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface } +%runtime._interface = type { i32, i8* } %runtime.chanSelectState = type { %runtime.channel*, i8* } declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*, i8*) diff --git a/compiler/testdata/goroutine-cortex-m-qemu-tasks.ll b/compiler/testdata/goroutine-cortex-m-qemu-tasks.ll index d34a1eb43e..e41882c721 100644 --- a/compiler/testdata/goroutine-cortex-m-qemu-tasks.ll +++ b/compiler/testdata/goroutine-cortex-m-qemu-tasks.ll @@ -5,8 +5,10 @@ target triple = "thumbv7m-unknown-unknown-eabi" %runtime.channel = type { i32, i32, i8, %runtime.channelBlockedList*, i32, i32, i32, i8* } %runtime.channelBlockedList = type { %runtime.channelBlockedList*, %"internal/task.Task"*, %runtime.chanSelectState*, { %runtime.channelBlockedList*, i32, i32 } } -%"internal/task.Task" = 
type { %"internal/task.Task"*, i8*, i64, %"internal/task.state" } +%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.state", %"internal/task.DeferFrame"* } %"internal/task.state" = type { i32, i32* } +%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface } +%runtime._interface = type { i32, i8* } %runtime.chanSelectState = type { %runtime.channel*, i8* } @"main$string" = internal unnamed_addr constant [4 x i8] c"test", align 1 diff --git a/compiler/testdata/goroutine-wasm-asyncify.ll b/compiler/testdata/goroutine-wasm-asyncify.ll index 1ecf0c7960..e427d9bee7 100644 --- a/compiler/testdata/goroutine-wasm-asyncify.ll +++ b/compiler/testdata/goroutine-wasm-asyncify.ll @@ -5,9 +5,11 @@ target triple = "wasm32-unknown-wasi" %runtime.channel = type { i32, i32, i8, %runtime.channelBlockedList*, i32, i32, i32, i8* } %runtime.channelBlockedList = type { %runtime.channelBlockedList*, %"internal/task.Task"*, %runtime.chanSelectState*, { %runtime.channelBlockedList*, i32, i32 } } -%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.state" } +%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.state", %"internal/task.DeferFrame"* } %"internal/task.state" = type { i32, i8*, %"internal/task.stackState", i1 } %"internal/task.stackState" = type { i32, i32 } +%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface } +%runtime._interface = type { i32, i8* } %runtime.chanSelectState = type { %runtime.channel*, i8* } @"main$string" = internal unnamed_addr constant [4 x i8] c"test", align 1 diff --git a/compiler/testdata/goroutine-wasm-coroutines.ll b/compiler/testdata/goroutine-wasm-coroutines.ll index 6f30824719..77f7ed0fba 100644 --- a/compiler/testdata/goroutine-wasm-coroutines.ll +++ b/compiler/testdata/goroutine-wasm-coroutines.ll @@ -6,8 +6,10 @@ target triple = "wasm32-unknown-wasi" 
%runtime.funcValueWithSignature = type { i32, i8* } %runtime.channel = type { i32, i32, i8, %runtime.channelBlockedList*, i32, i32, i32, i8* } %runtime.channelBlockedList = type { %runtime.channelBlockedList*, %"internal/task.Task"*, %runtime.chanSelectState*, { %runtime.channelBlockedList*, i32, i32 } } -%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.state" } +%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.state", %"internal/task.DeferFrame"* } %"internal/task.state" = type { i8* } +%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface } +%runtime._interface = type { i32, i8* } %runtime.chanSelectState = type { %runtime.channel*, i8* } @"main$pack" = internal unnamed_addr constant { i32, i8* } { i32 5, i8* undef } diff --git a/interp/interpreter.go b/interp/interpreter.go index 304f651756..ec5a7d413c 100644 --- a/interp/interpreter.go +++ b/interp/interpreter.go @@ -188,6 +188,15 @@ func (r *runner) run(fn *function, params []value, parentMem *memoryView, indent if err != nil { return nil, mem, r.errorAt(inst, err) } + if v, ok := inst.operands[0].(localValue); ok && !v.value.IsNil() && !v.value.IsAInlineAsm().IsNil() { + // Inline assembly can't be executed at compile time. It must be + // run at runtime. + err := r.runAtRuntime(fn, inst, locals, &mem, indent) + if err != nil { + return nil, mem, err + } + continue + } callFn := r.getFunction(fnPtr.llvmValue(&mem)) switch { case callFn.name == "runtime.trackPointer": diff --git a/main_test.go b/main_test.go index d1ef19649c..1d88984980 100644 --- a/main_test.go +++ b/main_test.go @@ -184,6 +184,13 @@ func runPlatTests(options compileopts.Options, tests []string, t *testing.T) { runTest("rand.go", options, t, nil, nil) }) } + if options.Target != "wasi" && options.Target != "wasm" { + // The recover() builtin isn't supported yet on WebAssembly and Windows. 
+ t.Run("recover.go", func(t *testing.T) { + t.Parallel() + runTest("recover.go", options, t, nil, nil) + }) + } } // Due to some problems with LLD, we cannot run links in parallel, or in parallel with compiles. diff --git a/src/internal/task/task.go b/src/internal/task/task.go index bad501b61e..7f338b0b34 100644 --- a/src/internal/task/task.go +++ b/src/internal/task/task.go @@ -17,6 +17,22 @@ type Task struct { // state is the underlying running state of the task. state state + + // DeferFrame stores a pointer to the (stack allocated) defer frame of the + // goroutine that is used for the recover builtin. + DeferFrame *DeferFrame +} + +// DeferFrame is a stack allocated object that stores information for the +// current "defer frame", which is used in functions that use the `defer` +// keyword. +// The compiler knows the JumpPC struct offset. +type DeferFrame struct { + JumpSP unsafe.Pointer // stack pointer to return to + JumpPC unsafe.Pointer // pc to return to + Previous *DeferFrame // previous recover buffer pointer + Panicking bool // true iff this defer frame is panicking + PanicValue interface{} // panic value, might be nil for panic(nil) for example } // getGoroutineStackSize is a compiler intrinsic that returns the stack size for diff --git a/src/internal/task/task_none.go b/src/internal/task/task_none.go index 79d02c5cbe..40420f0224 100644 --- a/src/internal/task/task_none.go +++ b/src/internal/task/task_none.go @@ -4,6 +4,9 @@ package task import "unsafe" +// There is only one goroutine so the task struct can be a global. +var mainTask Task + //go:linkname runtimePanic runtime.runtimePanic func runtimePanic(str string) @@ -12,8 +15,8 @@ func Pause() { } func Current() *Task { - runtimePanic("scheduler is disabled") - return nil + // Return a task struct, which is used for the recover builtin for example. 
+ return &mainTask } //go:noinline diff --git a/src/runtime/gc_386.S b/src/runtime/asm_386.S similarity index 71% rename from src/runtime/gc_386.S rename to src/runtime/asm_386.S index 9604ddbd03..1854be6a00 100644 --- a/src/runtime/gc_386.S +++ b/src/runtime/asm_386.S @@ -21,3 +21,13 @@ tinygo_scanCurrentStack: // were only pushed to be discoverable by the GC. addl $28, %esp retl + + +.section .text.tinygo_longjmp +.global tinygo_longjmp +tinygo_longjmp: + // Note: the code we jump to assumes eax is set to a non-zero value if we + // jump from here. + movl 8(%esp), %eax // jumpPC (stash in volatile register) + movl 4(%esp), %esp // jumpSP + jmpl *%eax diff --git a/src/runtime/gc_amd64.S b/src/runtime/asm_amd64.S similarity index 70% rename from src/runtime/gc_amd64.S rename to src/runtime/asm_amd64.S index c0ad7bc88a..2168a6f415 100644 --- a/src/runtime/gc_amd64.S +++ b/src/runtime/asm_amd64.S @@ -28,6 +28,22 @@ _tinygo_scanCurrentStack: addq $56, %rsp retq + +#ifdef __ELF__ +.section .text.tinygo_longjmp +.global tinygo_longjmp +tinygo_longjmp: +#else // Darwin +.global _tinygo_longjmp +_tinygo_longjmp: +#endif + // Note: the code we jump to assumes rax is non-zero so we have to load it + // with some value here. + movq $1, %rax + movq %rdi, %rsp // jumpSP + jmpq *%rsi // jumpPC + + #ifdef __MACH__ // Darwin // allow these symbols to stripped as dead code .subsections_via_symbols diff --git a/src/runtime/gc_amd64_windows.S b/src/runtime/asm_amd64_windows.S similarity index 68% rename from src/runtime/gc_amd64_windows.S rename to src/runtime/asm_amd64_windows.S index e65352cba5..2e717baf94 100644 --- a/src/runtime/gc_amd64_windows.S +++ b/src/runtime/asm_amd64_windows.S @@ -20,3 +20,12 @@ tinygo_scanCurrentStack: // were only pushed to be discoverable by the GC. addq $72, %rsp retq + +.section .text.tinygo_longjmp,"ax" +.global tinygo_longjmp +tinygo_longjmp: + // Note: the code we jump to assumes rax is non-zero so we have to load it + // with some value here. 
+ movq $1, %rax + movq %rcx, %rsp // jumpSP + jmpq *%rdx // jumpPC diff --git a/src/runtime/gc_arm.S b/src/runtime/asm_arm.S similarity index 67% rename from src/runtime/gc_arm.S rename to src/runtime/asm_arm.S index 5c6c58f114..d1de455c2d 100644 --- a/src/runtime/gc_arm.S +++ b/src/runtime/asm_arm.S @@ -31,3 +31,16 @@ tinygo_scanCurrentStack: pop {pc} .cfi_endproc .size tinygo_scanCurrentStack, .-tinygo_scanCurrentStack + + +.section .text.tinygo_longjmp +.global tinygo_longjmp +.type tinygo_longjmp, %function +tinygo_longjmp: + .cfi_startproc + // Note: the code we jump to assumes r0 is set to a non-zero value if we + // jump from here (which is conveniently already the case). + mov sp, r0 // jumpSP + mov pc, r1 // jumpPC + .cfi_endproc +.size tinygo_longjmp, .-tinygo_longjmp diff --git a/src/runtime/gc_arm64.S b/src/runtime/asm_arm64.S similarity index 73% rename from src/runtime/gc_arm64.S rename to src/runtime/asm_arm64.S index 8a7f53355e..aa05bcba79 100644 --- a/src/runtime/gc_arm64.S +++ b/src/runtime/asm_arm64.S @@ -21,3 +21,12 @@ tinygo_scanCurrentStack: // Restore stack state and return. ldp x29, x30, [sp], #96 ret + + +.section .text.tinygo_longjmp +.global tinygo_longjmp +tinygo_longjmp: + // Note: the code we jump to assumes x0 is set to a non-zero value if we + // jump from here (which is conveniently already the case). + mov sp, x0 // jumpSP + br x1 // jumpPC diff --git a/src/runtime/gc_avr.S b/src/runtime/asm_avr.S similarity index 100% rename from src/runtime/gc_avr.S rename to src/runtime/asm_avr.S diff --git a/src/runtime/gc_riscv.S b/src/runtime/asm_riscv.S similarity index 78% rename from src/runtime/gc_riscv.S rename to src/runtime/asm_riscv.S index e0aeada5db..9a99fbc4c1 100644 --- a/src/runtime/gc_riscv.S +++ b/src/runtime/asm_riscv.S @@ -40,3 +40,12 @@ tinygo_scanCurrentStack: // Return to the caller. 
ret + + +.section .text.tinygo_longjmp +.global tinygo_longjmp +tinygo_longjmp: + // Note: the code we jump to assumes a0 is non-zero, which is already the + // case because that's jumpSP (the stack pointer). + mv sp, a0 // jumpSP + jr a1 // jumpPC diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 51778d061c..fc8db1ea0e 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -1,12 +1,35 @@ package runtime +import ( + "internal/task" + "unsafe" +) + // trap is a compiler hint that this function cannot be executed. It is // translated into either a trap instruction or a call to abort(). //export llvm.trap func trap() +// Inline assembly stub. It is essentially C longjmp but modified a bit for the +// purposes of TinyGo. It restores the stack pointer and jumps to the given pc. +//export tinygo_longjmp +func tinygo_longjmp(sp, pc unsafe.Pointer) + +// Compiler intrinsic. +// Returns whether recover is supported on the current architecture. +func supportsRecover() bool + // Builtin function panic(msg), used as a compiler intrinsic. func _panic(message interface{}) { + if supportsRecover() { + frame := task.Current().DeferFrame + if frame != nil { + frame.PanicValue = message + frame.Panicking = true + tinygo_longjmp(frame.JumpSP, frame.JumpPC) + // unreachable + } + } printstring("panic: ") printitf(message) printnl() @@ -20,10 +43,59 @@ func runtimePanic(msg string) { abort() } -// Try to recover a panicking goroutine. -func _recover() interface{} { - // Deferred functions are currently not executed during panic, so there is - // no way this can return anything besides nil. +// Called at the start of a function that includes a deferred call. +// It gets passed in the stack-allocated defer frame and configures it. +// Note that the frame is not zeroed yet, so we need to initialize all values +// that will be used. 
+//go:inline +//go:nobounds +func setupDeferFrame(frame *task.DeferFrame, jumpSP unsafe.Pointer) { + currentTask := task.Current() + frame.Previous = currentTask.DeferFrame + frame.JumpSP = jumpSP + frame.Panicking = false + currentTask.DeferFrame = frame +} + +// Called right before the return instruction. It pops the defer frame from the +// linked list of defer frames. It also re-raises a panic if the goroutine is +// still panicking. +//go:inline +//go:nobounds +func destroyDeferFrame(frame *task.DeferFrame) { + task.Current().DeferFrame = frame.Previous + if frame.Panicking { + // We're still panicking! + // Re-raise the panic now. + _panic(frame.PanicValue) + } +} + +// _recover is the built-in recover() function. It tries to recover a currently +// panicking goroutine. +// useParentFrame is set when the caller of runtime._recover has a defer frame +// itself. In that case, recover() shouldn't check that frame but one frame up. +func _recover(useParentFrame bool) interface{} { + if !supportsRecover() { + // Compiling without stack unwinding support, so make this a no-op. + return nil + } + // TODO: somehow check that recover() is called directly by a deferred + // function in a panicking goroutine. Maybe this can be done by comparing + // the frame pointer? + frame := task.Current().DeferFrame + if useParentFrame { + // Don't recover panic from the current frame (which can't be panicking + // already), but instead from the previous frame. + frame = frame.Previous + } + if frame != nil && frame.Panicking { + // Only the first call to recover returns the panic value. It also stops + // the panicking sequence, hence setting panicking to false. + frame.Panicking = false + return frame.PanicValue + } + // Not panicking, so return a nil interface. 
return nil } diff --git a/targets/avr.json b/targets/avr.json index fbf4306451..224444cb44 100644 --- a/targets/avr.json +++ b/targets/avr.json @@ -16,7 +16,7 @@ ], "extra-files": [ "src/internal/task/task_stack_avr.S", - "src/runtime/gc_avr.S" + "src/runtime/asm_avr.S" ], "gdb": ["avr-gdb"] } diff --git a/targets/cortex-m.json b/targets/cortex-m.json index c2661393ab..b16b1902ab 100644 --- a/targets/cortex-m.json +++ b/targets/cortex-m.json @@ -23,7 +23,7 @@ "extra-files": [ "src/device/arm/cortexm.s", "src/internal/task/task_stack_cortexm.S", - "src/runtime/gc_arm.S" + "src/runtime/asm_arm.S" ], "gdb": ["gdb-multiarch", "arm-none-eabi-gdb"] } diff --git a/targets/cortex-m33.json b/targets/cortex-m33.json index 9b74ad163d..abe43d1683 100644 --- a/targets/cortex-m33.json +++ b/targets/cortex-m33.json @@ -1,7 +1,3 @@ { - "inherits": ["cortex-m"], - "llvm-target": "thumbv7m-unknown-unknown-eabi", - "cflags": [ - "-mfloat-abi=soft" - ] + "inherits": ["cortex-m3"] } diff --git a/targets/gameboy-advance.json b/targets/gameboy-advance.json index 2467b4b915..8ae0cc9069 100644 --- a/targets/gameboy-advance.json +++ b/targets/gameboy-advance.json @@ -21,7 +21,7 @@ "linkerscript": "targets/gameboy-advance.ld", "extra-files": [ "targets/gameboy-advance.s", - "src/runtime/gc_arm.S" + "src/runtime/asm_arm.S" ], "gdb": ["gdb-multiarch"], "emulator": ["mgba", "-3"] diff --git a/targets/nintendoswitch.json b/targets/nintendoswitch.json index ec0b2713a9..383853ccaa 100644 --- a/targets/nintendoswitch.json +++ b/targets/nintendoswitch.json @@ -29,7 +29,7 @@ "extra-files": [ "targets/nintendoswitch.s", "src/internal/task/task_stack_arm64.S", - "src/runtime/gc_arm64.S", + "src/runtime/asm_arm64.S", "src/runtime/runtime_nintendoswitch.s" ] } diff --git a/targets/riscv.json b/targets/riscv.json index 8d9dbb2650..ce36a24d37 100644 --- a/targets/riscv.json +++ b/targets/riscv.json @@ -18,7 +18,7 @@ "extra-files": [ "src/device/riscv/start.S", "src/internal/task/task_stack_tinygoriscv.S", 
- "src/runtime/gc_riscv.S", + "src/runtime/asm_riscv.S", "src/device/riscv/handleinterrupt.S" ], "gdb": ["riscv64-unknown-elf-gdb"] diff --git a/testdata/recover.go b/testdata/recover.go new file mode 100644 index 0000000000..ced90cfaee --- /dev/null +++ b/testdata/recover.go @@ -0,0 +1,99 @@ +package main + +func main() { + println("# simple recover") + recoverSimple() + + println("\n# recover with result") + result := recoverWithResult() + println("result:", result) + + println("\n# nested defer frame") + nestedDefer() + + println("\n# nested panic: panic inside recover") + nestedPanic() + + println("\n# panic inside defer") + panicInsideDefer() + + println("\n# panic replace") + panicReplace() +} + +func recoverSimple() { + defer func() { + println("recovering...") + printitf("recovered:", recover()) + }() + println("running panic...") + panic("panic") +} + +func recoverWithResult() (result int) { + defer func() { + printitf("recovered:", recover()) + }() + result = 3 + println("running panic...") + panic("panic") +} + +func nestedDefer() { + defer func() { + printitf("recovered:", recover()) + }() + + func() { + // The defer here doesn't catch the panic using recover(), so the outer + // panic should do that. 
+ defer func() { + println("deferred nested function") + }() + panic("panic") + }() + println("unreachable") +} + +func nestedPanic() { + defer func() { + printitf("recovered 1:", recover()) + + defer func() { + printitf("recovered 2:", recover()) + }() + + panic("foo") + }() + panic("panic") +} + +func panicInsideDefer() { + defer func() { + printitf("recovered:", recover()) + }() + defer func() { + panic("panic") + }() +} + +func panicReplace() { + defer func() { + printitf("recovered:", recover()) + }() + defer func() { + println("panic 2") + panic("panic 2") + }() + println("panic 1") + panic("panic 1") +} + +func printitf(msg string, itf interface{}) { + switch itf := itf.(type) { + case string: + println(msg, itf) + default: + println(msg, itf) + } +} diff --git a/testdata/recover.txt b/testdata/recover.txt new file mode 100644 index 0000000000..d276498550 --- /dev/null +++ b/testdata/recover.txt @@ -0,0 +1,25 @@ +# simple recover +running panic... +recovering... +recovered: panic + +# recover with result +running panic... +recovered: panic +result: 3 + +# nested defer frame +deferred nested function +recovered: panic + +# nested panic: panic inside recover +recovered 1: panic +recovered 2: foo + +# panic inside defer +recovered: panic + +# panic replace +panic 1 +panic 2 +recovered: panic 2