From 4aa9422e3c7ebeb65e61d11d5a86e406d2edb421 Mon Sep 17 00:00:00 2001 From: Anuraag Agrawal Date: Mon, 4 Sep 2023 10:57:57 +0900 Subject: [PATCH] Revert "Remove threads support (#1487)" This reverts commit 714368bcea70260aad67be95491887172dd31736. --- .github/workflows/spectest.yaml | 2 + Makefile | 26 +- api/features.go | 5 + experimental/features.go | 14 + experimental/features_example_test.go | 94 ++ experimental/testdata/pthread.c | 18 + experimental/testdata/pthread.wasm | Bin 0 -> 3295 bytes internal/engine/interpreter/interpreter.go | 362 +++++ .../integration_test/engine/adhoc_test.go | 4 +- .../engine/testdata/threads/add.wasm | Bin 0 -> 189 bytes .../engine/testdata/threads/add.wat | 38 + .../engine/testdata/threads/mutex.wasm | Bin 0 -> 738 bytes .../engine/testdata/threads/mutex.wat | 312 +++++ .../engine/testdata/threads/sub.wasm | Bin 0 -> 189 bytes .../engine/testdata/threads/sub.wat | 38 + .../engine/testdata/threads/xor.wasm | Bin 0 -> 189 bytes .../engine/testdata/threads/xor.wat | 38 + .../integration_test/engine/threads_test.go | 280 ++++ .../integration_test/spectest/spectest.go | 6 +- .../spectest/threads/atomic.wast.patch | 49 + .../spectest/threads/spec_test.go | 32 + .../spectest/threads/testdata/atomic.0.wasm | Bin 0 -> 3307 bytes .../spectest/threads/testdata/atomic.1.wasm | Bin 0 -> 265 bytes .../spectest/threads/testdata/atomic.10.wasm | Bin 0 -> 43 bytes .../spectest/threads/testdata/atomic.11.wasm | Bin 0 -> 43 bytes .../spectest/threads/testdata/atomic.12.wasm | Bin 0 -> 43 bytes .../spectest/threads/testdata/atomic.13.wasm | Bin 0 -> 44 bytes .../spectest/threads/testdata/atomic.14.wasm | Bin 0 -> 44 bytes .../spectest/threads/testdata/atomic.15.wasm | Bin 0 -> 44 bytes .../spectest/threads/testdata/atomic.16.wasm | Bin 0 -> 44 bytes .../spectest/threads/testdata/atomic.17.wasm | Bin 0 -> 44 bytes .../spectest/threads/testdata/atomic.18.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.19.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.2.wasm | Bin 0 -> 265 bytes .../spectest/threads/testdata/atomic.20.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.21.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.22.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.23.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.24.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.25.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.26.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.27.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.28.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.29.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.3.wasm | Bin 0 -> 756 bytes .../spectest/threads/testdata/atomic.30.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.31.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.32.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.33.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.34.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.35.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.36.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.37.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.38.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.39.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.4.wasm | Bin 0 -> 50 bytes .../spectest/threads/testdata/atomic.40.wasm | Bin 0 -> 45 bytes 
.../spectest/threads/testdata/atomic.41.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.42.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.43.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.44.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.45.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.46.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.47.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.48.wasm | Bin 0 -> 47 bytes .../spectest/threads/testdata/atomic.49.wasm | Bin 0 -> 47 bytes .../spectest/threads/testdata/atomic.5.wasm | Bin 0 -> 45 bytes .../spectest/threads/testdata/atomic.50.wasm | Bin 0 -> 47 bytes .../spectest/threads/testdata/atomic.51.wasm | Bin 0 -> 47 bytes .../spectest/threads/testdata/atomic.52.wasm | Bin 0 -> 47 bytes .../spectest/threads/testdata/atomic.6.wasm | Bin 0 -> 47 bytes .../spectest/threads/testdata/atomic.7.wasm | Bin 0 -> 47 bytes .../spectest/threads/testdata/atomic.8.wasm | Bin 0 -> 43 bytes .../spectest/threads/testdata/atomic.9.wasm | Bin 0 -> 43 bytes .../spectest/threads/testdata/atomic.json | 329 +++++ .../spectest/threads/testdata/atomic.wast | 611 +++++++++ internal/testing/binaryencoding/import.go | 4 +- internal/testing/binaryencoding/limits.go | 18 +- internal/testing/binaryencoding/memory.go | 2 +- internal/testing/binaryencoding/table.go | 2 +- internal/wasm/binary/decoder.go | 2 +- internal/wasm/binary/import.go | 2 +- internal/wasm/binary/limits.go | 13 +- internal/wasm/binary/limits_test.go | 36 +- internal/wasm/binary/memory.go | 19 +- internal/wasm/binary/memory_test.go | 31 +- internal/wasm/binary/section.go | 3 +- internal/wasm/binary/section_test.go | 4 +- internal/wasm/binary/table.go | 6 +- internal/wasm/binary/table_test.go | 8 + internal/wasm/func_validation.go | 375 ++++++ internal/wasm/func_validation_test.go | 1169 +++++++++++++++++ internal/wasm/instruction.go | 316 +++++ internal/wasm/memory.go | 90 +- internal/wasm/memory_test.go | 138 ++ internal/wasm/module.go | 2 + internal/wasmruntime/errors.go | 6 + internal/wazeroir/compiler.go | 542 ++++++++ internal/wazeroir/compiler_test.go | 649 +++++++++ internal/wazeroir/operations.go | 245 ++++ internal/wazeroir/signature.go | 54 + internal/wazeroir/signature_test.go | 335 +++++ 102 files changed, 6293 insertions(+), 36 deletions(-) create mode 100644 experimental/features.go create mode 100644 experimental/features_example_test.go create mode 100644 experimental/testdata/pthread.c create mode 100755 experimental/testdata/pthread.wasm create mode 100644 internal/integration_test/engine/testdata/threads/add.wasm create mode 100644 internal/integration_test/engine/testdata/threads/add.wat create mode 100644 internal/integration_test/engine/testdata/threads/mutex.wasm create mode 100644 internal/integration_test/engine/testdata/threads/mutex.wat create mode 100644 internal/integration_test/engine/testdata/threads/sub.wasm create mode 100644 internal/integration_test/engine/testdata/threads/sub.wat create mode 100644 internal/integration_test/engine/testdata/threads/xor.wasm create mode 100644 internal/integration_test/engine/testdata/threads/xor.wat create mode 100644 internal/integration_test/engine/threads_test.go create mode 100644 internal/integration_test/spectest/threads/atomic.wast.patch create mode 100644 internal/integration_test/spectest/threads/spec_test.go create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.0.wasm create mode 100644 
internal/integration_test/spectest/threads/testdata/atomic.1.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.10.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.11.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.12.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.13.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.14.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.15.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.16.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.17.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.18.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.19.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.2.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.20.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.21.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.22.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.23.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.24.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.25.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.26.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.27.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.28.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.29.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.3.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.30.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.31.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.32.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.33.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.34.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.35.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.36.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.37.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.38.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.39.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.4.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.40.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.41.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.42.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.43.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.44.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.45.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.46.wasm create mode 100644 
internal/integration_test/spectest/threads/testdata/atomic.47.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.48.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.49.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.5.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.50.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.51.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.52.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.6.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.7.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.8.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.9.wasm create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.json create mode 100644 internal/integration_test/spectest/threads/testdata/atomic.wast diff --git a/.github/workflows/spectest.yaml b/.github/workflows/spectest.yaml index 0a795376f5..4db637d0ab 100644 --- a/.github/workflows/spectest.yaml +++ b/.github/workflows/spectest.yaml @@ -35,6 +35,7 @@ jobs: spec-version: - "v1" - "v2" + - "threads" steps: - uses: actions/checkout@v3 @@ -60,6 +61,7 @@ jobs: spec-version: - "v1" - "v2" + - "threads" steps: - uses: actions/checkout@v3 diff --git a/Makefile b/Makefile index 26d4c9e187..4b3525b93d 100644 --- a/Makefile +++ b/Makefile @@ -119,13 +119,17 @@ spectest_v1_testdata_dir := $(spectest_v1_dir)/testdata spec_version_v1 := wg-1.0 spectest_v2_dir := $(spectest_base_dir)/v2 spectest_v2_testdata_dir := $(spectest_v2_dir)/testdata -# Latest draft state as of May 23, 2023. -spec_version_v2 := 2e8912e88a3118a46b90e8ccb659e24b4e8f3c23 +# Latest draft state as of Dec 16, 2022. +spec_version_v2 := 1782235239ddebaf2cb079b00fdaa2d2c4dedba3 +spectest_threads_dir := $(spectest_base_dir)/threads +spectest_threads_testdata_dir := $(spectest_threads_dir)/testdata +spec_version_threads := cc01bf0d17ba3fb1dc59fb7c5c725838aff18b50 .PHONY: build.spectest build.spectest: @$(MAKE) build.spectest.v1 @$(MAKE) build.spectest.v2 + @$(MAKE) build.spectest.threads .PHONY: build.spectest.v1 build.spectest.v1: # Note: wabt by default uses >1.0 features, so wast2json flags might drift as they include more. See WebAssembly/wabt#1878 @@ -165,9 +169,21 @@ build.spectest.v2: # Note: SIMD cases are placed in the "simd" subdirectory. wast2json --debug-names --no-check $$f; \ done +.PHONY: build.spectest.threads +build.spectest.threads: + @mkdir -p $(spectest_threads_testdata_dir) + @cd $(spectest_threads_testdata_dir) \ + && curl -sSL 'https://api.github.com/repos/WebAssembly/threads/contents/test/core?ref=$(spec_version_threads)' | jq -r '.[]| .download_url' | grep -E "atomic.wast" | xargs -Iurl curl -sJL url -O +# Fix broken CAS spectests +# https://github.com/WebAssembly/threads/issues/195#issuecomment-1318429506 + @cd $(spectest_threads_testdata_dir) && patch < ../atomic.wast.patch + @cd $(spectest_threads_testdata_dir) && for f in `find . -name '*.wast'`; do \ + wast2json --enable-threads --debug-names $$f; \ + done + .PHONY: test test: - @go test $(go_test_options) $$(go list ./... | grep -vE '$(spectest_v1_dir)|$(spectest_v2_dir)') + @go test $(go_test_options) $$(go list ./... 
| grep -vE '$(spectest_v1_dir)|$(spectest_v2_dir)|$(spectest_threads_dir)') @cd internal/version/testdata && go test $(go_test_options) ./... .PHONY: coverage @@ -181,6 +197,7 @@ coverage: ## Generate test coverage spectest: @$(MAKE) spectest.v1 @$(MAKE) spectest.v2 + @$(MAKE) spectest.threads spectest.v1: @go test $(go_test_options) $$(go list ./... | grep $(spectest_v1_dir)) @@ -188,6 +205,9 @@ spectest.v1: spectest.v2: @go test $(go_test_options) $$(go list ./... | grep $(spectest_v2_dir)) +spectest.threads: + @go test $(go_test_options) $$(go list ./... | grep $(spectest_threads_dir)) + golangci_lint_path := $(shell go env GOPATH)/bin/golangci-lint $(golangci_lint_path): diff --git a/api/features.go b/api/features.go index 04d35e18fc..22a74f41c9 100644 --- a/api/features.go +++ b/api/features.go @@ -143,6 +143,8 @@ const ( // Note: The instruction list is too long to enumerate in godoc. // See https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md CoreFeatureSIMD + + // Update experimental/features.go when adding elements here. ) // SetEnabled enables or disables the feature or group of features. @@ -207,6 +209,9 @@ func featureName(f CoreFeatures) string { case CoreFeatureSIMD: // match https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md return "simd" + case CoreFeatureSIMD << 1: // Defined in experimental/features.go + // match https://github.com/WebAssembly/threads/blob/main/proposals/threads/Overview.md + return "threads" } return "" } diff --git a/experimental/features.go b/experimental/features.go new file mode 100644 index 0000000000..964c201bb2 --- /dev/null +++ b/experimental/features.go @@ -0,0 +1,14 @@ +package experimental + +import "github.com/tetratelabs/wazero/api" + +// CoreFeaturesThreads enables threads instructions ("threads"). +// +// # Notes +// +// - This is not yet implemented by default, so you will need to use +// wazero.NewRuntimeConfigInterpreter +// - The instruction list is too long to enumerate in godoc. +// See https://github.com/WebAssembly/threads/blob/main/proposals/threads/Overview.md +// - Atomic operations are guest-only until api.Memory or otherwise expose them to host functions. +const CoreFeaturesThreads = api.CoreFeatureSIMD << 1 // TODO: Implement the compiler engine diff --git a/experimental/features_example_test.go b/experimental/features_example_test.go new file mode 100644 index 0000000000..333b086098 --- /dev/null +++ b/experimental/features_example_test.go @@ -0,0 +1,94 @@ +package experimental_test + +import ( + "context" + _ "embed" + "fmt" + "log" + "sync" + + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental" + "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +// pthreadWasm was generated by the following: +// +// docker run -it --rm -v `pwd`/testdata:/workspace ghcr.io/webassembly/wasi-sdk:wasi-sdk-20 sh -c '$CC -o /workspace/pthread.wasm /workspace/pthread.c --target=wasm32-wasi-threads --sysroot=/wasi-sysroot -pthread -mexec-model=reactor -Wl,--export=run -Wl,--export=get' +// +// TODO: Use zig cc instead of wasi-sdk to compile when it supports wasm32-wasi-threads +// https://github.com/ziglang/zig/issues/15484 +// +//go:embed testdata/pthread.wasm +var pthreadWasm []byte + +// This shows how to use a WebAssembly module compiled with the threads feature. 
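+// Eight goroutines each call the exported "run" function 6000 times. In the guest,
+// "run" increments a counter guarded by a pthread mutex and "get" reads it back, so
+// the example prints 8*6000 = 48000 once shared memory and the atomic instructions
+// behave correctly.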
+func ExampleCoreFeaturesThreads() { + // Use a default context + ctx := context.Background() + + // Threads support is currently only supported with interpreter, so the config + // must explicitly specify it. + cfg := wazero.NewRuntimeConfigInterpreter() + + // Threads support must be enabled explicitly in addition to standard V2 features. + cfg = cfg.WithCoreFeatures(api.CoreFeaturesV2 | experimental.CoreFeaturesThreads) + + r := wazero.NewRuntimeWithConfig(ctx, cfg) + defer r.Close(ctx) + + // Because we are using wasi-sdk to compile the guest, we must initialize WASI. + wasi_snapshot_preview1.MustInstantiate(ctx, r) + + mod, err := r.Instantiate(ctx, pthreadWasm) + if err != nil { + log.Panicln(err) + } + + // Channel to synchronize start of goroutines before running. + startCh := make(chan struct{}) + // Channel to synchronize end of goroutines. + endCh := make(chan struct{}) + + // Unfortunately, while memory accesses are thread-safe using atomic operations, compilers such + // as LLVM still have global state that is not handled thread-safe, preventing actually making + // concurrent invocations. We go ahead and add a global lock for now until this is resolved. + // TODO: Remove this lock once functions can actually be called concurrently. + var mu sync.Mutex + + // We start up 8 goroutines and run for 6000 iterations each. The count should reach + // 48000, at the end, but it would not if threads weren't working! + for i := 0; i < 8; i++ { + go func() { + defer func() { endCh <- struct{}{} }() + <-startCh + + mu.Lock() + defer mu.Unlock() + + // ExportedFunction must be called within each goroutine to have independent call stacks. + // This incurs some overhead, a sync.Pool can be used to reduce this overhead if neeeded. + fn := mod.ExportedFunction("run") + for i := 0; i < 6000; i++ { + _, err := fn.Call(ctx) + if err != nil { + log.Panicln(err) + } + } + }() + } + for i := 0; i < 8; i++ { + startCh <- struct{}{} + } + for i := 0; i < 8; i++ { + <-endCh + } + + res, err := mod.ExportedFunction("get").Call(ctx) + if err != nil { + log.Panicln(err) + } + fmt.Println(res[0]) + // Output: 48000 +} diff --git a/experimental/testdata/pthread.c b/experimental/testdata/pthread.c new file mode 100644 index 0000000000..d353cfbaf4 --- /dev/null +++ b/experimental/testdata/pthread.c @@ -0,0 +1,18 @@ +#include + +pthread_mutex_t mutex; +int count = 0; + +void run() { + pthread_mutex_lock(&mutex); + count++; + pthread_mutex_unlock(&mutex); +} + +int get() { + int res; + pthread_mutex_lock(&mutex); + res = count; + pthread_mutex_unlock(&mutex); + return res; +} diff --git a/experimental/testdata/pthread.wasm b/experimental/testdata/pthread.wasm new file mode 100755 index 0000000000000000000000000000000000000000..ce8abba2d49c87299f1418a8fbc071cb0d638ebb GIT binary patch literal 3295 zcmcguyKfxF8K2kg9yxcqmCe|)XeYC41|UI@;3GOJDQpb^i9(bjO^N^kR?Fi`zT@$p z%SbY0?gbH&DDNOCQlt#h#Jyp_g$oxpkRpFT{(t~!T)1#S(D{9{ON*o(r1O#3{dVSi z{=UZyOD6}MF~*mJyRtYccyX7py9_T*v?%DQilR8e+ryg{#1enuK|1LqlYTm!yfMg= z;W+zoCws7dsofj2_mjMHkR|uCoH2Ph@y`vsaNBo&gJa8&xPR1nYclJ_PrU)EI+v| z*d@WZ&~{qG{{qz7kJ)jJ;Z*~zj%%FRPbyL{`zethuw%_gg*bT;YaogfCfL@YAR}g2_;XskYFWe$ zUlhkA6bZ{CX@rs8NMXk_o>8rGuafkQAO6lsHg(Qti&sIDLK{L4GkiZ1Ml86+N z%$73h)RqT3UQ5)!pxB5-QI7&>tbwK0jb_YDsnG;Y{T{Mcyy_ydPsnyRMl>w}1c1T& zPoKEhO9D?Y>_;51RgO|}!BXwmDTQr#`MxtBac!6tE%7p5eO`=K%|U?O??R*Uv-Na3jJJRd_PeKji|7j>@2HxF1)Tm%0}P+lGLxbwl_tWigdQ zg_%>*b%@y_VnBv;FUeXgx2J#l&%fj}R$Gz`N~6k^qqklYD9VUw=nT76E|?3(?nR2i zv65nW)4o#x3PvSF1sVg)Tl3iEx8@_IjnaP*5}l0htz0+zu~(G~H=eC`!{3v~S1Aza 
zl?}FCL_U@M%TZwDA{_bbkMQzN171TQ?1o6el;P7eatV%mOEP>b_P1n6D-bZ=qPVgj zBY#&&qb<%JBfD=3ABXlpOOE>cEUy3LYHzcBh1S*UqYlNRgP1fmU8H%rd z_5U*jR&?W_!L%;R2|2l7KB&49%`2@M-9-0F6o(Z%k(6ap$&FZ*O*JBI4oNdJqUske z>A2Q+XI2v|NYd)s*7uLz$= zUDQ!L)1mDAE%Kv*WSleYG6HX)b;4W5_ ztsznr5X8!21pYK!P?7klQ6#D66rsim0YCB`Gb-T(gDAMufE93a%%M5Un%Z6mhgX`p z)ex?8JSqiBwn7$wtDS8Uwg8Hgb&%ZtgLRP#Fmju>#P<+7AV%GA`%l(I^@K2hUaqjE zew&X3X@P1NSn2PWCYO-}pp&F%%0TNQTSo*=Z$mjBs_<#d!5HE2OvYZfv(Df7CbsEV zx*g;E35lk$8`F8={E84H((bvW*~eXjnT=9FO=C1;RQYGL5)5#`VQgPR`x;+=ll=R9 z2GeFaf9>puOozXriqU@)I5Npk7W31ta(S*rvP>|{G)J*Ql5kqmlzN@o4uuf=Ro2fR zRe>vN4&(eZy~I8jd(KBBu{cet@h!(}v9IJK`;EhhWCY>}&gKRYJ2Qdnc8r+VRZ19h zN*o6>P%HBSjC_ZY8w#xK1;k~D&!euPW`Yr%LCE>3C6`MKyvV~ArZEg+e!4@7yBi5c zo4$cl4fC0Df`NbM^Rwm&q1bin+-2H`6}I5K0F2*Z3($*77g`sR57}4zBS~9Lo6J&v+;!2LAJBWB->lreI&egFYVv=*ITP=t84Wi>>l>>L*v$7cWdir zad~-TZDVt7bL0Bj*5>B5t>tfR?5*!^Y_;xfZGC%X{V01lTt7O)a&2v6wRLTYZ8!6D z43v|5S(+b?vx!;`(tL2xX-}5*?qP3#rA+g36In}ld)dnU-e5QFO_pnu&i(#M_Avi1 D0zb=t literal 0 HcmV?d00001 diff --git a/internal/engine/interpreter/interpreter.go b/internal/engine/interpreter/interpreter.go index 8f0db3c75f..94f902cb9a 100644 --- a/internal/engine/interpreter/interpreter.go +++ b/internal/engine/interpreter/interpreter.go @@ -3897,6 +3897,368 @@ func (ce *callEngine) callNativeFunc(ctx context.Context, m *wasm.ModuleInstance ce.pushValue(retLo) ce.pushValue(retHi) frame.pc++ + case wazeroir.OperationKindAtomicMemoryWait: + timeout := int64(ce.popValue()) + exp := ce.popValue() + offset := ce.popMemoryOffset(op) + if !memoryInst.Shared { + panic(wasmruntime.ErrRuntimeExpectedSharedMemory) + } + var cur uint64 + switch wazeroir.UnsignedType(op.B1) { + case wazeroir.UnsignedTypeI32: + if offset%4 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + val, ok := memoryInst.ReadUint32Le(offset) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + cur = uint64(val) + case wazeroir.UnsignedTypeI64: + if offset%8 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + val, ok := memoryInst.ReadUint64Le(offset) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + cur = val + } + if exp != cur { + ce.pushValue(1) + } else { + tooMany, timedOut := memoryInst.Wait(offset, timeout) + if tooMany { + panic(wasmruntime.ErrRuntimeTooManyWaiters) + } + if timedOut { + ce.pushValue(2) + } else { + ce.pushValue(0) + } + } + frame.pc++ + case wazeroir.OperationKindAtomicMemoryNotify: + count := ce.popValue() + offset := ce.popMemoryOffset(op) + if offset%4 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + // Just a bounds check + if offset >= memoryInst.Size() { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + res := memoryInst.Notify(offset, uint32(count)) + ce.pushValue(uint64(res)) + frame.pc++ + case wazeroir.OperationKindAtomicFence: + // Memory not required for fence only + if memoryInst != nil { + // An empty critical section can be used as a synchronization primitive, which is what + // fence is. Probably, there are no spectests or defined behavior to confirm this yet. 
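+			// Acquiring and immediately releasing the mutex pairs with the Lock/Unlock in
+			// every other atomic case above: writes published by another goroutine before
+			// its Unlock become visible after this point, which is the ordering a fence
+			// must provide.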
+ memoryInst.Mux.Lock() + memoryInst.Mux.Unlock() //nolint:staticcheck + } + frame.pc++ + case wazeroir.OperationKindAtomicLoad: + offset := ce.popMemoryOffset(op) + switch wazeroir.UnsignedType(op.B1) { + case wazeroir.UnsignedTypeI32: + if offset%4 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + val, ok := memoryInst.ReadUint32Le(offset) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + ce.pushValue(uint64(val)) + case wazeroir.UnsignedTypeI64: + if offset%8 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + val, ok := memoryInst.ReadUint64Le(offset) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + ce.pushValue(val) + } + frame.pc++ + case wazeroir.OperationKindAtomicLoad8: + offset := ce.popMemoryOffset(op) + memoryInst.Mux.Lock() + val, ok := memoryInst.ReadByte(offset) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + ce.pushValue(uint64(val)) + frame.pc++ + case wazeroir.OperationKindAtomicLoad16: + offset := ce.popMemoryOffset(op) + if offset%2 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + val, ok := memoryInst.ReadUint16Le(offset) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + ce.pushValue(uint64(val)) + frame.pc++ + case wazeroir.OperationKindAtomicStore: + val := ce.popValue() + offset := ce.popMemoryOffset(op) + switch wazeroir.UnsignedType(op.B1) { + case wazeroir.UnsignedTypeI32: + if offset%4 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + ok := memoryInst.WriteUint32Le(offset, uint32(val)) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + case wazeroir.UnsignedTypeI64: + if offset%8 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + ok := memoryInst.WriteUint64Le(offset, val) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + } + frame.pc++ + case wazeroir.OperationKindAtomicStore8: + val := byte(ce.popValue()) + offset := ce.popMemoryOffset(op) + memoryInst.Mux.Lock() + ok := memoryInst.WriteByte(offset, val) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + frame.pc++ + case wazeroir.OperationKindAtomicStore16: + val := uint16(ce.popValue()) + offset := ce.popMemoryOffset(op) + if offset%2 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + ok := memoryInst.WriteUint16Le(offset, val) + memoryInst.Mux.Unlock() + if !ok { + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + frame.pc++ + case wazeroir.OperationKindAtomicRMW: + val := ce.popValue() + offset := ce.popMemoryOffset(op) + switch wazeroir.UnsignedType(op.B1) { + case wazeroir.UnsignedTypeI32: + if offset%4 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadUint32Le(offset) + if !ok { + memoryInst.Mux.Unlock() + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + var newVal uint32 + switch wazeroir.AtomicArithmeticOp(op.B2) { + case wazeroir.AtomicArithmeticOpAdd: + newVal = old + uint32(val) + case wazeroir.AtomicArithmeticOpSub: + newVal = old - uint32(val) + case wazeroir.AtomicArithmeticOpAnd: + newVal = old & uint32(val) + case wazeroir.AtomicArithmeticOpOr: + newVal = old | uint32(val) + case 
wazeroir.AtomicArithmeticOpXor: + newVal = old ^ uint32(val) + case wazeroir.AtomicArithmeticOpNop: + newVal = uint32(val) + } + memoryInst.WriteUint32Le(offset, newVal) + memoryInst.Mux.Unlock() + ce.pushValue(uint64(old)) + case wazeroir.UnsignedTypeI64: + if offset%8 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadUint64Le(offset) + if !ok { + memoryInst.Mux.Unlock() + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + var newVal uint64 + switch wazeroir.AtomicArithmeticOp(op.B2) { + case wazeroir.AtomicArithmeticOpAdd: + newVal = old + val + case wazeroir.AtomicArithmeticOpSub: + newVal = old - val + case wazeroir.AtomicArithmeticOpAnd: + newVal = old & val + case wazeroir.AtomicArithmeticOpOr: + newVal = old | val + case wazeroir.AtomicArithmeticOpXor: + newVal = old ^ val + case wazeroir.AtomicArithmeticOpNop: + newVal = val + } + memoryInst.WriteUint64Le(offset, newVal) + memoryInst.Mux.Unlock() + ce.pushValue(old) + } + frame.pc++ + case wazeroir.OperationKindAtomicRMW8: + val := ce.popValue() + offset := ce.popMemoryOffset(op) + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadByte(offset) + if !ok { + memoryInst.Mux.Unlock() + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + arg := byte(val) + var newVal byte + switch wazeroir.AtomicArithmeticOp(op.B2) { + case wazeroir.AtomicArithmeticOpAdd: + newVal = old + arg + case wazeroir.AtomicArithmeticOpSub: + newVal = old - arg + case wazeroir.AtomicArithmeticOpAnd: + newVal = old & arg + case wazeroir.AtomicArithmeticOpOr: + newVal = old | arg + case wazeroir.AtomicArithmeticOpXor: + newVal = old ^ arg + case wazeroir.AtomicArithmeticOpNop: + newVal = arg + } + memoryInst.WriteByte(offset, newVal) + memoryInst.Mux.Unlock() + ce.pushValue(uint64(old)) + frame.pc++ + case wazeroir.OperationKindAtomicRMW16: + val := ce.popValue() + offset := ce.popMemoryOffset(op) + if offset%2 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadUint16Le(offset) + if !ok { + memoryInst.Mux.Unlock() + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + arg := uint16(val) + var newVal uint16 + switch wazeroir.AtomicArithmeticOp(op.B2) { + case wazeroir.AtomicArithmeticOpAdd: + newVal = old + arg + case wazeroir.AtomicArithmeticOpSub: + newVal = old - arg + case wazeroir.AtomicArithmeticOpAnd: + newVal = old & arg + case wazeroir.AtomicArithmeticOpOr: + newVal = old | arg + case wazeroir.AtomicArithmeticOpXor: + newVal = old ^ arg + case wazeroir.AtomicArithmeticOpNop: + newVal = arg + } + memoryInst.WriteUint16Le(offset, newVal) + memoryInst.Mux.Unlock() + ce.pushValue(uint64(old)) + frame.pc++ + case wazeroir.OperationKindAtomicRMWCmpxchg: + rep := ce.popValue() + exp := ce.popValue() + offset := ce.popMemoryOffset(op) + switch wazeroir.UnsignedType(op.B1) { + case wazeroir.UnsignedTypeI32: + if offset%4 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadUint32Le(offset) + if !ok { + memoryInst.Mux.Unlock() + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + if old == uint32(exp) { + memoryInst.WriteUint32Le(offset, uint32(rep)) + } + memoryInst.Mux.Unlock() + ce.pushValue(uint64(old)) + case wazeroir.UnsignedTypeI64: + if offset%8 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadUint64Le(offset) + if !ok { + memoryInst.Mux.Unlock() + 
panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + if old == exp { + memoryInst.WriteUint64Le(offset, rep) + } + memoryInst.Mux.Unlock() + ce.pushValue(old) + } + frame.pc++ + case wazeroir.OperationKindAtomicRMW8Cmpxchg: + rep := byte(ce.popValue()) + exp := byte(ce.popValue()) + offset := ce.popMemoryOffset(op) + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadByte(offset) + if !ok { + memoryInst.Mux.Unlock() + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + if old == exp { + memoryInst.WriteByte(offset, rep) + } + memoryInst.Mux.Unlock() + ce.pushValue(uint64(old)) + frame.pc++ + case wazeroir.OperationKindAtomicRMW16Cmpxchg: + rep := uint16(ce.popValue()) + exp := uint16(ce.popValue()) + offset := ce.popMemoryOffset(op) + if offset%2 != 0 { + panic(wasmruntime.ErrRuntimeUnalignedAtomic) + } + memoryInst.Mux.Lock() + old, ok := memoryInst.ReadUint16Le(offset) + if !ok { + memoryInst.Mux.Unlock() + panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess) + } + if old == exp { + memoryInst.WriteUint16Le(offset, rep) + } + memoryInst.Mux.Unlock() + ce.pushValue(uint64(old)) + frame.pc++ default: frame.pc++ } diff --git a/internal/integration_test/engine/adhoc_test.go b/internal/integration_test/engine/adhoc_test.go index ab3c19a6ec..94e91a8e29 100644 --- a/internal/integration_test/engine/adhoc_test.go +++ b/internal/integration_test/engine/adhoc_test.go @@ -66,7 +66,9 @@ func runAllTests(t *testing.T, tests map[string]func(t *testing.T, r wazero.Runt testf := testf // pin t.Run(name, func(t *testing.T) { t.Parallel() - testf(t, wazero.NewRuntimeWithConfig(testCtx, config)) + r := wazero.NewRuntimeWithConfig(testCtx, config) + defer r.Close(testCtx) + testf(t, r) }) } } diff --git a/internal/integration_test/engine/testdata/threads/add.wasm b/internal/integration_test/engine/testdata/threads/add.wasm new file mode 100644 index 0000000000000000000000000000000000000000..aaa543ff7fc296efef1289eb3cc4e2b7d268d5c4 GIT binary patch literal 189 zcmYk!I}XAy5QO2G*k&Vg0}3923jmQt8wzef4IBW9Ag&)V3qiq+|NXQ!ZqGXbF!Bn7 z($s!ZWFYNise3*4JAv-pfcBX$2NdPEwi&@OH^S(s5~d;bcrra(g%&OQd4XBwePq7~ UL(7*Kd&P05{<-$of4=d5-e4{tJOBUy literal 0 HcmV?d00001 diff --git a/internal/integration_test/engine/testdata/threads/add.wat b/internal/integration_test/engine/testdata/threads/add.wat new file mode 100644 index 0000000000..014862c722 --- /dev/null +++ b/internal/integration_test/engine/testdata/threads/add.wat @@ -0,0 +1,38 @@ +(module + (memory 1 1 shared) + + (func (export "run32") + (i32.atomic.rmw.add (i32.const 0) (i32.const 1)) + (drop) + ) + + (func (export "run64") + (i64.atomic.rmw.add (i32.const 0) (i64.const 1)) + (drop) + ) + + (func (export "run32_8") + (i32.atomic.rmw8.add_u (i32.const 0) (i32.const 1)) + (drop) + ) + + (func (export "run32_16") + (i32.atomic.rmw16.add_u (i32.const 0) (i32.const 1)) + (drop) + ) + + (func (export "run64_8") + (i64.atomic.rmw8.add_u (i32.const 0) (i64.const 1)) + (drop) + ) + + (func (export "run64_16") + (i64.atomic.rmw16.add_u (i32.const 0) (i64.const 1)) + (drop) + ) + + (func (export "run64_32") + (i64.atomic.rmw32.add_u (i32.const 0) (i64.const 1)) + (drop) + ) +) diff --git a/internal/integration_test/engine/testdata/threads/mutex.wasm b/internal/integration_test/engine/testdata/threads/mutex.wasm new file mode 100644 index 0000000000000000000000000000000000000000..f2aa4957fe60f4d47e1afcc3b9c0b98bf32c7852 GIT binary patch literal 738 zcmZ|N%}T^D6b0aOlh)SBRI8(QrtQEi-Ks-zISnZO;~O|`1zjkv`=Gu{@TLk4DOohR 
zJv1MOP`rM<69CHOmOAQq!4{Cq8bl)W#1ToNLa=8QBbW%U5|bBWKf@ktImV! La`X=@e6>`+7Dr^8 literal 0 HcmV?d00001 diff --git a/internal/integration_test/engine/testdata/threads/mutex.wat b/internal/integration_test/engine/testdata/threads/mutex.wat new file mode 100644 index 0000000000..0ec7b55054 --- /dev/null +++ b/internal/integration_test/engine/testdata/threads/mutex.wat @@ -0,0 +1,312 @@ +(module + (memory 1 1 shared) + + (func $tryLockMutex32 + (param $mutexAddr i32) (result i32) + ;; Attempt to grab the mutex. The cmpxchg operation atomically + ;; does the following: + ;; - Loads the value at $mutexAddr. + ;; - If it is 0 (unlocked), set it to 1 (locked). + ;; - Return the originally loaded value. + (i32.atomic.rmw.cmpxchg + (local.get $mutexAddr) ;; mutex address + (i32.const 0) ;; expected value (0 => unlocked) + (i32.const 1)) ;; replacement value (1 => locked) + + ;; The top of the stack is the originally loaded value. + ;; If it is 0, this means we acquired the mutex. We want to + ;; return the inverse (1 means mutex acquired), so use i32.eqz + ;; as a logical not. + (i32.eqz) + ) + + ;; Lock a mutex at the given address, retrying until successful. + (func $lockMutex32 + (param $mutexAddr i32) + (block $done + (loop $retry + ;; Try to lock the mutex. $tryLockMutex returns 1 if the mutex + ;; was locked, and 0 otherwise. + (call $tryLockMutex32 (local.get $mutexAddr)) + (br_if $done) + + ;; Wait for the other agent to finish with mutex. + (memory.atomic.wait32 + (local.get $mutexAddr) ;; mutex address + (i32.const 1) ;; expected value (1 => locked) + (i64.const -1)) ;; infinite timeout + + ;; memory.atomic.wait32 returns: + ;; 0 => "ok", woken by another agent. + ;; 1 => "not-equal", loaded value != expected value + ;; 2 => "timed-out", the timeout expired + ;; + ;; Since there is an infinite timeout, only 0 or 1 will be returned. In + ;; either case we should try to acquire the mutex again, so we can + ;; ignore the result. + (drop) + + ;; Try to acquire the lock again. + (br $retry) + ) + ) + ) + + ;; Unlock a mutex at the given address. + (func $unlockMutex32 + (param $mutexAddr i32) + ;; Unlock the mutex. + (i32.atomic.store + (local.get $mutexAddr) ;; mutex address + (i32.const 0)) ;; 0 => unlocked + + ;; Notify one agent that is waiting on this lock. + (drop + (memory.atomic.notify + (local.get $mutexAddr) ;; mutex address + (i32.const 1))) ;; notify 1 waiter + ) + + (func (export "run32") + (call $lockMutex32 (i32.const 0)) + (i32.store (i32.const 8) (i32.load (i32.const 8)) (i32.add (i32.const 1))) + (call $unlockMutex32 (i32.const 0)) + ) + + ;; Below functions are the same as above with different integer sizes so + ;; have comments elided see above to understand logic. 
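+  ;; As a rough, hypothetical Go analogue of the three 32-bit functions above
+  ;; (sync/atomic standing in for the cmpxchg/store instructions, and a yield loop
+  ;; standing in for memory.atomic.wait32/notify):
+  ;;
+  ;;   func tryLock(mu *int32) bool { return atomic.CompareAndSwapInt32(mu, 0, 1) }
+  ;;   func lock(mu *int32)         { for !tryLock(mu) { runtime.Gosched() } }
+  ;;   func unlock(mu *int32)       { atomic.StoreInt32(mu, 0) }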
+ + (func $tryLockMutex64 + (param $mutexAddr i32) (result i32) + (i64.atomic.rmw.cmpxchg + (local.get $mutexAddr) + (i64.const 0) + (i64.const 1)) + (i64.eqz) + ) + (func $lockMutex64 + (param $mutexAddr i32) + (block $done + (loop $retry + (call $tryLockMutex64 (local.get $mutexAddr)) + (br_if $done) + (memory.atomic.wait64 + (local.get $mutexAddr) + (i64.const 1) + (i64.const -1)) + (drop) + (br $retry) + ) + ) + ) + (func $unlockMutex64 + (param $mutexAddr i32) + (i64.atomic.store + (local.get $mutexAddr) + (i64.const 0)) + (drop + (memory.atomic.notify + (local.get $mutexAddr) + (i32.const 1))) + ) + (func (export "run64") + (call $lockMutex64 (i32.const 0)) + (i32.store (i32.const 8) (i32.load (i32.const 8)) (i32.add (i32.const 1))) + (call $unlockMutex64 (i32.const 0)) + ) + + (func $tryLockMutex32_8 + (param $mutexAddr i32) (result i32) + (i32.atomic.rmw8.cmpxchg_u + (local.get $mutexAddr) + (i32.const 0) + (i32.const 1)) + (i32.eqz) + ) + (func $lockMutex32_8 + (param $mutexAddr i32) + (block $done + (loop $retry + (call $tryLockMutex32_8 (local.get $mutexAddr)) + (br_if $done) + (memory.atomic.wait32 + (local.get $mutexAddr) + (i32.const 1) + (i64.const -1)) + (drop) + (br $retry) + ) + ) + ) + (func $unlockMutex32_8 + (param $mutexAddr i32) + (i32.atomic.store8 + (local.get $mutexAddr) + (i32.const 0)) + (drop + (memory.atomic.notify + (local.get $mutexAddr) + (i32.const 1))) + ) + (func (export "run32_8") + (call $lockMutex32_8 (i32.const 0)) + (i32.store (i32.const 8) (i32.load (i32.const 8)) (i32.add (i32.const 1))) + (call $unlockMutex32_8 (i32.const 0)) + ) + + (func $tryLockMutex32_16 + (param $mutexAddr i32) (result i32) + (i32.atomic.rmw16.cmpxchg_u + (local.get $mutexAddr) + (i32.const 0) + (i32.const 1)) + (i32.eqz) + ) + (func $lockMutex32_16 + (param $mutexAddr i32) + (block $done + (loop $retry + (call $tryLockMutex32_16 (local.get $mutexAddr)) + (br_if $done) + (memory.atomic.wait32 + (local.get $mutexAddr) + (i32.const 1) + (i64.const -1)) + (drop) + (br $retry) + ) + ) + ) + (func $unlockMutex32_16 + (param $mutexAddr i32) + (i32.atomic.store16 + (local.get $mutexAddr) + (i32.const 0)) + (drop + (memory.atomic.notify + (local.get $mutexAddr) + (i32.const 1))) + ) + (func (export "run32_16") + (call $lockMutex32_16 (i32.const 0)) + (i32.store (i32.const 8) (i32.load (i32.const 8)) (i32.add (i32.const 1))) + (call $unlockMutex32_16 (i32.const 0)) + ) + + (func $tryLockMutex64_8 + (param $mutexAddr i32) (result i32) + (i64.atomic.rmw8.cmpxchg_u + (local.get $mutexAddr) + (i64.const 0) + (i64.const 1)) + (i64.eqz) + ) + (func $lockMutex64_8 + (param $mutexAddr i32) + (block $done + (loop $retry + (call $tryLockMutex64_8 (local.get $mutexAddr)) + (br_if $done) + (memory.atomic.wait64 + (local.get $mutexAddr) + (i64.const 1) + (i64.const -1)) + (drop) + (br $retry) + ) + ) + ) + (func $unlockMutex64_8 + (param $mutexAddr i32) + (i64.atomic.store8 + (local.get $mutexAddr) + (i64.const 0)) + (drop + (memory.atomic.notify + (local.get $mutexAddr) + (i32.const 1))) + ) + (func (export "run64_8") + (call $lockMutex64_8 (i32.const 0)) + (i32.store (i32.const 8) (i32.load (i32.const 8)) (i32.add (i32.const 1))) + (call $unlockMutex64_8 (i32.const 0)) + ) + + (func $tryLockMutex64_16 + (param $mutexAddr i32) (result i32) + (i64.atomic.rmw16.cmpxchg_u + (local.get $mutexAddr) + (i64.const 0) + (i64.const 1)) + (i64.eqz) + ) + (func $lockMutex64_16 + (param $mutexAddr i32) + (block $done + (loop $retry + (call $tryLockMutex64_16 (local.get $mutexAddr)) + (br_if $done) + 
(memory.atomic.wait64 + (local.get $mutexAddr) + (i64.const 1) + (i64.const -1)) + (drop) + (br $retry) + ) + ) + ) + (func $unlockMutex64_16 + (param $mutexAddr i32) + (i64.atomic.store16 + (local.get $mutexAddr) + (i64.const 0)) + (drop + (memory.atomic.notify + (local.get $mutexAddr) + (i32.const 1))) + ) + (func (export "run64_16") + (call $lockMutex64_16 (i32.const 0)) + (i32.store (i32.const 8) (i32.load (i32.const 8)) (i32.add (i32.const 1))) + (call $unlockMutex64_16 (i32.const 0)) + ) + + (func $tryLockMutex64_32 + (param $mutexAddr i32) (result i32) + (i64.atomic.rmw32.cmpxchg_u + (local.get $mutexAddr) + (i64.const 0) + (i64.const 1)) + (i64.eqz) + ) + (func $lockMutex64_32 + (param $mutexAddr i32) + (block $done + (loop $retry + (call $tryLockMutex64_32 (local.get $mutexAddr)) + (br_if $done) + (memory.atomic.wait64 + (local.get $mutexAddr) + (i64.const 1) + (i64.const -1)) + (drop) + (br $retry) + ) + ) + ) + (func $unlockMutex64_32 + (param $mutexAddr i32) + (i64.atomic.store32 + (local.get $mutexAddr) + (i64.const 0)) + (drop + (memory.atomic.notify + (local.get $mutexAddr) + (i32.const 1))) + ) + (func (export "run64_32") + (call $lockMutex64_32 (i32.const 0)) + (i32.store (i32.const 8) (i32.load (i32.const 8)) (i32.add (i32.const 1))) + (call $unlockMutex64_32 (i32.const 0)) + ) +) diff --git a/internal/integration_test/engine/testdata/threads/sub.wasm b/internal/integration_test/engine/testdata/threads/sub.wasm new file mode 100644 index 0000000000000000000000000000000000000000..1ea85e0b93363642397b93c789e57700ef410e1d GIT binary patch literal 189 zcmYk!I}XAy5QO2G*k&Vg19~2T3WzM)P;dik-~dnras7x{2nufe@29nKd)^6vkyjv; zruLH}18FBq-RrU633TTMw9kAwpeVn!%?OUU5k^OqFb%24lj+$iv}oDS3(P9-Bl|@d UTE4{CD~>z$&$Yk)^Ns)W251K#Z2$lO literal 0 HcmV?d00001 diff --git a/internal/integration_test/engine/testdata/threads/sub.wat b/internal/integration_test/engine/testdata/threads/sub.wat new file mode 100644 index 0000000000..620947e3a6 --- /dev/null +++ b/internal/integration_test/engine/testdata/threads/sub.wat @@ -0,0 +1,38 @@ +(module + (memory 1 1 shared) + + (func (export "run32") + (i32.atomic.rmw.sub (i32.const 0) (i32.const 1)) + (drop) + ) + + (func (export "run64") + (i64.atomic.rmw.sub (i32.const 0) (i64.const 1)) + (drop) + ) + + (func (export "run32_8") + (i32.atomic.rmw8.sub_u (i32.const 0) (i32.const 1)) + (drop) + ) + + (func (export "run32_16") + (i32.atomic.rmw16.sub_u (i32.const 0) (i32.const 1)) + (drop) + ) + + (func (export "run64_8") + (i64.atomic.rmw8.sub_u (i32.const 0) (i64.const 1)) + (drop) + ) + + (func (export "run64_16") + (i64.atomic.rmw16.sub_u (i32.const 0) (i64.const 1)) + (drop) + ) + + (func (export "run64_32") + (i64.atomic.rmw32.sub_u (i32.const 0) (i64.const 1)) + (drop) + ) +) diff --git a/internal/integration_test/engine/testdata/threads/xor.wasm b/internal/integration_test/engine/testdata/threads/xor.wasm new file mode 100644 index 0000000000000000000000000000000000000000..7aacfc9690c3d21e9d30f4165c8163d86a2944b1 GIT binary patch literal 189 zcmZQbEY4+QU|?WmVN76PVCG;4vO$2Ag^`(&k=>1*wWu`D*a#>9q|Hni7}>%6cnbz5 z4hYN8jDZ;>Xl4QwWB~~RSwKNnC=;lejVqL$o57L6vHqVG6N3~ti0xGW&zc#^28!D- VAlSBy2sL&vaiDs8Byk6jIRF#KBXR%$ literal 0 HcmV?d00001 diff --git a/internal/integration_test/engine/testdata/threads/xor.wat b/internal/integration_test/engine/testdata/threads/xor.wat new file mode 100644 index 0000000000..a466662bdb --- /dev/null +++ b/internal/integration_test/engine/testdata/threads/xor.wat @@ -0,0 +1,38 @@ +(module + (memory 1 1 shared) + + (func (export "run32") + 
(i32.atomic.rmw.xor (i32.const 0) (i32.const 0xFFFFFFFF)) + (drop) + ) + + (func (export "run64") + (i64.atomic.rmw.xor (i32.const 0) (i64.const 0xFFFFFFFFFFFFFFFF)) + (drop) + ) + + (func (export "run32_8") + (i32.atomic.rmw8.xor_u (i32.const 0) (i32.const 0xFFFFFFFF)) + (drop) + ) + + (func (export "run32_16") + (i32.atomic.rmw16.xor_u (i32.const 0) (i32.const 0xFFFFFFFF)) + (drop) + ) + + (func (export "run64_8") + (i64.atomic.rmw8.xor_u (i32.const 0) (i64.const 0xFFFFFFFFFFFFFFFF)) + (drop) + ) + + (func (export "run64_16") + (i64.atomic.rmw16.xor_u (i32.const 0) (i64.const 0xFFFFFFFFFFFFFFFF)) + (drop) + ) + + (func (export "run64_32") + (i64.atomic.rmw32.xor_u (i32.const 0) (i64.const 0xFFFFFFFFFFFFFFFF)) + (drop) + ) +) diff --git a/internal/integration_test/engine/threads_test.go b/internal/integration_test/engine/threads_test.go new file mode 100644 index 0000000000..61e51dd752 --- /dev/null +++ b/internal/integration_test/engine/threads_test.go @@ -0,0 +1,280 @@ +package adhoc + +import ( + _ "embed" + "testing" + + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental" + "github.com/tetratelabs/wazero/internal/testing/hammer" + "github.com/tetratelabs/wazero/internal/testing/require" +) + +// We do not currently have hammer tests for bitwise and/or operations. The tests are designed to have +// input that changes deterministically every iteration, which is difficult to model with these operations. +// This is likely why atomic and/or do not show up in the wild very often if at all. +var ( + // memory.atomic.notify, memory.atomic.wait32, memory.atomic.wait64 + // i32.atomic.store, i32.atomic.rmw.cmpxchg + // i64.atomic.store, i64.atomic.rmw.cmpxchg + // i32.atomic.store8, i32.atomic.rmw8.cmpxchg_u + // i32.atomic.store16, i32.atomic.rmw16.cmpxchg_u + // i64.atomic.store8, i64.atomic.rmw8.cmpxchg_u + // i64.atomic.store16, i64.atomic.rmw16.cmpxchg_u + // i64.atomic.store32, i64.atomic.rmw32.cmpxchg_u + //go:embed testdata/threads/mutex.wasm + mutexWasm []byte + + // i32.atomic.rmw.add, i64.atomic.rmw.add, i32.atomic.rmw8.add_u, i32.atomic.rmw16.add_u, i64.atomic.rmw8.add_u, i64.atomic.rmw16.add_u, i64.atomic.rmw32.add_u + //go:embed testdata/threads/add.wasm + addWasm []byte + + // i32.atomic.rmw.sub, i64.atomic.rmw.sub, i32.atomic.rmw8.sub_u, i32.atomic.rmw16.sub_u, i64.atomic.rmw8.sub_u, i64.atomic.rmw16.sub_u, i64.atomic.rmw32.sub_u + //go:embed testdata/threads/sub.wasm + subWasm []byte + + // i32.atomic.rmw.xor, i64.atomic.rmw.xor, i32.atomic.rmw8.xor_u, i32.atomic.rmw16.xor_u, i64.atomic.rmw8.xor_u, i64.atomic.rmw16.xor_u, i64.atomic.rmw32.xor_u + //go:embed testdata/threads/xor.wasm + xorWasm []byte +) + +var threadTests = map[string]func(t *testing.T, r wazero.Runtime){ + "increment guarded by mutex": incrementGuardedByMutex, + "atomic add": atomicAdd, + "atomic sub": atomicSub, + "atomic xor": atomicXor, +} + +func TestThreadsNotEnabled(t *testing.T) { + r := wazero.NewRuntime(testCtx) + _, err := r.Instantiate(testCtx, mutexWasm) + require.EqualError(t, err, "section memory: shared memory requested but threads feature not enabled") +} + +func TestThreadsInterpreter(t *testing.T) { + runAllTests(t, threadTests, wazero.NewRuntimeConfigInterpreter().WithCoreFeatures(api.CoreFeaturesV2|experimental.CoreFeaturesThreads)) +} + +func incrementGuardedByMutex(t *testing.T, r wazero.Runtime) { + P := 8 // max count of goroutines + if testing.Short() { // Adjust down if `-test.short` + P = 4 + } + tests := []struct { + 
fn string + }{ + { + fn: "run32", + }, + { + fn: "run64", + }, + { + fn: "run32_8", + }, + { + fn: "run32_16", + }, + { + fn: "run64_8", + }, + { + fn: "run64_16", + }, + { + fn: "run64_32", + }, + } + for _, tc := range tests { + tt := tc + t.Run(tt.fn, func(t *testing.T) { + mod, err := r.Instantiate(testCtx, mutexWasm) + require.NoError(t, err) + + hammer.NewHammer(t, P, 30000).Run(func(name string) { + _, err := mod.ExportedFunction(tt.fn).Call(testCtx) + require.NoError(t, err) + }, func() {}) + + // Cheat that LE encoding can read both 32 and 64 bits + res, ok := mod.Memory().ReadUint32Le(8) + require.True(t, ok) + require.Equal(t, uint32(P*30000), res) + }) + } +} + +func atomicAdd(t *testing.T, r wazero.Runtime) { + P := 8 // max count of goroutines + if testing.Short() { // Adjust down if `-test.short` + P = 4 + } + tests := []struct { + fn string + exp int + }{ + { + fn: "run32", + exp: P * 30000, + }, + { + fn: "run64", + exp: P * 30000, + }, + { + fn: "run32_8", + // Overflows + exp: (P * 30000) % (1 << 8), + }, + { + fn: "run32_16", + // Overflows + exp: (P * 30000) % (1 << 16), + }, + { + fn: "run64_8", + // Overflows + exp: (P * 30000) % (1 << 8), + }, + { + fn: "run64_16", + // Overflows + exp: (P * 30000) % (1 << 16), + }, + { + fn: "run64_32", + exp: P * 30000, + }, + } + for _, tc := range tests { + tt := tc + t.Run(tt.fn, func(t *testing.T) { + mod, err := r.Instantiate(testCtx, addWasm) + require.NoError(t, err) + + hammer.NewHammer(t, P, 30000).Run(func(name string) { + _, err := mod.ExportedFunction(tt.fn).Call(testCtx) + require.NoError(t, err) + }, func() {}) + + // Cheat that LE encoding can read both 32 and 64 bits + res, ok := mod.Memory().ReadUint32Le(0) + require.True(t, ok) + require.Equal(t, uint32(tt.exp), res) + }) + } +} + +func atomicSub(t *testing.T, r wazero.Runtime) { + P := 8 // max count of goroutines + if testing.Short() { // Adjust down if `-test.short` + P = 4 + } + tests := []struct { + fn string + exp int + }{ + { + fn: "run32", + exp: -(P * 30000), + }, + { + fn: "run64", + exp: -(P * 30000), + }, + { + fn: "run32_8", + // Overflows + exp: (1 << 8) - ((P * 30000) % (1 << 8)), + }, + { + fn: "run32_16", + // Overflows + exp: (1 << 16) - ((P * 30000) % (1 << 16)), + }, + { + fn: "run64_8", + // Overflows + exp: (1 << 8) - ((P * 30000) % (1 << 8)), + }, + { + fn: "run64_16", + // Overflows + exp: (1 << 16) - ((P * 30000) % (1 << 16)), + }, + { + fn: "run64_32", + exp: -(P * 30000), + }, + } + for _, tc := range tests { + tt := tc + t.Run(tt.fn, func(t *testing.T) { + mod, err := r.Instantiate(testCtx, subWasm) + require.NoError(t, err) + + hammer.NewHammer(t, P, 30000).Run(func(name string) { + _, err := mod.ExportedFunction(tt.fn).Call(testCtx) + require.NoError(t, err) + }, func() {}) + + // Cheat that LE encoding can read both 32 and 64 bits + res, ok := mod.Memory().ReadUint32Le(0) + require.True(t, ok) + require.Equal(t, int32(tt.exp), int32(res)) + }) + } +} + +func atomicXor(t *testing.T, r wazero.Runtime) { + P := 8 // max count of goroutines + if testing.Short() { // Adjust down if `-test.short` + P = 4 + } + tests := []struct { + fn string + }{ + { + fn: "run32", + }, + { + fn: "run64", + }, + { + fn: "run32_8", + }, + { + fn: "run32_16", + }, + { + fn: "run64_8", + }, + { + fn: "run64_16", + }, + { + fn: "run64_32", + }, + } + for _, tc := range tests { + tt := tc + t.Run(tt.fn, func(t *testing.T) { + mod, err := r.Instantiate(testCtx, xorWasm) + require.NoError(t, err) + + mod.Memory().WriteUint32Le(0, 12345) + + hammer.NewHammer(t, 
P, 30000).Run(func(name string) { + _, err := mod.ExportedFunction(tt.fn).Call(testCtx) + require.NoError(t, err) + }, func() {}) + + // Cheat that LE encoding can read both 32 and 64 bits + res, ok := mod.Memory().ReadUint32Le(0) + require.True(t, ok) + // Even number of iterations, the value should be unchanged. + require.Equal(t, uint32(12345), res) + }) + } +} diff --git a/internal/integration_test/spectest/spectest.go b/internal/integration_test/spectest/spectest.go index 77c16d596a..87bc6fa138 100644 --- a/internal/integration_test/spectest/spectest.go +++ b/internal/integration_test/spectest/spectest.go @@ -287,6 +287,8 @@ func (c command) expectedError() (err error) { panic("unreachable") } switch c.Text { + case "expected shared memory": + err = wasmruntime.ErrRuntimeExpectedSharedMemory case "out of bounds memory access": err = wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess case "indirect call type mismatch", "indirect call": @@ -299,6 +301,8 @@ func (c command) expectedError() (err error) { err = wasmruntime.ErrRuntimeInvalidConversionToInteger case "integer divide by zero": err = wasmruntime.ErrRuntimeIntegerDivideByZero + case "unaligned atomic": + err = wasmruntime.ErrRuntimeUnalignedAtomic case "unreachable": err = wasmruntime.ErrRuntimeUnreachable default: @@ -336,7 +340,7 @@ func Run(t *testing.T, testDataFS embed.FS, ctx context.Context, config wazero.R // If the go:embed path resolution was wrong, this fails. // https://github.com/tetratelabs/wazero/issues/247 - require.True(t, len(caseNames) > 1, "len(caseNames)=%d (not greater than one)", len(caseNames)) + require.True(t, len(caseNames) > 0, "len(caseNames)=%d (not greater than zero)", len(caseNames)) for _, f := range caseNames { RunCase(t, testDataFS, f, ctx, config, -1, 0, math.MaxInt) diff --git a/internal/integration_test/spectest/threads/atomic.wast.patch b/internal/integration_test/spectest/threads/atomic.wast.patch new file mode 100644 index 0000000000..6b60b7fa7d --- /dev/null +++ b/internal/integration_test/spectest/threads/atomic.wast.patch @@ -0,0 +1,49 @@ +diff --git a/internal/integration_test/spectest/threads/testdata/atomic.wast b/internal/integration_test/spectest/threads/testdata/atomic.wast +index 66ad0ebb..40259a9a 100644 +--- a/internal/integration_test/spectest/threads/testdata/atomic.wast ++++ b/internal/integration_test/spectest/threads/testdata/atomic.wast +@@ -324,7 +324,7 @@ + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i32.atomic.rmw8.cmpxchg_u" (i32.const 0) (i32.const 0x11111111) (i32.const 0xcdcdcdcd)) (i32.const 0x11)) +-(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111)) ++(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111cd)) + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i32.atomic.rmw16.cmpxchg_u" (i32.const 0) (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111)) +@@ -332,7 +332,7 @@ + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i32.atomic.rmw16.cmpxchg_u" (i32.const 0) (i32.const 0x11111111) (i32.const 0xcafecafe)) (i32.const 0x1111)) +-(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111)) ++(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111cafe)) + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i64.atomic.rmw8.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11)) +@@ -340,7 +340,7 @@ + + 
(invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i64.atomic.rmw8.cmpxchg_u" (i32.const 0) (i64.const 0x1111111111111111) (i64.const 0x4242424242424242)) (i64.const 0x11)) +-(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111)) ++(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111142)) + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i64.atomic.rmw16.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111)) +@@ -348,7 +348,7 @@ + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i64.atomic.rmw16.cmpxchg_u" (i32.const 0) (i64.const 0x1111111111111111) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111)) +-(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111)) ++(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111beef)) + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i64.atomic.rmw32.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111)) +@@ -356,7 +356,7 @@ + + (invoke "init" (i64.const 0x1111111111111111)) + (assert_return (invoke "i64.atomic.rmw32.cmpxchg_u" (i32.const 0) (i64.const 0x1111111111111111) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111)) +-(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111)) ++(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111cabba6e5)) + + ;; *.atomic.rmw*.cmpxchg (compare true) + diff --git a/internal/integration_test/spectest/threads/spec_test.go b/internal/integration_test/spectest/threads/spec_test.go new file mode 100644 index 0000000000..d6fc6b5d1d --- /dev/null +++ b/internal/integration_test/spectest/threads/spec_test.go @@ -0,0 +1,32 @@ +package spectest + +import ( + "context" + "embed" + "testing" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental" + "github.com/tetratelabs/wazero/internal/engine/compiler" + "github.com/tetratelabs/wazero/internal/engine/interpreter" + "github.com/tetratelabs/wazero/internal/integration_test/spectest" + "github.com/tetratelabs/wazero/internal/platform" +) + +//go:embed testdata/*.wasm +//go:embed testdata/*.json +var testcases embed.FS + +const enabledFeatures = api.CoreFeaturesV2 | experimental.CoreFeaturesThreads + +func TestCompiler(t *testing.T) { + t.Skip("compiler not implemented yet") + if !platform.CompilerSupported() { + t.Skip() + } + spectest.Run(t, testcases, context.Background(), nil, compiler.NewEngine, enabledFeatures) +} + +func TestInterpreter(t *testing.T) { + spectest.Run(t, testcases, context.Background(), nil, interpreter.NewEngine, enabledFeatures) +} diff --git a/internal/integration_test/spectest/threads/testdata/atomic.0.wasm b/internal/integration_test/spectest/threads/testdata/atomic.0.wasm new file mode 100644 index 0000000000000000000000000000000000000000..f10a2364abfdef1237c52a01f6c50e706ca21cab GIT binary patch literal 3307 zcma)8*-{!o6z!fFU;vdJ1U2HmkFp5kj@kELhyN*fFq`~VJSK*AJQec%Fp@CZo1 ztTC=}u4$TX=(=GTKEKcB_xt~22$+F@X_`KR>zoIEhigXrrrl>O(k>OPdcWIgH>~S! 
zy~%i_T`3briz{pFFZWDOC}g1mtPxkt&Qf1OA>E~Zl`R!vZ6GSe-u1h^7Bi!g-J^mr zb(PIRa!Yqvp~Av3)oZgUx#Mu*^cYzxvKcwD*ZFDHn@tvxR}RT&N-6}T?JR{~Czhss zp(2EMN-8E95^2hh3`s?Bo|?NwN-cSJ|COcHk`9>}D+w%=oEY&uS568ldWHinVrIz9UN|L(dp3RYh^CdSSM*zt!m6x^<6~T}HxjmKL*z3Y0#{Q4Jc1JP=Q_w4Q7{ z%TbLO&lM5cW9midm#-FNmn_e!It!aHK1DS#i4B=J6HJ1ONpi@fhD;h+2gnwXEn-3mr)3N~L|X-IRb*{sk1*jFrzaS8incSfog;gJYz-4Garzv? zUZCwI+Fl|18re6HFy+1)dF|<~4BkQDNO=$D5%>W15%?%8pJe4Tq_MvN`8A3d0mmQ8-QE422O2;nKN% z#lA5L;}j++Oj4Ml5H6;i43|@evlM12%uzT;;XH+KY2{?NxH4R%Fi+tUh07GKPzV=V zPKHY@!*vQbDBPrQi^6RR;j+ugaN%XRN1;XGK7|Jq7AS-dft(B<0~wYnJfyHfVUn+a literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.1.wasm b/internal/integration_test/spectest/threads/testdata/atomic.1.wasm new file mode 100644 index 0000000000000000000000000000000000000000..6abaa998fc9c6a3374ad460de81f8b8b4f9b4cea GIT binary patch literal 265 zcmZ{eu?~VT6h-fAOHgohapMb!QKPdTVBDN(pe9W$HCR*@`sc(37h`mJ=iU3x@E~u7 z0MJ4d)T3bDlP|dU{fIt*M*_scBu*3`gwVBiz}zTfn=Y5m8eQhKb^6?azUqh6ZoLBg zQExZk$x=+QM}{3ongnJvfFYfYYsLop3@k`EiCP-Pj^QapcX?@5aD=R?hN9B8wF}le UPSo?NN~bCoJ8hJ09shrN0b4FW761SM literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.10.wasm b/internal/integration_test/spectest/threads/testdata/atomic.10.wasm new file mode 100644 index 0000000000000000000000000000000000000000..6372394c16d128389d884bbd06068bd1933d3cc3 GIT binary patch literal 43 xcmZQbEY4+QU|?WmVN76PU}j=u;NoWFWN>8oC(Oto#m&IQl9!m9%ESy*0|09<1p5F0 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.11.wasm b/internal/integration_test/spectest/threads/testdata/atomic.11.wasm new file mode 100644 index 0000000000000000000000000000000000000000..97c6a5d452bfcb28ab1462e79ca042b8e490e128 GIT binary patch literal 43 xcmZQbEY4+QU|?WmVN76PU}j=u;NoWFWN>8oC(6hm#m&IQl9!m9%ESy*0|0AM1pNR2 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.12.wasm b/internal/integration_test/spectest/threads/testdata/atomic.12.wasm new file mode 100644 index 0000000000000000000000000000000000000000..8d6c7982cf493e805dfc389baae235ddd3bade0b GIT binary patch literal 43 xcmZQbEY4+QU|?WmVN76PU}j=u;NoWFWN>8oC&t7e#m&IQl9!m9%ESy*0|0At1pfd4 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.13.wasm b/internal/integration_test/spectest/threads/testdata/atomic.13.wasm new file mode 100644 index 0000000000000000000000000000000000000000..cd71364c958b13d3763170f1881c0b6c66f5ec44 GIT binary patch literal 44 wcmZQbEY4+QU|?WmVN76PU}j=u;NoHAVsHe)f8tCG+zebSd5O8HOw2$<0B{opDF6Tf literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.14.wasm b/internal/integration_test/spectest/threads/testdata/atomic.14.wasm new file mode 100644 index 0000000000000000000000000000000000000000..0cd1e79a6e4d9b0224a88313641d56062619e33f GIT binary patch literal 44 ycmZQbEY4+QU|?WmVN76PU}j=u;NoHAVsK<|V)!S)%)rgS#gdnpo65utR0IHUMFlJX literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.15.wasm b/internal/integration_test/spectest/threads/testdata/atomic.15.wasm new file mode 100644 index 0000000000000000000000000000000000000000..72d14d418b543a71ea00f9e4ed3bf9109c9d682a GIT binary patch literal 44 wcmZQbEY4+QU|?WmVN76PU}j=u;NoHAVsHe)e^QJL+zebSd5O8HOw2$<0B|$~D*ylh literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.16.wasm b/internal/integration_test/spectest/threads/testdata/atomic.16.wasm 
new file mode 100644 index 0000000000000000000000000000000000000000..10a75acee912e639c18e32fac8b71e5469573d77 GIT binary patch literal 44 ycmZQbEY4+QU|?WmVN76PU}j=u;NoHAVsK<|V)!S+$iU6O#gdnpo65utR0IHUX9X?* literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.17.wasm b/internal/integration_test/spectest/threads/testdata/atomic.17.wasm new file mode 100644 index 0000000000000000000000000000000000000000..1ce5767151c620b92ed98bd18a5e0b94a8df36ae GIT binary patch literal 44 ycmZQbEY4+QU|?WmVN76PU}j=u;NoHAVsK<|V)!S^#K6tK#gdnpo65utR0IHUh6OMH literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.18.wasm b/internal/integration_test/spectest/threads/testdata/atomic.18.wasm new file mode 100644 index 0000000000000000000000000000000000000000..dc118fd72fbac10823785306dec03eeefec1c184 GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;e{xI=Qrrw&EP08!sZ7j3RRDF71xx?{ literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.19.wasm b/internal/integration_test/spectest/threads/testdata/atomic.19.wasm new file mode 100644 index 0000000000000000000000000000000000000000..2f960972b7b076538411d2677697b51f6f3371b0 GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)!S|%pk?hz{Qf6n48MP3{(XGb;box literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.2.wasm b/internal/integration_test/spectest/threads/testdata/atomic.2.wasm new file mode 100644 index 0000000000000000000000000000000000000000..fae839549b5df0879f6d78b172483cd59d397e48 GIT binary patch literal 265 zcmZ{ezYc;h5XQf2OHgohapMVyQKPdDVBDN(pe9W$HCR*@`sTz27i08>`~KW-ILMnJ z0JIPV^(dJ42m3;(Pds+r_UYetA0rB)+?|d z^>zcEEX5RiWY}?}NzjZ2Fr>55W^AC(zygGmsHIWt7@k6OmzP!rM#!paC@O7RyFk6; TL_M#nbgE*p(?;3W@&Bh6S$04a literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.20.wasm b/internal/integration_test/spectest/threads/testdata/atomic.20.wasm new file mode 100644 index 0000000000000000000000000000000000000000..cf50378e7246955494b0c8f74339aa809720056c GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;e~OF@Qrrw&EP08!sZ7j3RRDFg1x^3} literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.21.wasm b/internal/integration_test/spectest/threads/testdata/atomic.21.wasm new file mode 100644 index 0000000000000000000000000000000000000000..20b222427971167b7e76d2784e07fbc342d6a375 GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)&=b$RNedz{Qf6n48MP3{(XGb?OCC literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.22.wasm b/internal/integration_test/spectest/threads/testdata/atomic.22.wasm new file mode 100644 index 0000000000000000000000000000000000000000..d392d3b7d44855f0eca4f6fe8feb7a999c4231ad GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)&=R#304Zz{Qf6n48MP3{(XGb_)el literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.23.wasm b/internal/integration_test/spectest/threads/testdata/atomic.23.wasm new file mode 100644 index 0000000000000000000000000000000000000000..b62233d4becabe88bbb03d006d30f4aa301c9d57 GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;f2vFjQrrw&EP08!sZ7j3RRDGf1ycY3 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.24.wasm b/internal/integration_test/spectest/threads/testdata/atomic.24.wasm new file mode 100644 index 
0000000000000000000000000000000000000000..0b65cb9e0e7ef88b5f507447c607f6a09743906a GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)&=V%pk?hz{Qf6n48MP3{(XGc0C1F literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.25.wasm b/internal/integration_test/spectest/threads/testdata/atomic.25.wasm new file mode 100644 index 0000000000000000000000000000000000000000..c92c6fe0d9659347003ef50d3af3cf3d5497d9e4 GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;e;SMoQrrw&EP08!sZ7j3RRDG?1yuk5 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.26.wasm b/internal/integration_test/spectest/threads/testdata/atomic.26.wasm new file mode 100644 index 0000000000000000000000000000000000000000..2419c530c56793e99ebf118df5cb10a29df171c4 GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)&=U$RNedz{Qf6n48MP3{(XGc3}lr literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.27.wasm b/internal/integration_test/spectest/threads/testdata/atomic.27.wasm new file mode 100644 index 0000000000000000000000000000000000000000..0a1ce7227d9b6bc81a19432caf43824d1c59a7ff GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)&=c#304Zz{Qf6n48MP3{(XGc7g?3 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.28.wasm b/internal/integration_test/spectest/threads/testdata/atomic.28.wasm new file mode 100644 index 0000000000000000000000000000000000000000..86cc77e50e41039d8d242d77e556860ad1cbf9e1 GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;e>zMIQrrw&EP08!sZ7j3RRDH>1zG?A literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.29.wasm b/internal/integration_test/spectest/threads/testdata/atomic.29.wasm new file mode 100644 index 0000000000000000000000000000000000000000..12f59aca023a30390e8b587ba917ea2ab5f323df GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)&=a%pk?hz{Qf6n48MP3{(XGcC-au literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.3.wasm b/internal/integration_test/spectest/threads/testdata/atomic.3.wasm new file mode 100644 index 0000000000000000000000000000000000000000..9b42a7918b23edffb3e60bdaa2b926221e5ee4b2 GIT binary patch literal 756 zcmb7?$xZ@65Qe{=8B{7@nWD!{e)EJi-g&3C@eFiz0c)^P==#vqv2O7(xJ(sVl z|F3$IZYEz-1DL?Pfj=Aw|7%48BP;eD4C0LBo8eK2Cqf=GGp-sVmFLS|)TWZQA5v=B zOB>hAEGcVqvH9ejM~E^g@6S;t70j|*{Kb-F7e7?CBP@$ov4<`CReQ*yUsGInc^Tet zEN)Y|EtPL8?zp_TUB}|~l-pPNq2gng7x%=mxTnfJQ~7hn7cMXErDJiglzXl6H;Qjv zUfhvmamUKNQ~7(v4>mu)Y_T6*6<+p}^_P3k*0NQ&_wuKDwH_bA=VbcsP2>q0c|<+| z5ms1bjVLkVtdn4aO_HS8BF#1#vgF9KLxEk2lqgeSk1G4rsMDay0WA(`)1ga`Bl`RR Djp1Y4 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.30.wasm b/internal/integration_test/spectest/threads/testdata/atomic.30.wasm new file mode 100644 index 0000000000000000000000000000000000000000..c490b347d74a0054796748ac22d4c6bc7c7d0fec GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;fBK9JQrrw&EP08!sZ7j3RRDIP1zZ3C literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.31.wasm b/internal/integration_test/spectest/threads/testdata/atomic.31.wasm new file mode 100644 index 0000000000000000000000000000000000000000..ffe5ce4b5d1c0649663df628e53f38d39065d453 GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)$ps$RNedz{Qf6n48MP3{(XGcGv}9 literal 0 HcmV?d00001 diff 
--git a/internal/integration_test/spectest/threads/testdata/atomic.32.wasm b/internal/integration_test/spectest/threads/testdata/atomic.32.wasm new file mode 100644 index 0000000000000000000000000000000000000000..3385de893e9674f5647c8a2a862701c40a54f2ff GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)$po#304Zz{Qf6n48MP3{(XGcKHQi literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.33.wasm b/internal/integration_test/spectest/threads/testdata/atomic.33.wasm new file mode 100644 index 0000000000000000000000000000000000000000..4931ccd2cb687a4897dfdd155a5dcb29f808214f GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;f5uDO literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.39.wasm b/internal/integration_test/spectest/threads/testdata/atomic.39.wasm new file mode 100644 index 0000000000000000000000000000000000000000..30ec3c7d14bfb2b5058691889e4ddfc03f35e3af GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)$px%pk?hz{Qf6n48MP3{(XGccKMr literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.4.wasm b/internal/integration_test/spectest/threads/testdata/atomic.4.wasm new file mode 100644 index 0000000000000000000000000000000000000000..b0ab85501245df1e2de429d4d87e0fa2dbdd3fbe GIT binary patch literal 50 zcmWN{K@I>B5JkcFH4$8rNZYY-f0?OuG42+is16oHE=tSKbf{U0XEbU&V|%c_exnBl Ao&W#< literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.40.wasm b/internal/integration_test/spectest/threads/testdata/atomic.40.wasm new file mode 100644 index 0000000000000000000000000000000000000000..415420c3e56b86d8ce54492df10d0b1b40d99a18 GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;f3}PaQrrw&EP08!sZ7j3RRDL81!@2Q literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.41.wasm b/internal/integration_test/spectest/threads/testdata/atomic.41.wasm new file mode 100644 index 0000000000000000000000000000000000000000..5ca00263428daaafbbdc984c795af4fc1f4c0d06 GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)$pz$RNedz{Qf6n48MP3{(XGcg6*6 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.42.wasm b/internal/integration_test/spectest/threads/testdata/atomic.42.wasm new file mode 100644 index 0000000000000000000000000000000000000000..9ee652ac29c28668553b0431bed3607bb4ce00e8 GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)*C4#304Zz{Qf6n48MP3{(XGcjpCf literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.43.wasm b/internal/integration_test/spectest/threads/testdata/atomic.43.wasm new file mode 100644 index 0000000000000000000000000000000000000000..11c714c0ea7235cc9a8ee5b309e01296c400ffca GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;e~wHHQrrw&EP08!sZ7j3RRDM71#bWV literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.44.wasm b/internal/integration_test/spectest/threads/testdata/atomic.44.wasm new file mode 100644 index 0000000000000000000000000000000000000000..a02870a75a7ac42632c63a5c2618ac8a7d092faa GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)*C8%pk?hz{Qf6n48MP3{(XGco_w9 literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.45.wasm b/internal/integration_test/spectest/threads/testdata/atomic.45.wasm new file mode 100644 index 
0000000000000000000000000000000000000000..84c1ae33df55db8294703f1cea6638511e0ebc97 GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;e=dv+Qrrw&EP08!sZ7j3RRDMg1#tiX literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.46.wasm b/internal/integration_test/spectest/threads/testdata/atomic.46.wasm new file mode 100644 index 0000000000000000000000000000000000000000..70dd2ceeaa7080e71298ea2a1b698a0d89e3b504 GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)*CA$RNedz{Qf6n48MP3{(XGcs&Jl literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.47.wasm b/internal/integration_test/spectest/threads/testdata/atomic.47.wasm new file mode 100644 index 0000000000000000000000000000000000000000..7a0bb72be08483f4371d66743f8c0264f989b5fd GIT binary patch literal 45 zcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^iP1V)*CI#304Zz{Qf6n48MP3{(XGcwPl| literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.48.wasm b/internal/integration_test/spectest/threads/testdata/atomic.48.wasm new file mode 100644 index 0000000000000000000000000000000000000000..3ba78ffe3626cb335594b4220002cd36e64af93d GIT binary patch literal 47 xcmZQbEY4+QU|?WmVN76PU}j=u;NoZGWpD(;e;!N>Qrrw&EP08!sZ7j3bpV2c1-k$M literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.49.wasm b/internal/integration_test/spectest/threads/testdata/atomic.49.wasm new file mode 100644 index 0000000000000000000000000000000000000000..34627f5afda8e7538d2e8c151b4101c132092e7f GIT binary patch literal 47 zcmZQbEY4+QU|?WmVN76PU}j=u;NoZGWpHG00>Xcu%nVZ83|uUEiMgpv%s_Pjg3blM literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.5.wasm b/internal/integration_test/spectest/threads/testdata/atomic.5.wasm new file mode 100644 index 0000000000000000000000000000000000000000..92fc37f7b3aef3facf4db38bd5296692d24d5b05 GIT binary patch literal 45 xcmZQbEY4+QU|?WmVN76PU}j=u;NoTEW^e?;e+*0vQrrw&EP08!sZ7j3RRD971ug&p literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.50.wasm b/internal/integration_test/spectest/threads/testdata/atomic.50.wasm new file mode 100644 index 0000000000000000000000000000000000000000..9cb7d1d911910c80bb147d402f9049ea1aaabe0c GIT binary patch literal 47 xcmZQbEY4+QU|?WmVN76PU}j=u;NoZGWpD(;f8LA?Qrrw&EP08!sZ7j3bpV2<1-$?O literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.51.wasm b/internal/integration_test/spectest/threads/testdata/atomic.51.wasm new file mode 100644 index 0000000000000000000000000000000000000000..16307598f08f9a14ae739c9ef80501cdb4584eda GIT binary patch literal 47 zcmZQbEY4+QU|?WmVN76PU}j=u;NoZGWpHG00>Xd3j0{rT3|uUEiMgpv%s_Pjg7O8y literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.52.wasm b/internal/integration_test/spectest/threads/testdata/atomic.52.wasm new file mode 100644 index 0000000000000000000000000000000000000000..8f89610dec8785920a8c7fab53ecb142d2c39f99 GIT binary patch literal 47 zcmZQbEY4+QU|?WmVN76PU}j=u;NoZGWpHG00>XcOObk-o3|uUEiMgpv%s_PjgA)bA literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.6.wasm b/internal/integration_test/spectest/threads/testdata/atomic.6.wasm new file mode 100644 index 0000000000000000000000000000000000000000..040840d250c09821028cbbe2f8c5d8a8e5cf074e GIT binary patch literal 47 
zcmZQbEY4+QU|?WmVN76PU}j=u;NoZGWpD&SCx(BFObk-o3|uUEiMgpv%s_Pjec%Ok literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.7.wasm b/internal/integration_test/spectest/threads/testdata/atomic.7.wasm new file mode 100644 index 0000000000000000000000000000000000000000..0e047d8f9e28ac8950b604e14d3b2b94ef1c2aa4 GIT binary patch literal 47 zcmZQbEY4+QU|?WmVN76PU}j=u;NoZGWpHG00>Xbx%nVZ83|uUEiMgpv%s_PjeisFJ literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.8.wasm b/internal/integration_test/spectest/threads/testdata/atomic.8.wasm new file mode 100644 index 0000000000000000000000000000000000000000..a4a26d28aa2b7c0c858aead55bbbe212241d0415 GIT binary patch literal 43 xcmZQbEY4+QU|?WmVN76PU}j=u;NoWFWN>8oC&0uY#m&IQl9!m9%ESy*0|09c1o;2} literal 0 HcmV?d00001 diff --git a/internal/integration_test/spectest/threads/testdata/atomic.9.wasm b/internal/integration_test/spectest/threads/testdata/atomic.9.wasm new file mode 100644 index 0000000000000000000000000000000000000000..0733777c3e03f25c30acbf9695e313d7d6990500 GIT binary patch literal 43 xcmZQbEY4+QU|?WmVN76PU}j=u;NoWFWN>8oC& 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicMemoryWait32: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicMemoryWait64: + if 1< 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI32Load: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI64Load: + if 1< 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI32Load8U: + if 1< 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI64Load32U: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI32Store: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OpcodeAtomicI64Store: + if 1< 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); 
err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OpcodeAtomicI32Store8: + if 1< 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OpcodeAtomicI32Store16: + if 1< 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OpcodeAtomicI64Store8: + if 1< 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OpcodeAtomicI64Store16: + if 1< 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OpcodeAtomicI64Store32: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OpcodeAtomicI32RmwAdd, OpcodeAtomicI32RmwSub, OpcodeAtomicI32RmwAnd, OpcodeAtomicI32RmwOr, OpcodeAtomicI32RmwXor, OpcodeAtomicI32RmwXchg: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI32Rmw8AddU, OpcodeAtomicI32Rmw8SubU, OpcodeAtomicI32Rmw8AndU, OpcodeAtomicI32Rmw8OrU, OpcodeAtomicI32Rmw8XorU, OpcodeAtomicI32Rmw8XchgU: + if 1< 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI32Rmw16AddU, OpcodeAtomicI32Rmw16SubU, OpcodeAtomicI32Rmw16AndU, OpcodeAtomicI32Rmw16OrU, OpcodeAtomicI32Rmw16XorU, OpcodeAtomicI32Rmw16XchgU: + if 1< 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI64RmwAdd, OpcodeAtomicI64RmwSub, OpcodeAtomicI64RmwAnd, OpcodeAtomicI64RmwOr, OpcodeAtomicI64RmwXor, OpcodeAtomicI64RmwXchg: + if 1< 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI64Rmw8AddU, OpcodeAtomicI64Rmw8SubU, OpcodeAtomicI64Rmw8AndU, OpcodeAtomicI64Rmw8OrU, OpcodeAtomicI64Rmw8XorU, OpcodeAtomicI64Rmw8XchgU: + if 1< 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := 
valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI64Rmw16AddU, OpcodeAtomicI64Rmw16SubU, OpcodeAtomicI64Rmw16AndU, OpcodeAtomicI64Rmw16OrU, OpcodeAtomicI64Rmw16XorU, OpcodeAtomicI64Rmw16XchgU: + if 1< 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI64Rmw32AddU, OpcodeAtomicI64Rmw32SubU, OpcodeAtomicI64Rmw32AndU, OpcodeAtomicI64Rmw32OrU, OpcodeAtomicI64Rmw32XorU, OpcodeAtomicI64Rmw32XchgU: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI32RmwCmpxchg: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI32Rmw8CmpxchgU: + if 1< 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI32Rmw16CmpxchgU: + if 1< 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OpcodeAtomicI64RmwCmpxchg: + if 1< 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI64Rmw8CmpxchgU: + if 1< 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI64Rmw16CmpxchgU: + if 1< 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OpcodeAtomicI64Rmw32CmpxchgU: + if 1< 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := 
valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + default: + return fmt.Errorf("invalid atomic opcode: 0x%x", atomicOpcode) + } } else if op == OpcodeBlock { br.Reset(body[pc+1:]) bt, num, err := DecodeBlockType(m.TypeSection, br, enabledFeatures) diff --git a/internal/wasm/func_validation_test.go b/internal/wasm/func_validation_test.go index e571ad2e93..57edb5a372 100644 --- a/internal/wasm/func_validation_test.go +++ b/internal/wasm/func_validation_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental" "github.com/tetratelabs/wazero/internal/leb128" "github.com/tetratelabs/wazero/internal/testing/require" ) @@ -3678,3 +3679,1171 @@ func Test_SplitCallStack(t *testing.T) { }) } } + +func TestModule_funcValidation_Atomic(t *testing.T) { + t.Run("valid bytecode", func(t *testing.T) { + tests := []struct { + name string + body []byte + noDropBeforeReturn bool + }{ + { + name: "i32.atomic.load8_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Load8U, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.load16_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Load16U, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.load", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Load, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.load8_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load8U, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.load16_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load16U, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.load32_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load32U, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.load", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i32.atomic.store8", + body: []byte{ + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Store8, 0x0, 0x8, // alignment=2^0, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i32.atomic.store16", + body: []byte{ + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Store16, 0x1, 0x8, // alignment=2^1, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i32.atomic.store", + body: []byte{ + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Store, 0x2, 0x8, // alignment=2^2, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store8", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Store8, 0x0, 0x8, // alignment=2^0, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store16", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Store16, 0x1, 0x8, // alignment=2^1, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store32", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, 
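Every case in the validation switch above applies the same rule to the first immediate of an atomic instruction: the encoded alignment exponent may not exceed the natural alignment of the access, while the alignment of the actual address is left to the runtime check behind the unaligned-atomic trap mapped in the spectest harness earlier. A compact sketch of the rule, with validAtomicAlign as a hypothetical helper rather than the validator's API; the "bad alignment" test cases further down encode exactly the failing side of it:

package main

import "fmt"

// validAtomicAlign reports whether an encoded alignment exponent is acceptable
// for an atomic access of the given width in bytes: 2^alignExp must not exceed
// the natural alignment of the access.
func validAtomicAlign(alignExp, accessBytes uint) bool {
	return 1<<alignExp <= accessBytes
}

func main() {
	fmt.Println(validAtomicAlign(2, 4)) // true:  i32.atomic.load with alignment 2^2
	fmt.Println(validAtomicAlign(3, 4)) // false: 2^3 on a 4-byte access, "invalid memory alignment"
	fmt.Println(validAtomicAlign(0, 1)) // true:  i32.atomic.load8_u with alignment 2^0
}
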
OpcodeAtomicI64Store32, 0x2, 0x8, // alignment=2^2, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Store, 0x3, 0x8, // alignment=2^3, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i32.atomic.rmw8.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8AddU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16AddU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw.add", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwAdd, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8AddU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16AddU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32AddU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw.add", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwAdd, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8SubU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16SubU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw.sub", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwSub, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8SubU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16SubU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32SubU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw.sub", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwSub, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8AndU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16AndU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw.and", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwAnd, 0x2, 0x8, // 
alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8AndU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16AndU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32AndU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw.and", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwAnd, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.or", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8OrU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16OrU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw.or", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwOr, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8OrU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16OrU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32OrU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw.or", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwOr, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8XorU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16XorU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw.xor", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwXor, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8XorU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16XorU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32XorU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw.xor", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwXor, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.xchg_u", + body: []byte{ + OpcodeI32Const, 
0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8XchgU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16XchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw.xchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwXchg, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8XchgU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16XchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32XchgU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw.xchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwXchg, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8CmpxchgU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16CmpxchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw.cmpxchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwCmpxchg, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8CmpxchgU, 0x0, 0x8, // alignment=2^0, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16CmpxchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32CmpxchgU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw.cmpxchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwCmpxchg, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "memory.atomic.wait32", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicMemoryWait32, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "memory.atomic.wait64", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicMemoryWait64, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "memory.atomic.notify", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicMemoryNotify, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "memory.atomic.fence", + body: []byte{ + OpcodeAtomicPrefix, 
OpcodeAtomicFence, 0x0, + }, + noDropBeforeReturn: true, + }, + } + + for _, tt := range tests { + tc := tt + t.Run(tc.name, func(t *testing.T) { + body := append([]byte{}, tc.body...) + if !tt.noDropBeforeReturn { + body = append(body, OpcodeDrop) + } + body = append(body, OpcodeEnd) + m := &Module{ + TypeSection: []FunctionType{v_v}, + FunctionSection: []Index{0}, + CodeSection: []Code{{Body: body}}, + } + + t.Run("with memory", func(t *testing.T) { + err := m.validateFunction(&stacks{}, experimental.CoreFeaturesThreads, + 0, []Index{0}, nil, &Memory{}, []Table{}, nil, bytes.NewReader(nil)) + require.NoError(t, err) + }) + + t.Run("without memory", func(t *testing.T) { + err := m.validateFunction(&stacks{}, experimental.CoreFeaturesThreads, + 0, []Index{0}, nil, nil, []Table{}, nil, bytes.NewReader(nil)) + // Only fence doesn't require memory + if tc.name == "memory.atomic.fence" { + require.NoError(t, err) + } else { + require.Error(t, err, fmt.Sprintf("memory must exist for %s", tc.name)) + } + }) + }) + } + }) + + t.Run("atomic.fence bad immediate", func(t *testing.T) { + body := []byte{ + OpcodeAtomicPrefix, OpcodeAtomicFence, 0x1, + OpcodeEnd, + } + m := &Module{ + TypeSection: []FunctionType{v_v}, + FunctionSection: []Index{0}, + CodeSection: []Code{{Body: body}}, + } + err := m.validateFunction(&stacks{}, experimental.CoreFeaturesThreads, + 0, []Index{0}, nil, &Memory{}, []Table{}, nil, bytes.NewReader(nil)) + require.Error(t, err, "invalid immediate value for atomic.fence") + }) + + t.Run("bad alignment", func(t *testing.T) { + tests := []struct { + name string + body []byte + noDropBeforeReturn bool + }{ + { + name: "i32.atomic.load8_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Load8U, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.load16_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Load16U, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.load", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Load, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.load8_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load8U, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.load16_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load16U, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.load32_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load32U, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.load", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI64Load, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "i32.atomic.store8", + body: []byte{ + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Store8, 0x1, 0x8, // alignment=2^1, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i32.atomic.store16", + body: []byte{ + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Store16, 0x2, 0x8, // alignment=2^2, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i32.atomic.store", + body: []byte{ + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x0, + OpcodeAtomicPrefix, OpcodeAtomicI32Store, 0x3, 0x8, // alignment=2^3, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store8", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + 
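Two properties of memory.atomic.fence follow from the tests above: it is the only atomic instruction that validates without a memory section, and its single reserved immediate must be zero. A small sketch of the corresponding byte encodings, assuming the 0xFE prefix and 0x03 sub-opcode declared later in instruction.go:

package main

import "fmt"

func main() {
	// memory.atomic.fence is the atomic prefix, the fence sub-opcode, and one
	// reserved byte that must be zero.
	fence := []byte{0xFE, 0x03, 0x00}

	// A non-zero reserved byte, as in the "bad immediate" test above, is
	// rejected by the validator.
	badFence := []byte{0xFE, 0x03, 0x01}

	fmt.Printf("fence:     % x\n", fence)
	fmt.Printf("bad fence: % x\n", badFence)
}
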
OpcodeAtomicPrefix, OpcodeAtomicI64Store8, 0x1, 0x8, // alignment=2^1, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store16", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Store16, 0x2, 0x8, // alignment=2^2, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store32", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Store32, 0x3, 0x8, // alignment=2^3, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i64.atomic.store", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Store, 0x4, 0x8, // alignment=2^4, offset=8 + }, + noDropBeforeReturn: true, + }, + { + name: "i32.atomic.rmw8.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8AddU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16AddU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.rmw.add", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwAdd, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8AddU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16AddU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.add_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32AddU, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw.add", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwAdd, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8SubU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16SubU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.rmw.sub", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwSub, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8SubU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16SubU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.sub_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32SubU, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw.sub", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwSub, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 
0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8AndU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16AndU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.rmw.and", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwAnd, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8AndU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16AndU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.and_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32AndU, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw.and", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwAnd, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.or", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8OrU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16OrU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.rmw.or", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwOr, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8OrU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16OrU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.or_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32OrU, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw.or", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwOr, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8XorU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16XorU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.rmw.xor", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwXor, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8XorU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16XorU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + 
name: "i64.atomic.rmw32.xor_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32XorU, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw.xor", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwXor, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8XchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16XchgU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.rmw.xchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwXchg, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8XchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16XchgU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32XchgU, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw.xchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwXchg, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "i32.atomic.rmw8.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw8CmpxchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i32.atomic.rmw16.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI32Rmw16CmpxchgU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i32.atomic.rmw.cmpxchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI32Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI32RmwCmpxchg, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw8.xchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw8CmpxchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + }, + { + name: "i64.atomic.rmw16.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw16CmpxchgU, 0x2, 0x8, // alignment=2^2, offset=8 + }, + }, + { + name: "i64.atomic.rmw32.cmpxchg_u", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicI64Rmw32CmpxchgU, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "i64.atomic.rmw.cmpxchg", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicI64RmwCmpxchg, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "memory.atomic.wait32", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicMemoryWait32, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + { + name: "memory.atomic.wait64", + 
body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI64Const, 0x1, + OpcodeI64Const, 0x2, + OpcodeAtomicPrefix, OpcodeAtomicMemoryWait64, 0x4, 0x8, // alignment=2^4, offset=8 + }, + }, + { + name: "memory.atomic.notify", + body: []byte{ + OpcodeI32Const, 0x0, + OpcodeI32Const, 0x1, + OpcodeAtomicPrefix, OpcodeAtomicMemoryNotify, 0x3, 0x8, // alignment=2^3, offset=8 + }, + }, + } + + for _, tt := range tests { + tc := tt + t.Run(tc.name, func(t *testing.T) { + body := append([]byte{}, tc.body...) + if !tt.noDropBeforeReturn { + body = append(body, OpcodeDrop) + } + body = append(body, OpcodeEnd) + m := &Module{ + TypeSection: []FunctionType{v_v}, + FunctionSection: []Index{0}, + CodeSection: []Code{{Body: body}}, + } + err := m.validateFunction(&stacks{}, experimental.CoreFeaturesThreads, + 0, []Index{0}, nil, &Memory{}, []Table{}, nil, bytes.NewReader(nil)) + require.Error(t, err, "invalid memory alignment") + }) + } + }) +} diff --git a/internal/wasm/instruction.go b/internal/wasm/instruction.go index ded246660d..05e47673d6 100644 --- a/internal/wasm/instruction.go +++ b/internal/wasm/instruction.go @@ -280,6 +280,10 @@ const ( // OpcodeVecPrefix is the prefix of all vector isntructions introduced in // CoreFeatureSIMD. OpcodeVecPrefix Opcode = 0xfd + + // OpcodeAtomicPrefix is the prefix of all atomic instructions introduced in + // FeatureThreads. + OpcodeAtomicPrefix Opcode = 0xfe ) // OpcodeMisc represents opcodes of the miscellaneous operations. @@ -622,6 +626,157 @@ const ( OpcodeVecF64x2PromoteLowF32x4Zero OpcodeVec = 0x5f ) +// OpcodeAtomic represents an opcode of atomic instructions which has +// multi-byte encoding and is prefixed by OpcodeAtomicPrefix. +// +// These opcodes are toggled with experimental.CoreFeaturesThreads. +type OpcodeAtomic = byte + +const ( + // OpcodeAtomicMemoryNotify represents the instruction memory.atomic.notify. + OpcodeAtomicMemoryNotify OpcodeAtomic = 0x00 + // OpcodeAtomicMemoryWait32 represents the instruction memory.atomic.wait32. + OpcodeAtomicMemoryWait32 OpcodeAtomic = 0x01 + // OpcodeAtomicMemoryWait64 represents the instruction memory.atomic.wait64. + OpcodeAtomicMemoryWait64 OpcodeAtomic = 0x02 + // OpcodeAtomicFence represents the instruction atomic.fence. + OpcodeAtomicFence OpcodeAtomic = 0x03 + + // OpcodeAtomicI32Load represents the instruction i32.atomic.load. + OpcodeAtomicI32Load OpcodeAtomic = 0x10 + // OpcodeAtomicI64Load represents the instruction i64.atomic.load. + OpcodeAtomicI64Load OpcodeAtomic = 0x11 + // OpcodeAtomicI32Load8U represents the instruction i32.atomic.load8_u. + OpcodeAtomicI32Load8U OpcodeAtomic = 0x12 + // OpcodeAtomicI32Load16U represents the instruction i32.atomic.load16_u. + OpcodeAtomicI32Load16U OpcodeAtomic = 0x13 + // OpcodeAtomicI64Load8U represents the instruction i64.atomic.load8_u. + OpcodeAtomicI64Load8U OpcodeAtomic = 0x14 + // OpcodeAtomicI64Load16U represents the instruction i64.atomic.load16_u. + OpcodeAtomicI64Load16U OpcodeAtomic = 0x15 + // OpcodeAtomicI64Load32U represents the instruction i64.atomic.load32_u. + OpcodeAtomicI64Load32U OpcodeAtomic = 0x16 + // OpcodeAtomicI32Store represents the instruction i32.atomic.store. + OpcodeAtomicI32Store OpcodeAtomic = 0x17 + // OpcodeAtomicI64Store represents the instruction i64.atomic.store. + OpcodeAtomicI64Store OpcodeAtomic = 0x18 + // OpcodeAtomicI32Store8 represents the instruction i32.atomic.store8. + OpcodeAtomicI32Store8 OpcodeAtomic = 0x19 + // OpcodeAtomicI32Store16 represents the instruction i32.atomic.store16. 
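The constants above fix the binary layout that the validation tests spell out byte by byte: an atomic instruction is the 0xFE prefix, a one-byte sub-opcode, and, for everything except atomic.fence, a memarg holding the LEB128-encoded alignment exponent and offset. A short sketch assembling one such instruction; the constant values mirror the declarations above, while the local names are illustrative:

package main

import "fmt"

func main() {
	const (
		atomicPrefix  = 0xFE // OpcodeAtomicPrefix
		i32AtomicLoad = 0x10 // OpcodeAtomicI32Load
	)
	// i32.atomic.load with alignment 2^2 and static offset 8: prefix,
	// sub-opcode, then the memarg immediates (single-byte LEB128 values here).
	insn := []byte{atomicPrefix, i32AtomicLoad, 0x02, 0x08}
	fmt.Printf("% x\n", insn) // fe 10 02 08
}
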
+ OpcodeAtomicI32Store16 OpcodeAtomic = 0x1a + // OpcodeAtomicI64Store8 represents the instruction i64.atomic.store8. + OpcodeAtomicI64Store8 OpcodeAtomic = 0x1b + // OpcodeAtomicI64Store16 represents the instruction i64.atomic.store16. + OpcodeAtomicI64Store16 OpcodeAtomic = 0x1c + // OpcodeAtomicI64Store32 represents the instruction i64.atomic.store32. + OpcodeAtomicI64Store32 OpcodeAtomic = 0x1d + + // OpcodeAtomicI32RmwAdd represents the instruction i32.atomic.rmw.add. + OpcodeAtomicI32RmwAdd OpcodeAtomic = 0x1e + // OpcodeAtomicI64RmwAdd represents the instruction i64.atomic.rmw.add. + OpcodeAtomicI64RmwAdd OpcodeAtomic = 0x1f + // OpcodeAtomicI32Rmw8AddU represents the instruction i32.atomic.rmw8.add_u. + OpcodeAtomicI32Rmw8AddU OpcodeAtomic = 0x20 + // OpcodeAtomicI32Rmw16AddU represents the instruction i32.atomic.rmw16.add_u. + OpcodeAtomicI32Rmw16AddU OpcodeAtomic = 0x21 + // OpcodeAtomicI64Rmw8AddU represents the instruction i64.atomic.rmw8.add_u. + OpcodeAtomicI64Rmw8AddU OpcodeAtomic = 0x22 + // OpcodeAtomicI64Rmw16AddU represents the instruction i64.atomic.rmw16.add_u. + OpcodeAtomicI64Rmw16AddU OpcodeAtomic = 0x23 + // OpcodeAtomicI64Rmw32AddU represents the instruction i64.atomic.rmw32.add_u. + OpcodeAtomicI64Rmw32AddU OpcodeAtomic = 0x24 + + // OpcodeAtomicI32RmwSub represents the instruction i32.atomic.rmw.sub. + OpcodeAtomicI32RmwSub OpcodeAtomic = 0x25 + // OpcodeAtomicI64RmwSub represents the instruction i64.atomic.rmw.sub. + OpcodeAtomicI64RmwSub OpcodeAtomic = 0x26 + // OpcodeAtomicI32Rmw8SubU represents the instruction i32.atomic.rmw8.sub_u. + OpcodeAtomicI32Rmw8SubU OpcodeAtomic = 0x27 + // OpcodeAtomicI32Rmw16SubU represents the instruction i32.atomic.rmw16.sub_u. + OpcodeAtomicI32Rmw16SubU OpcodeAtomic = 0x28 + // OpcodeAtomicI64Rmw8SubU represents the instruction i64.atomic.rmw8.sub_u. + OpcodeAtomicI64Rmw8SubU OpcodeAtomic = 0x29 + // OpcodeAtomicI64Rmw16SubU represents the instruction i64.atomic.rmw16.sub_u. + OpcodeAtomicI64Rmw16SubU OpcodeAtomic = 0x2a + // OpcodeAtomicI64Rmw32SubU represents the instruction i64.atomic.rmw32.sub_u. + OpcodeAtomicI64Rmw32SubU OpcodeAtomic = 0x2b + + // OpcodeAtomicI32RmwAnd represents the instruction i32.atomic.rmw.and. + OpcodeAtomicI32RmwAnd OpcodeAtomic = 0x2c + // OpcodeAtomicI64RmwAnd represents the instruction i64.atomic.rmw.and. + OpcodeAtomicI64RmwAnd OpcodeAtomic = 0x2d + // OpcodeAtomicI32Rmw8AndU represents the instruction i32.atomic.rmw8.and_u. + OpcodeAtomicI32Rmw8AndU OpcodeAtomic = 0x2e + // OpcodeAtomicI32Rmw16AndU represents the instruction i32.atomic.rmw16.and_u. + OpcodeAtomicI32Rmw16AndU OpcodeAtomic = 0x2f + // OpcodeAtomicI64Rmw8AndU represents the instruction i64.atomic.rmw8.and_u. + OpcodeAtomicI64Rmw8AndU OpcodeAtomic = 0x30 + // OpcodeAtomicI64Rmw16AndU represents the instruction i64.atomic.rmw16.and_u. + OpcodeAtomicI64Rmw16AndU OpcodeAtomic = 0x31 + // OpcodeAtomicI64Rmw32AndU represents the instruction i64.atomic.rmw32.and_u. + OpcodeAtomicI64Rmw32AndU OpcodeAtomic = 0x32 + + // OpcodeAtomicI32RmwOr represents the instruction i32.atomic.rmw.or. + OpcodeAtomicI32RmwOr OpcodeAtomic = 0x33 + // OpcodeAtomicI64RmwOr represents the instruction i64.atomic.rmw.or. + OpcodeAtomicI64RmwOr OpcodeAtomic = 0x34 + // OpcodeAtomicI32Rmw8OrU represents the instruction i32.atomic.rmw8.or_u. + OpcodeAtomicI32Rmw8OrU OpcodeAtomic = 0x35 + // OpcodeAtomicI32Rmw16OrU represents the instruction i32.atomic.rmw16.or_u. 
+ OpcodeAtomicI32Rmw16OrU OpcodeAtomic = 0x36 + // OpcodeAtomicI64Rmw8OrU represents the instruction i64.atomic.rmw8.or_u. + OpcodeAtomicI64Rmw8OrU OpcodeAtomic = 0x37 + // OpcodeAtomicI64Rmw16OrU represents the instruction i64.atomic.rmw16.or_u. + OpcodeAtomicI64Rmw16OrU OpcodeAtomic = 0x38 + // OpcodeAtomicI64Rmw32OrU represents the instruction i64.atomic.rmw32.or_u. + OpcodeAtomicI64Rmw32OrU OpcodeAtomic = 0x39 + + // OpcodeAtomicI32RmwXor represents the instruction i32.atomic.rmw.xor. + OpcodeAtomicI32RmwXor OpcodeAtomic = 0x3a + // OpcodeAtomicI64RmwXor represents the instruction i64.atomic.rmw.xor. + OpcodeAtomicI64RmwXor OpcodeAtomic = 0x3b + // OpcodeAtomicI32Rmw8XorU represents the instruction i32.atomic.rmw8.xor_u. + OpcodeAtomicI32Rmw8XorU OpcodeAtomic = 0x3c + // OpcodeAtomicI32Rmw16XorU represents the instruction i32.atomic.rmw16.xor_u. + OpcodeAtomicI32Rmw16XorU OpcodeAtomic = 0x3d + // OpcodeAtomicI64Rmw8XorU represents the instruction i64.atomic.rmw8.xor_u. + OpcodeAtomicI64Rmw8XorU OpcodeAtomic = 0x3e + // OpcodeAtomicI64Rmw16XorU represents the instruction i64.atomic.rmw16.xor_u. + OpcodeAtomicI64Rmw16XorU OpcodeAtomic = 0x3f + // OpcodeAtomicI64Rmw32XorU represents the instruction i64.atomic.rmw32.xor_u. + OpcodeAtomicI64Rmw32XorU OpcodeAtomic = 0x40 + + // OpcodeAtomicI32RmwXchg represents the instruction i32.atomic.rmw.xchg. + OpcodeAtomicI32RmwXchg OpcodeAtomic = 0x41 + // OpcodeAtomicI64RmwXchg represents the instruction i64.atomic.rmw.xchg. + OpcodeAtomicI64RmwXchg OpcodeAtomic = 0x42 + // OpcodeAtomicI32Rmw8XchgU represents the instruction i32.atomic.rmw8.xchg_u. + OpcodeAtomicI32Rmw8XchgU OpcodeAtomic = 0x43 + // OpcodeAtomicI32Rmw16XchgU represents the instruction i32.atomic.rmw16.xchg_u. + OpcodeAtomicI32Rmw16XchgU OpcodeAtomic = 0x44 + // OpcodeAtomicI64Rmw8XchgU represents the instruction i64.atomic.rmw8.xchg_u. + OpcodeAtomicI64Rmw8XchgU OpcodeAtomic = 0x45 + // OpcodeAtomicI64Rmw16XchgU represents the instruction i64.atomic.rmw16.xchg_u. + OpcodeAtomicI64Rmw16XchgU OpcodeAtomic = 0x46 + // OpcodeAtomicI64Rmw32XchgU represents the instruction i64.atomic.rmw32.xchg_u. + OpcodeAtomicI64Rmw32XchgU OpcodeAtomic = 0x47 + + // OpcodeAtomicI32RmwCmpxchg represents the instruction i32.atomic.rmw.cmpxchg. + OpcodeAtomicI32RmwCmpxchg OpcodeAtomic = 0x48 + // OpcodeAtomicI64RmwCmpxchg represents the instruction i64.atomic.rmw.cmpxchg. + OpcodeAtomicI64RmwCmpxchg OpcodeAtomic = 0x49 + // OpcodeAtomicI32Rmw8CmpxchgU represents the instruction i32.atomic.rmw8.cmpxchg_u. + OpcodeAtomicI32Rmw8CmpxchgU OpcodeAtomic = 0x4a + // OpcodeAtomicI32Rmw16CmpxchgU represents the instruction i32.atomic.rmw16.cmpxchg_u. + OpcodeAtomicI32Rmw16CmpxchgU OpcodeAtomic = 0x4b + // OpcodeAtomicI64Rmw8CmpxchgU represents the instruction i64.atomic.rmw8.cmpxchg_u. + OpcodeAtomicI64Rmw8CmpxchgU OpcodeAtomic = 0x4c + // OpcodeAtomicI64Rmw16CmpxchgU represents the instruction i64.atomic.rmw16.cmpxchg_u. + OpcodeAtomicI64Rmw16CmpxchgU OpcodeAtomic = 0x4d + // OpcodeAtomicI64Rmw32CmpxchgU represents the instruction i64.atomic.rmw32.cmpxchg_u. 
+ OpcodeAtomicI64Rmw32CmpxchgU OpcodeAtomic = 0x4e +) + const ( OpcodeUnreachableName = "unreachable" OpcodeNopName = "nop" @@ -1547,3 +1702,164 @@ var vectorInstructionName = map[OpcodeVec]string{ func VectorInstructionName(oc OpcodeVec) (ret string) { return vectorInstructionName[oc] } + +const ( + OpcodeAtomicMemoryNotifyName = "memory.atomic.notify" + OpcodeAtomicMemoryWait32Name = "memory.atomic.wait32" + OpcodeAtomicMemoryWait64Name = "memory.atomic.wait64" + OpcodeAtomicFenceName = "atomic.fence" + + OpcodeAtomicI32LoadName = "i32.atomic.load" + OpcodeAtomicI64LoadName = "i64.atomic.load" + OpcodeAtomicI32Load8UName = "i32.atomic.load8_u" + OpcodeAtomicI32Load16UName = "i32.atomic.load16_u" + OpcodeAtomicI64Load8UName = "i64.atomic.load8_u" + OpcodeAtomicI64Load16UName = "i64.atomic.load16_u" + OpcodeAtomicI64Load32UName = "i64.atomic.load32_u" + OpcodeAtomicI32StoreName = "i32.atomic.store" + OpcodeAtomicI64StoreName = "i64.atomic.store" + OpcodeAtomicI32Store8Name = "i32.atomic.store8" + OpcodeAtomicI32Store16Name = "i32.atomic.store16" + OpcodeAtomicI64Store8Name = "i64.atomic.store8" + OpcodeAtomicI64Store16Name = "i64.atomic.store16" + OpcodeAtomicI64Store32Name = "i64.atomic.store32" + + OpcodeAtomicI32RmwAddName = "i32.atomic.rmw.add" + OpcodeAtomicI64RmwAddName = "i64.atomic.rmw.add" + OpcodeAtomicI32Rmw8AddUName = "i32.atomic.rmw8.add_u" + OpcodeAtomicI32Rmw16AddUName = "i32.atomic.rmw16.add_u" + OpcodeAtomicI64Rmw8AddUName = "i64.atomic.rmw8.add_u" + OpcodeAtomicI64Rmw16AddUName = "i64.atomic.rmw16.add_u" + OpcodeAtomicI64Rmw32AddUName = "i64.atomic.rmw32.add_u" + + OpcodeAtomicI32RmwSubName = "i32.atomic.rmw.sub" + OpcodeAtomicI64RmwSubName = "i64.atomic.rmw.sub" + OpcodeAtomicI32Rmw8SubUName = "i32.atomic.rmw8.sub_u" + OpcodeAtomicI32Rmw16SubUName = "i32.atomic.rmw16.sub_u" + OpcodeAtomicI64Rmw8SubUName = "i64.atomic.rmw8.sub_u" + OpcodeAtomicI64Rmw16SubUName = "i64.atomic.rmw16.sub_u" + OpcodeAtomicI64Rmw32SubUName = "i64.atomic.rmw32.sub_u" + + OpcodeAtomicI32RmwAndName = "i32.atomic.rmw.and" + OpcodeAtomicI64RmwAndName = "i64.atomic.rmw.and" + OpcodeAtomicI32Rmw8AndUName = "i32.atomic.rmw8.and_u" + OpcodeAtomicI32Rmw16AndUName = "i32.atomic.rmw16.and_u" + OpcodeAtomicI64Rmw8AndUName = "i64.atomic.rmw8.and_u" + OpcodeAtomicI64Rmw16AndUName = "i64.atomic.rmw16.and_u" + OpcodeAtomicI64Rmw32AndUName = "i64.atomic.rmw32.and_u" + + OpcodeAtomicI32RmwOrName = "i32.atomic.rmw.or" + OpcodeAtomicI64RmwOrName = "i64.atomic.rmw.or" + OpcodeAtomicI32Rmw8OrUName = "i32.atomic.rmw8.or_u" + OpcodeAtomicI32Rmw16OrUName = "i32.atomic.rmw16.or_u" + OpcodeAtomicI64Rmw8OrUName = "i64.atomic.rmw8.or_u" + OpcodeAtomicI64Rmw16OrUName = "i64.atomic.rmw16.or_u" + OpcodeAtomicI64Rmw32OrUName = "i64.atomic.rmw32.or_u" + + OpcodeAtomicI32RmwXorName = "i32.atomic.rmw.xor" + OpcodeAtomicI64RmwXorName = "i64.atomic.rmw.xor" + OpcodeAtomicI32Rmw8XorUName = "i32.atomic.rmw8.xor_u" + OpcodeAtomicI32Rmw16XorUName = "i32.atomic.rmw16.xor_u" + OpcodeAtomicI64Rmw8XorUName = "i64.atomic.rmw8.xor_u" + OpcodeAtomicI64Rmw16XorUName = "i64.atomic.rmw16.xor_u" + OpcodeAtomicI64Rmw32XorUName = "i64.atomic.rmw32.xor_u" + + OpcodeAtomicI32RmwXchgName = "i32.atomic.rmw.xchg" + OpcodeAtomicI64RmwXchgName = "i64.atomic.rmw.xchg" + OpcodeAtomicI32Rmw8XchgUName = "i32.atomic.rmw8.xchg_u" + OpcodeAtomicI32Rmw16XchgUName = "i32.atomic.rmw16.xchg_u" + OpcodeAtomicI64Rmw8XchgUName = "i64.atomic.rmw8.xchg_u" + OpcodeAtomicI64Rmw16XchgUName = "i64.atomic.rmw16.xchg_u" + OpcodeAtomicI64Rmw32XchgUName = 
"i64.atomic.rmw32.xchg_u" + + OpcodeAtomicI32RmwCmpxchgName = "i32.atomic.rmw.cmpxchg" + OpcodeAtomicI64RmwCmpxchgName = "i64.atomic.rmw.cmpxchg" + OpcodeAtomicI32Rmw8CmpxchgUName = "i32.atomic.rmw8.cmpxchg_u" + OpcodeAtomicI32Rmw16CmpxchgUName = "i32.atomic.rmw16.cmpxchg_u" + OpcodeAtomicI64Rmw8CmpxchgUName = "i64.atomic.rmw8.cmpxchg_u" + OpcodeAtomicI64Rmw16CmpxchgUName = "i64.atomic.rmw16.cmpxchg_u" + OpcodeAtomicI64Rmw32CmpxchgUName = "i64.atomic.rmw32.cmpxchg_u" +) + +var atomicInstructionName = map[OpcodeAtomic]string{ + OpcodeAtomicMemoryNotify: OpcodeAtomicMemoryNotifyName, + OpcodeAtomicMemoryWait32: OpcodeAtomicMemoryWait32Name, + OpcodeAtomicMemoryWait64: OpcodeAtomicMemoryWait64Name, + OpcodeAtomicFence: OpcodeAtomicFenceName, + + OpcodeAtomicI32Load: OpcodeAtomicI32LoadName, + OpcodeAtomicI64Load: OpcodeAtomicI64LoadName, + OpcodeAtomicI32Load8U: OpcodeAtomicI32Load8UName, + OpcodeAtomicI32Load16U: OpcodeAtomicI32Load16UName, + OpcodeAtomicI64Load8U: OpcodeAtomicI64Load8UName, + OpcodeAtomicI64Load16U: OpcodeAtomicI64Load16UName, + OpcodeAtomicI64Load32U: OpcodeAtomicI64Load32UName, + OpcodeAtomicI32Store: OpcodeAtomicI32StoreName, + OpcodeAtomicI64Store: OpcodeAtomicI64StoreName, + OpcodeAtomicI32Store8: OpcodeAtomicI32Store8Name, + OpcodeAtomicI32Store16: OpcodeAtomicI32Store16Name, + OpcodeAtomicI64Store8: OpcodeAtomicI64Store8Name, + OpcodeAtomicI64Store16: OpcodeAtomicI64Store16Name, + OpcodeAtomicI64Store32: OpcodeAtomicI64Store32Name, + + OpcodeAtomicI32RmwAdd: OpcodeAtomicI32RmwAddName, + OpcodeAtomicI64RmwAdd: OpcodeAtomicI64RmwAddName, + OpcodeAtomicI32Rmw8AddU: OpcodeAtomicI32Rmw8AddUName, + OpcodeAtomicI32Rmw16AddU: OpcodeAtomicI32Rmw16AddUName, + OpcodeAtomicI64Rmw8AddU: OpcodeAtomicI64Rmw8AddUName, + OpcodeAtomicI64Rmw16AddU: OpcodeAtomicI64Rmw16AddUName, + OpcodeAtomicI64Rmw32AddU: OpcodeAtomicI64Rmw32AddUName, + + OpcodeAtomicI32RmwSub: OpcodeAtomicI32RmwSubName, + OpcodeAtomicI64RmwSub: OpcodeAtomicI64RmwSubName, + OpcodeAtomicI32Rmw8SubU: OpcodeAtomicI32Rmw8SubUName, + OpcodeAtomicI32Rmw16SubU: OpcodeAtomicI32Rmw16SubUName, + OpcodeAtomicI64Rmw8SubU: OpcodeAtomicI64Rmw8SubUName, + OpcodeAtomicI64Rmw16SubU: OpcodeAtomicI64Rmw16SubUName, + OpcodeAtomicI64Rmw32SubU: OpcodeAtomicI64Rmw32SubUName, + + OpcodeAtomicI32RmwAnd: OpcodeAtomicI32RmwAndName, + OpcodeAtomicI64RmwAnd: OpcodeAtomicI64RmwAndName, + OpcodeAtomicI32Rmw8AndU: OpcodeAtomicI32Rmw8AndUName, + OpcodeAtomicI32Rmw16AndU: OpcodeAtomicI32Rmw16AndUName, + OpcodeAtomicI64Rmw8AndU: OpcodeAtomicI64Rmw8AndUName, + OpcodeAtomicI64Rmw16AndU: OpcodeAtomicI64Rmw16AndUName, + OpcodeAtomicI64Rmw32AndU: OpcodeAtomicI64Rmw32AndUName, + + OpcodeAtomicI32RmwOr: OpcodeAtomicI32RmwOrName, + OpcodeAtomicI64RmwOr: OpcodeAtomicI64RmwOrName, + OpcodeAtomicI32Rmw8OrU: OpcodeAtomicI32Rmw8OrUName, + OpcodeAtomicI32Rmw16OrU: OpcodeAtomicI32Rmw16OrUName, + OpcodeAtomicI64Rmw8OrU: OpcodeAtomicI64Rmw8OrUName, + OpcodeAtomicI64Rmw16OrU: OpcodeAtomicI64Rmw16OrUName, + OpcodeAtomicI64Rmw32OrU: OpcodeAtomicI64Rmw32OrUName, + + OpcodeAtomicI32RmwXor: OpcodeAtomicI32RmwXorName, + OpcodeAtomicI64RmwXor: OpcodeAtomicI64RmwXorName, + OpcodeAtomicI32Rmw8XorU: OpcodeAtomicI32Rmw8XorUName, + OpcodeAtomicI32Rmw16XorU: OpcodeAtomicI32Rmw16XorUName, + OpcodeAtomicI64Rmw8XorU: OpcodeAtomicI64Rmw8XorUName, + OpcodeAtomicI64Rmw16XorU: OpcodeAtomicI64Rmw16XorUName, + OpcodeAtomicI64Rmw32XorU: OpcodeAtomicI64Rmw32XorUName, + + OpcodeAtomicI32RmwXchg: OpcodeAtomicI32RmwXchgName, + OpcodeAtomicI64RmwXchg: OpcodeAtomicI64RmwXchgName, + 
OpcodeAtomicI32Rmw8XchgU: OpcodeAtomicI32Rmw8XchgUName, + OpcodeAtomicI32Rmw16XchgU: OpcodeAtomicI32Rmw16XchgUName, + OpcodeAtomicI64Rmw8XchgU: OpcodeAtomicI64Rmw8XchgUName, + OpcodeAtomicI64Rmw16XchgU: OpcodeAtomicI64Rmw16XchgUName, + OpcodeAtomicI64Rmw32XchgU: OpcodeAtomicI64Rmw32XchgUName, + + OpcodeAtomicI32RmwCmpxchg: OpcodeAtomicI32RmwCmpxchgName, + OpcodeAtomicI64RmwCmpxchg: OpcodeAtomicI64RmwCmpxchgName, + OpcodeAtomicI32Rmw8CmpxchgU: OpcodeAtomicI32Rmw8CmpxchgUName, + OpcodeAtomicI32Rmw16CmpxchgU: OpcodeAtomicI32Rmw16CmpxchgUName, + OpcodeAtomicI64Rmw8CmpxchgU: OpcodeAtomicI64Rmw8CmpxchgUName, + OpcodeAtomicI64Rmw16CmpxchgU: OpcodeAtomicI64Rmw16CmpxchgUName, + OpcodeAtomicI64Rmw32CmpxchgU: OpcodeAtomicI64Rmw32CmpxchgUName, +} + +// AtomicInstructionName returns the instruction name corresponding to the atomic Opcode. +func AtomicInstructionName(oc OpcodeAtomic) (ret string) { + return atomicInstructionName[oc] +} diff --git a/internal/wasm/memory.go b/internal/wasm/memory.go index e2d994f9cd..12ee8863cb 100644 --- a/internal/wasm/memory.go +++ b/internal/wasm/memory.go @@ -1,11 +1,13 @@ package wasm import ( + "container/list" "encoding/binary" "fmt" "math" "reflect" "sync" + "time" "unsafe" "github.com/tetratelabs/wazero/api" @@ -37,10 +39,16 @@ type MemoryInstance struct { Buffer []byte Min, Cap, Max uint32 - // mux is used to prevent overlapping calls to Grow. - mux sync.RWMutex + Shared bool + // Mux is used to prevent overlapping calls to Grow and implement atomic instructions in interpreter + // mode when Go does not provide atomic APIs to use. + Mux sync.RWMutex // definition is known at compile time. definition api.MemoryDefinition + + // waiters implements atomic wait and notify. It is implemented similarly to golang.org/x/sync/semaphore, + // with a fixed weight of 1 and no spurious notifications. + waiters map[uint32]*list.List } // NewMemoryInstance creates a new instance based on the parameters in the SectionIDMemory. @@ -52,6 +60,7 @@ func NewMemoryInstance(memSec *Memory) *MemoryInstance { Min: memSec.Min, Cap: memSec.Cap, Max: memSec.Max, + Shared: memSec.IsShared, } } @@ -181,8 +190,8 @@ func MemoryPagesToBytesNum(pages uint32) (bytesNum uint64) { // Grow implements the same method as documented on api.Memory. func (m *MemoryInstance) Grow(delta uint32) (result uint32, ok bool) { // We take write-lock here as the following might result in a new slice - m.mux.Lock() - defer m.mux.Unlock() + m.Mux.Lock() + defer m.Mux.Unlock() currentPages := memoryBytesNumToPages(uint64(len(m.Buffer))) if delta == 0 { @@ -284,3 +293,76 @@ func (m *MemoryInstance) writeUint64Le(offset uint32, v uint64) bool { binary.LittleEndian.PutUint64(m.Buffer[offset:], v) return true } + +// Wait suspends the caller until the offset is notified by a different agent. +func (m *MemoryInstance) Wait(offset uint32, timeout int64) (tooMany bool, timedOut bool) { + m.Mux.Lock() + + if m.waiters == nil { + m.waiters = make(map[uint32]*list.List) + } + + waiters := m.waiters[offset] + if waiters == nil { + waiters = list.New() + m.waiters[offset] = waiters + } + + // The specification requires a trap if the number of existing waiters + 1 == 2^32, so we add a check here. + // In practice, it is unlikely the application would ever accumulate such a large number of waiters as it + // indicates several GB of RAM used just for the list of waiters. 
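+ // (For scale: even at a single byte of bookkeeping per waiter, 2^32 waiters would
+ // already need more than 4 GB; each entry here is a list element plus a channel, so
+ // the real figure is much larger.)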
+ // https://github.com/WebAssembly/threads/blob/main/proposals/threads/Overview.md#wait + if uint64(waiters.Len()+1) == 1<<32 { + m.Mux.Unlock() + tooMany = true + return + } + + ready := make(chan struct{}) + elem := waiters.PushBack(ready) + m.Mux.Unlock() + + if timeout < 0 { + <-ready + return + } else { + select { + case <-ready: + return + case <-time.After(time.Duration(timeout)): + // While we could see if the channel completed by now and ignore the timeout, similar to x/sync/semaphore, + // the Wasm spec doesn't specify this behavior, so we keep things simple by prioritizing the timeout. + m.Mux.Lock() + if ws := m.waiters[offset]; ws != nil { + ws.Remove(elem) + } + m.Mux.Unlock() + timedOut = true + return + } + } +} + +// Notify wakes up at most count waiters at the given offset. +func (m *MemoryInstance) Notify(offset uint32, count uint32) uint32 { + m.Mux.Lock() + defer m.Mux.Unlock() + + res := uint32(0) + ws := m.waiters[offset] + if ws == nil { + return 0 + } + + for num := ws.Len(); num > 0 && res < count; num = ws.Len() { + w := ws.Remove(ws.Front()).(chan struct{}) + close(w) + res++ + } + + if ws.Len() == 0 { + m.waiters[offset] = nil + } + + return res +} diff --git a/internal/wasm/memory_test.go b/internal/wasm/memory_test.go index 5957741d80..81fc090eb3 100644 --- a/internal/wasm/memory_test.go +++ b/internal/wasm/memory_test.go @@ -5,6 +5,7 @@ import ( "reflect" "strings" "testing" + "time" "unsafe" "github.com/tetratelabs/wazero/api" @@ -797,3 +798,140 @@ func BenchmarkWriteString(b *testing.B) { }) } } + +func TestMemoryInstance_WaitNotifyOnce(t *testing.T) { + t.Run("no waiters", func(t *testing.T) { + mem := &MemoryInstance{Buffer: []byte{0, 0, 0, 0, 16, 0, 0, 0}, Min: 1, Shared: true} + + notifyWaiters(t, mem, 0, 1, 0) + }) + + t.Run("single wait, notify", func(t *testing.T) { + mem := &MemoryInstance{Buffer: []byte{0, 0, 0, 0, 16, 0, 0, 0}, Min: 1, Shared: true} + + ch := make(chan string) + // Reuse same offset 3 times to verify reuse + for i := 0; i < 3; i++ { + go func() { + tooMany, timedOut := mem.Wait(0, -1) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + + requireChannelEmpty(t, ch) + notifyWaiters(t, mem, 0, 1, 1) + require.Equal(t, "", <-ch) + + notifyWaiters(t, mem, 0, 1, 0) + } + }) + + t.Run("multiple waiters, notify all", func(t *testing.T) { + mem := &MemoryInstance{Buffer: []byte{0, 0, 0, 0, 16, 0, 0, 0}, Min: 1, Shared: true} + + ch := make(chan string) + go func() { + tooMany, timedOut := mem.Wait(0, -1) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + go func() { + tooMany, timedOut := mem.Wait(0, -1) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + + requireChannelEmpty(t, ch) + + notifyWaiters(t, mem, 0, 2, 2) + require.Equal(t, "", <-ch) + require.Equal(t, "", <-ch) + }) + + t.Run("multiple waiters, notify one", func(t *testing.T) { + mem := &MemoryInstance{Buffer: []byte{0, 0, 0, 0, 16, 0, 0, 0}, Min: 1, Shared: true} + + ch := make(chan string) + go func() { + tooMany, timedOut := mem.Wait(0, -1) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + go func() { + tooMany, timedOut := mem.Wait(0, -1) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + + requireChannelEmpty(t, ch) + notifyWaiters(t, mem, 0, 1, 1) + require.Equal(t, "", <-ch) + requireChannelEmpty(t, ch) + notifyWaiters(t, mem, 0, 1, 1) + require.Equal(t, "", <-ch) + }) + + t.Run("multiple offsets", func(t *testing.T) { + mem := &MemoryInstance{Buffer: []byte{0, 0, 0, 0, 16, 0, 0, 0}, Min: 1, Shared: true} + + ch := make(chan string) + go 
func() { + tooMany, timedOut := mem.Wait(0, -1) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + go func() { + tooMany, timedOut := mem.Wait(1, -1) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + + requireChannelEmpty(t, ch) + notifyWaiters(t, mem, 0, 2, 1) + require.Equal(t, "", <-ch) + requireChannelEmpty(t, ch) + notifyWaiters(t, mem, 1, 2, 1) + require.Equal(t, "", <-ch) + }) + + t.Run("timeout", func(t *testing.T) { + mem := &MemoryInstance{Buffer: []byte{0, 0, 0, 0, 16, 0, 0, 0}, Min: 1, Shared: true} + + ch := make(chan string) + go func() { + tooMany, timedOut := mem.Wait(0, 10 /* ns */) + propagateWaitResult(t, ch, tooMany, timedOut) + }() + + require.Equal(t, "timeout", <-ch) + }) +} + +func notifyWaiters(t *testing.T, mem *MemoryInstance, offset, count, exp int) { + t.Helper() + cur := 0 + tries := 0 + for cur < exp { + if tries > 100 { + t.Fatal("too many tries waiting for wait and notify to converge") + } + n := mem.Notify(uint32(offset), uint32(count)) + cur += int(n) + time.Sleep(1 * time.Millisecond) + tries++ + } +} + +func propagateWaitResult(t *testing.T, ch chan string, tooMany, timedOut bool) { + t.Helper() + if tooMany { + ch <- "too many" + } else if timedOut { + ch <- "timeout" + } else { + ch <- "" + } +} + +func requireChannelEmpty(t *testing.T, ch chan string) { + t.Helper() + select { + case <-ch: + t.Fatal("channel should be empty") + default: + // fallthrough + } +} diff --git a/internal/wasm/module.go b/internal/wasm/module.go index ba2c032ac2..83a7e52b24 100644 --- a/internal/wasm/module.go +++ b/internal/wasm/module.go @@ -750,6 +750,8 @@ type Memory struct { Min, Cap, Max uint32 // IsMaxEncoded true if the Max is encoded in the original binary. IsMaxEncoded bool + // IsShared true if the memory is shared for access from multiple agents. + IsShared bool } // Validate ensures values assigned to Min, Cap and Max are within valid thresholds. diff --git a/internal/wasmruntime/errors.go b/internal/wasmruntime/errors.go index 43f951e0df..556e5de829 100644 --- a/internal/wasmruntime/errors.go +++ b/internal/wasmruntime/errors.go @@ -27,6 +27,12 @@ var ( ErrRuntimeInvalidTableAccess = New("invalid table access") // ErrRuntimeIndirectCallTypeMismatch indicates that the type check failed during call_indirect. ErrRuntimeIndirectCallTypeMismatch = New("indirect call type mismatch") + // ErrRuntimeUnalignedAtomic indicates that an atomic operation was made with incorrect memory alignment. + ErrRuntimeUnalignedAtomic = New("unaligned atomic") + // ErrRuntimeExpectedSharedMemory indicates that an operation was made against unshared memory when not allowed. + ErrRuntimeExpectedSharedMemory = New("expected shared memory") + // ErrRuntimeTooManyWaiters indicates that atomic.wait was called with too many waiters. 
+ ErrRuntimeTooManyWaiters = New("too many waiters") ) // Error is returned by a wasm.Engine during the execution of Wasm functions, and they indicate that the Wasm runtime diff --git a/internal/wazeroir/compiler.go b/internal/wazeroir/compiler.go index 47ddc92bfa..85b20139b5 100644 --- a/internal/wazeroir/compiler.go +++ b/internal/wazeroir/compiler.go @@ -379,6 +379,8 @@ func (c *Compiler) handleInstruction() error { instName = wasm.VectorInstructionName(c.body[c.pc+1]) } else if op == wasm.OpcodeMiscPrefix { instName = wasm.MiscInstructionName(c.body[c.pc+1]) + } else if op == wasm.OpcodeAtomicPrefix { + instName = wasm.AtomicInstructionName(c.body[c.pc+1]) } else { instName = wasm.InstructionName(op) } @@ -2848,6 +2850,546 @@ operatorSwitch: default: return fmt.Errorf("unsupported vector instruction in wazeroir: %s", wasm.VectorInstructionName(vecOp)) } + case wasm.OpcodeAtomicPrefix: + c.pc++ + atomicOp := c.body[c.pc] + switch atomicOp { + case wasm.OpcodeAtomicMemoryWait32: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicMemoryWait32Name) + if err != nil { + return err + } + c.emit( + NewOperationAtomicMemoryWait(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicMemoryWait64: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicMemoryWait64Name) + if err != nil { + return err + } + c.emit( + NewOperationAtomicMemoryWait(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicMemoryNotify: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicMemoryNotifyName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicMemoryNotify(imm), + ) + case wasm.OpcodeAtomicFence: + // Skip immediate value + c.pc++ + _ = c.body[c.pc] + c.emit( + NewOperationAtomicFence(), + ) + case wasm.OpcodeAtomicI32Load: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32LoadName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicLoad(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI64Load: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64LoadName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicLoad(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI32Load8U: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Load8UName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicLoad8(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI32Load16U: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Load16UName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicLoad16(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI64Load8U: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Load8UName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicLoad8(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI64Load16U: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Load16UName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicLoad16(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI64Load32U: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Load32UName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicLoad(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI32Store: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32StoreName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicStore(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI32Store8: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Store8Name) + if err != nil { + return err + } + c.emit( + NewOperationAtomicStore8(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI32Store16: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Store16Name) + if err != nil 
{ + return err + } + c.emit( + NewOperationAtomicStore16(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI64Store: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64StoreName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicStore(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI64Store8: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Store8Name) + if err != nil { + return err + } + c.emit( + NewOperationAtomicStore8(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI64Store16: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Store16Name) + if err != nil { + return err + } + c.emit( + NewOperationAtomicStore16(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI64Store32: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Store32Name) + if err != nil { + return err + } + c.emit( + NewOperationAtomicStore(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI32RmwAdd: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwAddName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpAdd), + ) + case wasm.OpcodeAtomicI64RmwAdd: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwAddName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI64, imm, AtomicArithmeticOpAdd), + ) + case wasm.OpcodeAtomicI32Rmw8AddU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8AddUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI32, imm, AtomicArithmeticOpAdd), + ) + case wasm.OpcodeAtomicI64Rmw8AddU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8AddUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI64, imm, AtomicArithmeticOpAdd), + ) + case wasm.OpcodeAtomicI32Rmw16AddU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16AddUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI32, imm, AtomicArithmeticOpAdd), + ) + case wasm.OpcodeAtomicI64Rmw16AddU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16AddUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI64, imm, AtomicArithmeticOpAdd), + ) + case wasm.OpcodeAtomicI64Rmw32AddU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32AddUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpAdd), + ) + case wasm.OpcodeAtomicI32RmwSub: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwSubName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpSub), + ) + case wasm.OpcodeAtomicI64RmwSub: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwSubName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI64, imm, AtomicArithmeticOpSub), + ) + case wasm.OpcodeAtomicI32Rmw8SubU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8SubUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI32, imm, AtomicArithmeticOpSub), + ) + case wasm.OpcodeAtomicI64Rmw8SubU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8SubUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI64, imm, AtomicArithmeticOpSub), + ) + case wasm.OpcodeAtomicI32Rmw16SubU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16SubUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI32, imm, AtomicArithmeticOpSub), + ) + case wasm.OpcodeAtomicI64Rmw16SubU: + 
imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16SubUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI64, imm, AtomicArithmeticOpSub), + ) + case wasm.OpcodeAtomicI64Rmw32SubU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32SubUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpSub), + ) + case wasm.OpcodeAtomicI32RmwAnd: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwAndName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpAnd), + ) + case wasm.OpcodeAtomicI64RmwAnd: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwAndName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI64, imm, AtomicArithmeticOpAnd), + ) + case wasm.OpcodeAtomicI32Rmw8AndU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8AndUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI32, imm, AtomicArithmeticOpAnd), + ) + case wasm.OpcodeAtomicI64Rmw8AndU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8AndUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI64, imm, AtomicArithmeticOpAnd), + ) + case wasm.OpcodeAtomicI32Rmw16AndU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16AndUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI32, imm, AtomicArithmeticOpAnd), + ) + case wasm.OpcodeAtomicI64Rmw16AndU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16AndUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI64, imm, AtomicArithmeticOpAnd), + ) + case wasm.OpcodeAtomicI64Rmw32AndU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32AndUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpAnd), + ) + case wasm.OpcodeAtomicI32RmwOr: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwOrName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpOr), + ) + case wasm.OpcodeAtomicI64RmwOr: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwOrName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI64, imm, AtomicArithmeticOpOr), + ) + case wasm.OpcodeAtomicI32Rmw8OrU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8OrUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI32, imm, AtomicArithmeticOpOr), + ) + case wasm.OpcodeAtomicI64Rmw8OrU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8OrUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI64, imm, AtomicArithmeticOpOr), + ) + case wasm.OpcodeAtomicI32Rmw16OrU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16OrUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI32, imm, AtomicArithmeticOpOr), + ) + case wasm.OpcodeAtomicI64Rmw16OrU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16OrUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI64, imm, AtomicArithmeticOpOr), + ) + case wasm.OpcodeAtomicI64Rmw32OrU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32OrUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpOr), + ) + case wasm.OpcodeAtomicI32RmwXor: + imm, err := 
c.readMemoryArg(wasm.OpcodeAtomicI32RmwXorName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpXor), + ) + case wasm.OpcodeAtomicI64RmwXor: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwXorName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI64, imm, AtomicArithmeticOpXor), + ) + case wasm.OpcodeAtomicI32Rmw8XorU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8XorUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI32, imm, AtomicArithmeticOpXor), + ) + case wasm.OpcodeAtomicI64Rmw8XorU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8XorUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI64, imm, AtomicArithmeticOpXor), + ) + case wasm.OpcodeAtomicI32Rmw16XorU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16XorUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI32, imm, AtomicArithmeticOpXor), + ) + case wasm.OpcodeAtomicI64Rmw16XorU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16XorUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI64, imm, AtomicArithmeticOpXor), + ) + case wasm.OpcodeAtomicI64Rmw32XorU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32XorUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpXor), + ) + case wasm.OpcodeAtomicI32RmwXchg: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwXchgName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpNop), + ) + case wasm.OpcodeAtomicI64RmwXchg: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwXchgName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI64, imm, AtomicArithmeticOpNop), + ) + case wasm.OpcodeAtomicI32Rmw8XchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8XchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI32, imm, AtomicArithmeticOpNop), + ) + case wasm.OpcodeAtomicI64Rmw8XchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8XchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8(UnsignedTypeI64, imm, AtomicArithmeticOpNop), + ) + case wasm.OpcodeAtomicI32Rmw16XchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16XchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI32, imm, AtomicArithmeticOpNop), + ) + case wasm.OpcodeAtomicI64Rmw16XchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16XchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16(UnsignedTypeI64, imm, AtomicArithmeticOpNop), + ) + case wasm.OpcodeAtomicI64Rmw32XchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32XchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW(UnsignedTypeI32, imm, AtomicArithmeticOpNop), + ) + case wasm.OpcodeAtomicI32RmwCmpxchg: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwCmpxchgName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMWCmpxchg(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI64RmwCmpxchg: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwCmpxchgName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMWCmpxchg(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI32Rmw8CmpxchgU: + imm, err := 
c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8CmpxchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8Cmpxchg(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI64Rmw8CmpxchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8CmpxchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW8Cmpxchg(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI32Rmw16CmpxchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16CmpxchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16Cmpxchg(UnsignedTypeI32, imm), + ) + case wasm.OpcodeAtomicI64Rmw16CmpxchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16CmpxchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMW16Cmpxchg(UnsignedTypeI64, imm), + ) + case wasm.OpcodeAtomicI64Rmw32CmpxchgU: + imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32CmpxchgUName) + if err != nil { + return err + } + c.emit( + NewOperationAtomicRMWCmpxchg(UnsignedTypeI32, imm), + ) + } default: return fmt.Errorf("unsupported instruction in wazeroir: 0x%x", op) } diff --git a/internal/wazeroir/compiler_test.go b/internal/wazeroir/compiler_test.go index b167bbf062..2520e56697 100644 --- a/internal/wazeroir/compiler_test.go +++ b/internal/wazeroir/compiler_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental" "github.com/tetratelabs/wazero/internal/leb128" "github.com/tetratelabs/wazero/internal/testing/require" "github.com/tetratelabs/wazero/internal/wasm" @@ -3071,3 +3072,651 @@ func Test_ensureTermination(t *testing.T) { }) } } + +func TestCompiler_threads(t *testing.T) { + tests := []struct { + name string + body []byte + noDropBeforeReturn bool + expected UnionOperation + }{ + { + name: "i32.atomic.load8_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Load8U, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicLoad8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}), + }, + { + name: "i32.atomic.load16_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Load16U, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicLoad16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}), + }, + { + name: "i32.atomic.load", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Load, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicLoad(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "i64.atomic.load8_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load8U, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicLoad8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}), + }, + { + name: "i64.atomic.load16_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load16U, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicLoad16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}), + }, + { + name: "i64.atomic.load32_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load32U, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicLoad(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "i64.atomic.load", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + 
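// Binary layout of an atomic instruction, as in the bytes that follow: the 0xFE
+ // prefix byte, the one-byte atomic opcode, then the usual memarg immediates
+ // (alignment exponent followed by byte offset). +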
wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicLoad(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}), + }, + { + name: "i32.atomic.store8", + body: []byte{ + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Store8, 0x1, 0x8, // alignment=2^1, offset=8 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicStore8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}), + }, + { + name: "i32.atomic.store16", + body: []byte{ + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Store16, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicStore16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}), + }, + { + name: "i32.atomic.store", + body: []byte{ + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Store, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicStore(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "i64.atomic.store8", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store8, 0x1, 0x8, // alignment=2^1, offset=8 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicStore8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}), + }, + { + name: "i64.atomic.store16", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store16, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicStore16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}), + }, + { + name: "i64.atomic.store32", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store32, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicStore(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "i64.atomic.store", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicStore(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}), + }, + { + name: "i32.atomic.rmw8.add_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8AddU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpAdd), + }, + { + name: "i32.atomic.rmw16.add_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16AddU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpAdd), + }, + { + name: "i32.atomic.rmw.add", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwAdd, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpAdd), + }, + { + name: "i64.atomic.rmw8.add_u", 
+ body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8AddU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpAdd), + }, + { + name: "i64.atomic.rmw16.add_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16AddU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpAdd), + }, + { + name: "i64.atomic.rmw32.add_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32AddU, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpAdd), + }, + { + name: "i64.atomic.rmw.add", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwAdd, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}, AtomicArithmeticOpAdd), + }, + { + name: "i32.atomic.rmw8.sub_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8SubU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpSub), + }, + { + name: "i32.atomic.rmw16.sub_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16SubU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpSub), + }, + { + name: "i32.atomic.rmw.sub", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwSub, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpSub), + }, + { + name: "i64.atomic.rmw8.sub_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8SubU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpSub), + }, + { + name: "i64.atomic.rmw16.sub_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16SubU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpSub), + }, + { + name: "i64.atomic.rmw32.sub_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32SubU, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpSub), + }, + { + name: "i64.atomic.rmw.sub", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwSub, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI64, 
MemoryArg{Alignment: 0x4, Offset: 0x8}, AtomicArithmeticOpSub), + }, + { + name: "i32.atomic.rmw8.and_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8AndU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpAnd), + }, + { + name: "i32.atomic.rmw16.and_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16AndU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpAnd), + }, + { + name: "i32.atomic.rmw.and", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwAnd, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpAnd), + }, + { + name: "i64.atomic.rmw8.and_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8AndU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpAnd), + }, + { + name: "i64.atomic.rmw16.and_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16AndU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpAnd), + }, + { + name: "i64.atomic.rmw32.and_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32AndU, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpAnd), + }, + { + name: "i64.atomic.rmw.and", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwAnd, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}, AtomicArithmeticOpAnd), + }, + { + name: "i32.atomic.rmw8.or", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8OrU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpOr), + }, + { + name: "i32.atomic.rmw16.or_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16OrU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpOr), + }, + { + name: "i32.atomic.rmw.or", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwOr, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpOr), + }, + { + name: "i64.atomic.rmw8.or_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8OrU, 0x1, 0x8, // 
alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpOr), + }, + { + name: "i64.atomic.rmw16.or_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16OrU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpOr), + }, + { + name: "i64.atomic.rmw32.or_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32OrU, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpOr), + }, + { + name: "i64.atomic.rmw.or", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwOr, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}, AtomicArithmeticOpOr), + }, + { + name: "i32.atomic.rmw8.xor_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8XorU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpXor), + }, + { + name: "i32.atomic.rmw16.xor_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16XorU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpXor), + }, + { + name: "i32.atomic.rmw.xor", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwXor, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpXor), + }, + { + name: "i64.atomic.rmw8.xor_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8XorU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpXor), + }, + { + name: "i64.atomic.rmw16.xor_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16XorU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpXor), + }, + { + name: "i64.atomic.rmw32.xor_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32XorU, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpXor), + }, + { + name: "i64.atomic.rmw.xor", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwXor, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}, AtomicArithmeticOpXor), + }, + { + name: "i32.atomic.rmw8.xchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + 
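// xchg performs no arithmetic, so the lowering expected below reuses the plain RMW
+ // operation with AtomicArithmeticOpNop: the value pushed next simply replaces the
+ // value in memory and the old value is returned. +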
wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8XchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpNop), + }, + { + name: "i32.atomic.rmw16.xchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16XchgU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpNop), + }, + { + name: "i32.atomic.rmw.xchg", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwXchg, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpNop), + }, + { + name: "i64.atomic.rmw8.xchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8XchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}, AtomicArithmeticOpNop), + }, + { + name: "i64.atomic.rmw16.xchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16XchgU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}, AtomicArithmeticOpNop), + }, + { + name: "i64.atomic.rmw32.xchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32XchgU, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}, AtomicArithmeticOpNop), + }, + { + name: "i64.atomic.rmw.xchg", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwXchg, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicRMW(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}, AtomicArithmeticOpNop), + }, + { + name: "i32.atomic.rmw8.cmpxchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeI32Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8CmpxchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: NewOperationAtomicRMW8Cmpxchg(UnsignedTypeI32, MemoryArg{Alignment: 0x1, Offset: 0x8}), + }, + { + name: "i32.atomic.rmw16.cmpxchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeI32Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16CmpxchgU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16Cmpxchg(UnsignedTypeI32, MemoryArg{Alignment: 0x2, Offset: 0x8}), + }, + { + name: "i32.atomic.rmw.cmpxchg", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeI32Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwCmpxchg, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMWCmpxchg(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "i64.atomic.rmw8.cmpxchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeI64Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8CmpxchgU, 0x1, 0x8, // alignment=2^1, offset=8 + }, + expected: 
NewOperationAtomicRMW8Cmpxchg(UnsignedTypeI64, MemoryArg{Alignment: 0x1, Offset: 0x8}), + }, + { + name: "i64.atomic.rmw16.cmpxchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeI64Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16CmpxchgU, 0x2, 0x8, // alignment=2^(2-1), offset=8 + }, + expected: NewOperationAtomicRMW16Cmpxchg(UnsignedTypeI64, MemoryArg{Alignment: 0x2, Offset: 0x8}), + }, + { + name: "i64.atomic.rmw32.cmpxchg_u", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32CmpxchgU, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicRMWCmpxchg(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "i64.atomic.rmw.cmpxchg", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeI64Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwCmpxchg, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicRMWCmpxchg(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}), + }, + { + name: "memory.atomic.wait32", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeI64Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicMemoryWait32, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicMemoryWait(UnsignedTypeI32, MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "memory.atomic.wait64", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI64Const, 0x1, + wasm.OpcodeI64Const, 0x2, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicMemoryWait64, 0x4, 0x8, // alignment=2^(4-1), offset=8 + }, + expected: NewOperationAtomicMemoryWait(UnsignedTypeI64, MemoryArg{Alignment: 0x4, Offset: 0x8}), + }, + { + name: "memory.atomic.notify", + body: []byte{ + wasm.OpcodeI32Const, 0x0, + wasm.OpcodeI32Const, 0x1, + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicMemoryNotify, 0x3, 0x8, // alignment=2^(3-1), offset=8 + }, + expected: NewOperationAtomicMemoryNotify(MemoryArg{Alignment: 0x3, Offset: 0x8}), + }, + { + name: "memory.atomic.fence", + body: []byte{ + wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicFence, 0x0, // consistency=0 + }, + noDropBeforeReturn: true, + expected: NewOperationAtomicFence(), + }, + } + + for _, tc := range tests { + tt := tc + t.Run(tt.name, func(t *testing.T) { + body := append([]byte{}, tt.body...) 
+ if !tt.noDropBeforeReturn { + body = append(body, wasm.OpcodeDrop) + } + body = append(body, wasm.OpcodeEnd) + module := &wasm.Module{ + TypeSection: []wasm.FunctionType{v_v}, + FunctionSection: []wasm.Index{0}, + MemorySection: &wasm.Memory{}, + CodeSection: []wasm.Code{{Body: body}}, + } + c, err := NewCompiler(api.CoreFeaturesV2|experimental.CoreFeaturesThreads, 0, module, false) + require.NoError(t, err) + + res, err := c.Next() + require.NoError(t, err) + if tt.noDropBeforeReturn { + require.Equal(t, tc.expected, res.Operations[len(res.Operations)-2]) + } else { + require.Equal(t, tc.expected, res.Operations[len(res.Operations)-3]) + } + }) + } +} diff --git a/internal/wazeroir/operations.go b/internal/wazeroir/operations.go index 4041944a48..684521bf69 100644 --- a/internal/wazeroir/operations.go +++ b/internal/wazeroir/operations.go @@ -415,6 +415,36 @@ func (o OperationKind) String() (ret string) { ret = "V128ITruncSatFromF" case OperationKindBuiltinFunctionCheckExitCode: ret = "BuiltinFunctionCheckExitCode" + case OperationKindAtomicMemoryWait: + ret = "OperationKindAtomicMemoryWait" + case OperationKindAtomicMemoryNotify: + ret = "OperationKindAtomicMemoryNotify" + case OperationKindAtomicFence: + ret = "OperationKindAtomicFence" + case OperationKindAtomicLoad: + ret = "OperationKindAtomicLoad" + case OperationKindAtomicLoad8: + ret = "OperationKindAtomicLoad8" + case OperationKindAtomicLoad16: + ret = "OperationKindAtomicLoad16" + case OperationKindAtomicStore: + ret = "OperationKindAtomicStore" + case OperationKindAtomicStore8: + ret = "OperationKindAtomicStore8" + case OperationKindAtomicStore16: + ret = "OperationKindAtomicStore16" + case OperationKindAtomicRMW: + ret = "OperationKindAtomicRMW" + case OperationKindAtomicRMW8: + ret = "OperationKindAtomicRMW8" + case OperationKindAtomicRMW16: + ret = "OperationKindAtomicRMW16" + case OperationKindAtomicRMWCmpxchg: + ret = "OperationKindAtomicRMWCmpxchg" + case OperationKindAtomicRMW8Cmpxchg: + ret = "OperationKindAtomicRMW8Cmpxchg" + case OperationKindAtomicRMW16Cmpxchg: + ret = "OperationKindAtomicRMW16Cmpxchg" default: panic(fmt.Errorf("unknown operation %d", o)) } @@ -705,10 +735,61 @@ const ( // OperationKindBuiltinFunctionCheckExitCode is the Kind for NewOperationBuiltinFunctionCheckExitCode. OperationKindBuiltinFunctionCheckExitCode + // OperationKindAtomicMemoryWait is the kind for NewOperationAtomicMemoryWait. + OperationKindAtomicMemoryWait + // OperationKindAtomicMemoryNotify is the kind for NewOperationAtomicMemoryNotify. + OperationKindAtomicMemoryNotify + // OperationKindAtomicFence is the kind for NewOperationAtomicFence. + OperationKindAtomicFence + // OperationKindAtomicLoad is the kind for NewOperationAtomicLoad. + OperationKindAtomicLoad + // OperationKindAtomicLoad8 is the kind for NewOperationAtomicLoad8. + OperationKindAtomicLoad8 + // OperationKindAtomicLoad16 is the kind for NewOperationAtomicLoad16. + OperationKindAtomicLoad16 + // OperationKindAtomicStore is the kind for NewOperationAtomicStore. + OperationKindAtomicStore + // OperationKindAtomicStore8 is the kind for NewOperationAtomicStore8. + OperationKindAtomicStore8 + // OperationKindAtomicStore16 is the kind for NewOperationAtomicStore16. + OperationKindAtomicStore16 + + // OperationKindAtomicRMW is the kind for NewOperationAtomicRMW. + OperationKindAtomicRMW + // OperationKindAtomicRMW8 is the kind for NewOperationAtomicRMW8. + OperationKindAtomicRMW8 + // OperationKindAtomicRMW16 is the kind for NewOperationAtomicRMW16. 
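For orientation, the compiler in the test loop above is handed api.CoreFeaturesV2|experimental.CoreFeaturesThreads directly; through the public API the same flag would be set on the runtime configuration. A minimal sketch, assuming the interpreter engine and the RuntimeConfig.WithCoreFeatures method (illustrative, not taken from this diff):

package main

import (
	"context"

	"github.com/tetratelabs/wazero"
	"github.com/tetratelabs/wazero/api"
	"github.com/tetratelabs/wazero/experimental"
)

func main() {
	ctx := context.Background()
	// Threads support is gated behind an experimental CoreFeatures bit, so it is
	// combined with the stable feature set when building the runtime config.
	cfg := wazero.NewRuntimeConfigInterpreter().
		WithCoreFeatures(api.CoreFeaturesV2 | experimental.CoreFeaturesThreads)
	r := wazero.NewRuntimeWithConfig(ctx, cfg)
	defer r.Close(ctx)
	// Modules compiled with r can now declare shared memories and use the
	// 0xFE-prefixed atomic instructions exercised by these tests.
}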
+ OperationKindAtomicRMW16 + + // OperationKindAtomicRMWCmpxchg is the kind for NewOperationAtomicRMWCmpxchg. + OperationKindAtomicRMWCmpxchg + // OperationKindAtomicRMW8Cmpxchg is the kind for NewOperationAtomicRMW8Cmpxchg. + OperationKindAtomicRMW8Cmpxchg + // OperationKindAtomicRMW16Cmpxchg is the kind for NewOperationAtomicRMW16Cmpxchg. + OperationKindAtomicRMW16Cmpxchg + // operationKindEnd is always placed at the bottom of this iota definition to be used in the test. operationKindEnd ) +// AtomicArithmeticOp is the type for the operation kind of atomic arithmetic operations. +type AtomicArithmeticOp byte + +const ( + // AtomicArithmeticOpAdd is the kind for an add operation. + AtomicArithmeticOpAdd AtomicArithmeticOp = iota + // AtomicArithmeticOpSub is the kind for a sub operation. + AtomicArithmeticOpSub + // AtomicArithmeticOpAnd is the kind for a bitwise and operation. + AtomicArithmeticOpAnd + // AtomicArithmeticOpOr is the kind for a bitwise or operation. + AtomicArithmeticOpOr + // AtomicArithmeticOpXor is the kind for a bitwise xor operation. + AtomicArithmeticOpXor + // AtomicArithmeticOpNop is the kind for a nop operation. + AtomicArithmeticOpNop +) + // NewOperationBuiltinFunctionCheckExitCode is a constructor for UnionOperation with Kind OperationKindBuiltinFunctionCheckExitCode. // // OperationBuiltinFunctionCheckExitCode corresponds to the instruction to check the api.Module is already closed due to @@ -1017,6 +1098,23 @@ func (o UnionOperation) String() string { return fmt.Sprintf("%s.%sU", o.Kind, shapeName(o.B1)) } + case OperationKindAtomicMemoryWait, + OperationKindAtomicMemoryNotify, + OperationKindAtomicFence, + OperationKindAtomicLoad, + OperationKindAtomicLoad8, + OperationKindAtomicLoad16, + OperationKindAtomicStore, + OperationKindAtomicStore8, + OperationKindAtomicStore16, + OperationKindAtomicRMW, + OperationKindAtomicRMW8, + OperationKindAtomicRMW16, + OperationKindAtomicRMWCmpxchg, + OperationKindAtomicRMW8Cmpxchg, + OperationKindAtomicRMW16Cmpxchg: + return o.Kind.String() + default: panic(fmt.Sprintf("TODO: %v", o.Kind)) } @@ -2565,3 +2663,150 @@ func NewOperationV128Narrow(originShape Shape, signed bool) UnionOperation { func NewOperationV128ITruncSatFromF(originShape Shape, signed bool) UnionOperation { return UnionOperation{Kind: OperationKindV128ITruncSatFromF, B1: originShape, B3: signed} } + +// NewOperationAtomicMemoryWait is a constructor for UnionOperation with OperationKindAtomicMemoryWait. +// +// This corresponds to +// +// wasm.OpcodeAtomicWait32Name wasm.OpcodeAtomicWait64Name +func NewOperationAtomicMemoryWait(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicMemoryWait, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicMemoryNotify is a constructor for UnionOperation with OperationKindAtomicMemoryNotify. +// +// This corresponds to +// +// wasm.OpcodeAtomicNotifyName +func NewOperationAtomicMemoryNotify(arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicMemoryNotify, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicFence is a constructor for UnionOperation with OperationKindAtomicFence. +// +// This corresponds to +// +// wasm.OpcodeAtomicFenceName +func NewOperationAtomicFence() UnionOperation { + return UnionOperation{Kind: OperationKindAtomicFence} +} + +// NewOperationAtomicLoad is a constructor for UnionOperation with OperationKindAtomicLoad. 
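The AtomicArithmeticOp values above pick the combining function for the read-modify-write operations (xchg is mapped to AtomicArithmeticOpNop by the compiler). A small sketch of how an executor might apply them to a 32-bit cell, assuming it sits in the wazeroir package; the real interpreter additionally performs the access atomically and pushes the old value back to the stack:

// applyRMW32 computes the value stored back by an atomic RMW: the old cell
// value combined with the operand according to op. For AtomicArithmeticOpNop
// (exchange) the operand simply replaces the cell.
func applyRMW32(op AtomicArithmeticOp, old, operand uint32) uint32 {
	switch op {
	case AtomicArithmeticOpAdd:
		return old + operand
	case AtomicArithmeticOpSub:
		return old - operand
	case AtomicArithmeticOpAnd:
		return old & operand
	case AtomicArithmeticOpOr:
		return old | operand
	case AtomicArithmeticOpXor:
		return old ^ operand
	default: // AtomicArithmeticOpNop
		return operand
	}
}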
+// +// This corresponds to +// +// wasm.OpcodeAtomicI32LoadName wasm.OpcodeAtomicI64LoadName +func NewOperationAtomicLoad(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicLoad, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicLoad8 is a constructor for UnionOperation with OperationKindAtomicLoad8. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32Load8UName wasm.OpcodeAtomicI64Load8UName +func NewOperationAtomicLoad8(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicLoad8, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicLoad16 is a constructor for UnionOperation with OperationKindAtomicLoad16. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32Load16UName wasm.OpcodeAtomicI64Load16UName +func NewOperationAtomicLoad16(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicLoad16, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicStore is a constructor for UnionOperation with OperationKindAtomicStore. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32StoreName wasm.OpcodeAtomicI64StoreName +func NewOperationAtomicStore(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicStore, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicStore8 is a constructor for UnionOperation with OperationKindAtomicStore8. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32Store8UName wasm.OpcodeAtomicI64Store8UName +func NewOperationAtomicStore8(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicStore8, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicStore16 is a constructor for UnionOperation with OperationKindAtomicStore16. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32Store16UName wasm.OpcodeAtomicI64Store16UName +func NewOperationAtomicStore16(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicStore16, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicRMW is a constructor for UnionOperation with OperationKindAtomicRMW. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32RMWAddName wasm.OpcodeAtomicI64RmwAddName +// wasm.OpcodeAtomicI32RMWSubName wasm.OpcodeAtomicI64RmwSubName +// wasm.OpcodeAtomicI32RMWAndName wasm.OpcodeAtomicI64RmwAndName +// wasm.OpcodeAtomicI32RMWOrName wasm.OpcodeAtomicI64RmwOrName +// wasm.OpcodeAtomicI32RMWXorName wasm.OpcodeAtomicI64RmwXorName +func NewOperationAtomicRMW(unsignedType UnsignedType, arg MemoryArg, op AtomicArithmeticOp) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicRMW, B1: byte(unsignedType), B2: byte(op), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicRMW8 is a constructor for UnionOperation with OperationKindAtomicRMW8. 
+// +// This corresponds to +// +// wasm.OpcodeAtomicI32RMW8AddUName wasm.OpcodeAtomicI64Rmw8AddUName +// wasm.OpcodeAtomicI32RMW8SubUName wasm.OpcodeAtomicI64Rmw8SubUName +// wasm.OpcodeAtomicI32RMW8AndUName wasm.OpcodeAtomicI64Rmw8AndUName +// wasm.OpcodeAtomicI32RMW8OrUName wasm.OpcodeAtomicI64Rmw8OrUName +// wasm.OpcodeAtomicI32RMW8XorUName wasm.OpcodeAtomicI64Rmw8XorUName +func NewOperationAtomicRMW8(unsignedType UnsignedType, arg MemoryArg, op AtomicArithmeticOp) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicRMW8, B1: byte(unsignedType), B2: byte(op), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicRMW16 is a constructor for UnionOperation with OperationKindAtomicRMW16. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32RMW16AddUName wasm.OpcodeAtomicI64Rmw16AddUName +// wasm.OpcodeAtomicI32RMW16SubUName wasm.OpcodeAtomicI64Rmw16SubUName +// wasm.OpcodeAtomicI32RMW16AndUName wasm.OpcodeAtomicI64Rmw16AndUName +// wasm.OpcodeAtomicI32RMW16OrUName wasm.OpcodeAtomicI64Rmw16OrUName +// wasm.OpcodeAtomicI32RMW16XorUName wasm.OpcodeAtomicI64Rmw16XorUName +func NewOperationAtomicRMW16(unsignedType UnsignedType, arg MemoryArg, op AtomicArithmeticOp) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicRMW16, B1: byte(unsignedType), B2: byte(op), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicRMWCmpxchg is a constructor for UnionOperation with OperationKindAtomicRMWCmpxchg. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32RMWCmpxchgName wasm.OpcodeAtomicI64RmwCmpxchgName +func NewOperationAtomicRMWCmpxchg(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicRMWCmpxchg, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicRMW8Cmpxchg is a constructor for UnionOperation with OperationKindAtomicRMW8Cmpxchg. +// +// This corresponds to +// +// wasm.OpcodeAtomicI32RMW8CmpxchgUName wasm.OpcodeAtomicI64Rmw8CmpxchgUName +func NewOperationAtomicRMW8Cmpxchg(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicRMW8Cmpxchg, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} + +// NewOperationAtomicRMW16Cmpxchg is a constructor for UnionOperation with OperationKindAtomicRMW16Cmpxchg. 
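All of these constructors share one encoding: the value type goes into B1 (and, for the arithmetic RMWs, the AtomicArithmeticOp into B2), while MemoryArg's alignment and offset are widened into U1 and U2. A round-trip sketch, assuming it lives in the wazeroir package and that MemoryArg's fields are uint32, as the conversions above suggest:

// exampleAtomicLoadEncoding shows that the memory argument can be recovered
// from the packed UnionOperation fields.
func exampleAtomicLoadEncoding() MemoryArg {
	op := NewOperationAtomicLoad(UnsignedTypeI64, MemoryArg{Alignment: 0x3, Offset: 0x8})
	// op.B1 == byte(UnsignedTypeI64); U1 and U2 carry alignment and offset.
	return MemoryArg{Alignment: uint32(op.U1), Offset: uint32(op.U2)} // {0x3, 0x8}
}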
+// +// This corresponds to +// +// wasm.OpcodeAtomicI32RMW16CmpxchgUName wasm.OpcodeAtomicI64Rmw16CmpxchgUName +func NewOperationAtomicRMW16Cmpxchg(unsignedType UnsignedType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindAtomicRMW16Cmpxchg, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} +} diff --git a/internal/wazeroir/signature.go b/internal/wazeroir/signature.go index 6d0f84447c..30a5868df6 100644 --- a/internal/wazeroir/signature.go +++ b/internal/wazeroir/signature.go @@ -233,6 +233,26 @@ var ( in: []UnsignedType{UnsignedTypeF64}, out: []UnsignedType{UnsignedTypeV128}, } + signature_I32I64_I64 = &signature{ + in: []UnsignedType{UnsignedTypeI32, UnsignedTypeI64}, + out: []UnsignedType{UnsignedTypeI64}, + } + signature_I32I32I64_I32 = &signature{ + in: []UnsignedType{UnsignedTypeI32, UnsignedTypeI32, UnsignedTypeI64}, + out: []UnsignedType{UnsignedTypeI32}, + } + signature_I32I64I64_I32 = &signature{ + in: []UnsignedType{UnsignedTypeI32, UnsignedTypeI64, UnsignedTypeI64}, + out: []UnsignedType{UnsignedTypeI32}, + } + signature_I32I32I32_I32 = &signature{ + in: []UnsignedType{UnsignedTypeI32, UnsignedTypeI32, UnsignedTypeI32}, + out: []UnsignedType{UnsignedTypeI32}, + } + signature_I32I64I64_I64 = &signature{ + in: []UnsignedType{UnsignedTypeI32, UnsignedTypeI64, UnsignedTypeI64}, + out: []UnsignedType{UnsignedTypeI64}, + } ) // wasmOpcodeSignature returns the signature of given Wasm opcode. @@ -586,6 +606,40 @@ func (c *Compiler) wasmOpcodeSignature(op wasm.Opcode, index uint32) (*signature default: return nil, fmt.Errorf("unsupported vector instruction in wazeroir: %s", wasm.VectorInstructionName(vecOp)) } + case wasm.OpcodeAtomicPrefix: + switch atomicOp := c.body[c.pc+1]; atomicOp { + case wasm.OpcodeAtomicMemoryNotify: + return signature_I32I32_I32, nil + case wasm.OpcodeAtomicMemoryWait32: + return signature_I32I32I64_I32, nil + case wasm.OpcodeAtomicMemoryWait64: + return signature_I32I64I64_I32, nil + case wasm.OpcodeAtomicFence: + return signature_None_None, nil + case wasm.OpcodeAtomicI32Load, wasm.OpcodeAtomicI32Load8U, wasm.OpcodeAtomicI32Load16U: + return signature_I32_I32, nil + case wasm.OpcodeAtomicI64Load, wasm.OpcodeAtomicI64Load8U, wasm.OpcodeAtomicI64Load16U, wasm.OpcodeAtomicI64Load32U: + return signature_I32_I64, nil + case wasm.OpcodeAtomicI32Store, wasm.OpcodeAtomicI32Store8, wasm.OpcodeAtomicI32Store16: + return signature_I32I32_None, nil + case wasm.OpcodeAtomicI64Store, wasm.OpcodeAtomicI64Store8, wasm.OpcodeAtomicI64Store16, wasm.OpcodeAtomicI64Store32: + return signature_I32I64_None, nil + case wasm.OpcodeAtomicI32RmwAdd, wasm.OpcodeAtomicI32RmwSub, wasm.OpcodeAtomicI32RmwAnd, wasm.OpcodeAtomicI32RmwOr, wasm.OpcodeAtomicI32RmwXor, wasm.OpcodeAtomicI32RmwXchg, + wasm.OpcodeAtomicI32Rmw8AddU, wasm.OpcodeAtomicI32Rmw8SubU, wasm.OpcodeAtomicI32Rmw8AndU, wasm.OpcodeAtomicI32Rmw8OrU, wasm.OpcodeAtomicI32Rmw8XorU, wasm.OpcodeAtomicI32Rmw8XchgU, + wasm.OpcodeAtomicI32Rmw16AddU, wasm.OpcodeAtomicI32Rmw16SubU, wasm.OpcodeAtomicI32Rmw16AndU, wasm.OpcodeAtomicI32Rmw16OrU, wasm.OpcodeAtomicI32Rmw16XorU, wasm.OpcodeAtomicI32Rmw16XchgU: + return signature_I32I32_I32, nil + case wasm.OpcodeAtomicI64RmwAdd, wasm.OpcodeAtomicI64RmwSub, wasm.OpcodeAtomicI64RmwAnd, wasm.OpcodeAtomicI64RmwOr, wasm.OpcodeAtomicI64RmwXor, wasm.OpcodeAtomicI64RmwXchg, + wasm.OpcodeAtomicI64Rmw8AddU, wasm.OpcodeAtomicI64Rmw8SubU, wasm.OpcodeAtomicI64Rmw8AndU, wasm.OpcodeAtomicI64Rmw8OrU, wasm.OpcodeAtomicI64Rmw8XorU, 
wasm.OpcodeAtomicI64Rmw8XchgU, + wasm.OpcodeAtomicI64Rmw16AddU, wasm.OpcodeAtomicI64Rmw16SubU, wasm.OpcodeAtomicI64Rmw16AndU, wasm.OpcodeAtomicI64Rmw16OrU, wasm.OpcodeAtomicI64Rmw16XorU, wasm.OpcodeAtomicI64Rmw16XchgU, + wasm.OpcodeAtomicI64Rmw32AddU, wasm.OpcodeAtomicI64Rmw32SubU, wasm.OpcodeAtomicI64Rmw32AndU, wasm.OpcodeAtomicI64Rmw32OrU, wasm.OpcodeAtomicI64Rmw32XorU, wasm.OpcodeAtomicI64Rmw32XchgU: + return signature_I32I64_I64, nil + case wasm.OpcodeAtomicI32RmwCmpxchg, wasm.OpcodeAtomicI32Rmw8CmpxchgU, wasm.OpcodeAtomicI32Rmw16CmpxchgU: + return signature_I32I32I32_I32, nil + case wasm.OpcodeAtomicI64RmwCmpxchg, wasm.OpcodeAtomicI64Rmw8CmpxchgU, wasm.OpcodeAtomicI64Rmw16CmpxchgU, wasm.OpcodeAtomicI64Rmw32CmpxchgU: + return signature_I32I64I64_I64, nil + default: + return nil, fmt.Errorf("unsupported atomic instruction in wazeroir: %s", wasm.AtomicInstructionName(atomicOp)) + } default: return nil, fmt.Errorf("unsupported instruction in wazeroir: 0x%x", op) } diff --git a/internal/wazeroir/signature_test.go b/internal/wazeroir/signature_test.go index 3969de7c42..518678366f 100644 --- a/internal/wazeroir/signature_test.go +++ b/internal/wazeroir/signature_test.go @@ -88,6 +88,341 @@ func TestCompiler_wasmOpcodeSignature(t *testing.T) { body: []byte{wasm.OpcodeMiscPrefix, wasm.OpcodeMiscTableCopy}, exp: signature_I32I32I32_None, }, + { + name: "i32.atomic.load8_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Load8U}, + exp: signature_I32_I32, + }, + { + name: "i32.atomic.load16_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Load16U}, + exp: signature_I32_I32, + }, + { + name: "i32.atomic.load", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Load}, + exp: signature_I32_I32, + }, + { + name: "i64.atomic.load8_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load8U}, + exp: signature_I32_I64, + }, + { + name: "i64.atomic.load16_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load16U}, + exp: signature_I32_I64, + }, + { + name: "i64.atomic.load32_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load32U}, + exp: signature_I32_I64, + }, + { + name: "i64.atomic.load", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Load}, + exp: signature_I32_I64, + }, + { + name: "i32.atomic.store8", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Store8}, + exp: signature_I32I32_None, + }, + { + name: "i32.atomic.store16_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Store16}, + exp: signature_I32I32_None, + }, + { + name: "i32.atomic.store", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Store}, + exp: signature_I32I32_None, + }, + { + name: "i64.atomic.store8", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store8}, + exp: signature_I32I64_None, + }, + { + name: "i64.atomic.store16", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store16}, + exp: signature_I32I64_None, + }, + { + name: "i64.atomic.store32", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store32}, + exp: signature_I32I64_None, + }, + { + name: "i64.atomic.store", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Store}, + exp: signature_I32I64_None, + }, + { + name: "i32.atomic.rmw8.add_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8AddU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw16.add_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16AddU}, + exp: signature_I32I32_I32, + }, + { + name: 
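The compare-and-exchange signatures above (signature_I32I32I32_I32 and signature_I32I64I64_I64) spell out the operand order the compiler expects: address, expected value, replacement value, producing the previously stored value. A stack-level sketch of that contract for the 32-bit case; the real implementation is atomic and bounds-checked:

// cmpxchg32 models signature_I32I32I32_I32: it consumes (addr, expected,
// replacement) and yields the old value, storing replacement only on a match.
// mem is a stand-in for linear memory, keyed by address.
func cmpxchg32(mem map[uint32]uint32, addr, expected, replacement uint32) (old uint32) {
	old = mem[addr]
	if old == expected {
		mem[addr] = replacement
	}
	return old
}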
"i32.atomic.rmw.add", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwAdd}, + exp: signature_I32I32_I32, + }, + { + name: "i64.atomic.rmw8.add_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8AddU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw16.add_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16AddU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw32.add_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32AddU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw.add", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwAdd}, + exp: signature_I32I64_I64, + }, + { + name: "i32.atomic.rmw8.sub_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8SubU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw16.sub_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16SubU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw.sub", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwSub}, + exp: signature_I32I32_I32, + }, + { + name: "i64.atomic.rmw8.sub_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8SubU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw16.sub_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16SubU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw32.sub_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32SubU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw.sub", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwSub}, + exp: signature_I32I64_I64, + }, + { + name: "i32.atomic.rmw8.and_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8AndU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw16.and_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16AndU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw.and", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwAnd}, + exp: signature_I32I32_I32, + }, + { + name: "i64.atomic.rmw8.and_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8AndU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw16.and_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16AndU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw32.and_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32AndU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw.and", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwAnd}, + exp: signature_I32I64_I64, + }, + { + name: "i32.atomic.rmw8.or_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8OrU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw16.or_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16OrU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw.or", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwOr}, + exp: signature_I32I32_I32, + }, + { + name: "i64.atomic.rmw8.or_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8OrU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw16.or_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16OrU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw32.or_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32OrU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw.or", + body: 
[]byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwOr}, + exp: signature_I32I64_I64, + }, + { + name: "i32.atomic.rmw8.xor_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8XorU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw16.xor_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16XorU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw.xor", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwXor}, + exp: signature_I32I32_I32, + }, + { + name: "i64.atomic.rmw8.xor_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8XorU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw16.xor_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16XorU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw32.xor_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32XorU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw.xor", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwXor}, + exp: signature_I32I64_I64, + }, + { + name: "i32.atomic.rmw8.xchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8XchgU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw16.xchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16XchgU}, + exp: signature_I32I32_I32, + }, + { + name: "i32.atomic.rmw.xchg", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwXchg}, + exp: signature_I32I32_I32, + }, + { + name: "i64.atomic.rmw8.xchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8XchgU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw16.xchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16XchgU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw32.xchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32XchgU}, + exp: signature_I32I64_I64, + }, + { + name: "i64.atomic.rmw.xchg", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwXchg}, + exp: signature_I32I64_I64, + }, + { + name: "i32.atomic.rmw8.cmpxchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw8CmpxchgU}, + exp: signature_I32I32I32_I32, + }, + { + name: "i32.atomic.rmw16.cmpxchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32Rmw16CmpxchgU}, + exp: signature_I32I32I32_I32, + }, + { + name: "i32.atomic.rmw.cmpxchg", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI32RmwCmpxchg}, + exp: signature_I32I32I32_I32, + }, + { + name: "i64.atomic.rmw8.cmpxchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw8CmpxchgU}, + exp: signature_I32I64I64_I64, + }, + { + name: "i64.atomic.rmw16.cmpxchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw16CmpxchgU}, + exp: signature_I32I64I64_I64, + }, + { + name: "i64.atomic.rmw32.cmpxchg_u", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64Rmw32CmpxchgU}, + exp: signature_I32I64I64_I64, + }, + { + name: "i64.atomic.rmw.cmpxchg", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicI64RmwCmpxchg}, + exp: signature_I32I64I64_I64, + }, + { + name: "memory.atomic.wait32", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicMemoryWait32}, + exp: signature_I32I32I64_I32, + }, + { + name: "memory.atomic.wait64", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicMemoryWait64}, + exp: signature_I32I64I64_I32, + }, + { + name: "memory.atomic.notify", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicMemoryNotify}, + 
exp: signature_I32I32_I32, + }, + { + name: "memory.atomic.fence", + body: []byte{wasm.OpcodeAtomicPrefix, wasm.OpcodeAtomicFence}, + exp: signature_None_None, + }, } for _, tt := range tests {
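The wait and notify rows close out the table: both wait variants take an address, an expected value (i32 or i64) and an i64 timeout, returning an i32 status, while notify takes an address and a waiter count and returns how many waiters were woken. A caller-side decoder for the wait status, assuming the conventional result codes from the threads proposal (0 = woken, 1 = value mismatch, 2 = timed out):

// waitResult maps the i32 returned by memory.atomic.wait32/wait64 to a label.
func waitResult(status int32) string {
	switch status {
	case 0:
		return "ok" // woken by memory.atomic.notify
	case 1:
		return "not-equal" // loaded value differed from the expected operand
	case 2:
		return "timed-out" // the timeout elapsed before a notify
	default:
		return "invalid"
	}
}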