From 796b504958ba1186473a8dcea8fb240034e6f8df Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= <targos@protonmail.com>
Date: Sat, 10 Jul 2021 08:23:20 +0200
Subject: [PATCH] deps: V8: cherry-pick 2b77ca200c56

Original commit message:

    [wasm][liftoff] Always zero-extend 32 bit offsets

    The upper 32 bits of the 64 bit offset register are not guaranteed to
    be cleared, so a zero-extension is needed. We already do the
    zero-extension in the case of explicit bounds checking, but this
    should also be done if the trap handler is enabled.

    R=clemensb@chromium.org
    CC=jkummerow@chromium.org

    Bug: v8:11809
    Change-Id: I21e2535c701041d11fa06c176fa683d82db0a3f1
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2917612
    Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
    Reviewed-by: Clemens Backes <clemensb@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#74881}

Refs: https://github.com/v8/v8/commit/2b77ca200c56667c68895e49c96c10ff77834f09
PR-URL: https://github.com/nodejs/node/pull/39337
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
---
 common.gypi                                        |  2 +-
 .../wasm/baseline/arm/liftoff-assembler-arm.h      |  3 +-
 .../baseline/arm64/liftoff-assembler-arm64.h       | 15 +++--
 .../baseline/ia32/liftoff-assembler-ia32.h         |  3 +-
 deps/v8/src/wasm/baseline/liftoff-assembler.h      |  2 +-
 deps/v8/src/wasm/baseline/liftoff-compiler.cc      |  8 ++-
 .../wasm/baseline/x64/liftoff-assembler-x64.h      |  6 +-
 .../mjsunit/regress/wasm/regress-11809.js          | 58 +++++++++++++++++++
 8 files changed, 84 insertions(+), 13 deletions(-)
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-11809.js

diff --git a/common.gypi b/common.gypi
index b2ea540133f0af..aa42c69f96391b 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.13',
+    'v8_embedder_string': '-node.14',
 
     ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index acc7f08fa07530..9a9932c6f60298 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -768,7 +768,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   liftoff::LoadInternal(this, dst, src_addr, offset_reg,
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 38d424d8e023b0..ed1070444e18ad 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -126,9 +126,13 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
 template <typename T>
 inline MemOperand GetMemOp(LiftoffAssembler* assm,
                            UseScratchRegisterScope* temps, Register addr,
-                           Register offset, T offset_imm) {
+                           Register offset, T offset_imm,
+                           bool i64_offset = false) {
   if (offset.is_valid()) {
-    if (offset_imm == 0) return MemOperand(addr.X(), offset.X());
+    if (offset_imm == 0) {
+      return i64_offset ? MemOperand(addr.X(), offset.X())
+                        : MemOperand(addr.X(), offset.W(), UXTW);
+    }
     Register tmp = temps->AcquireX();
     DCHECK_GE(kMaxUInt32, offset_imm);
     assm->Add(tmp, offset.X(), offset_imm);
@@ -493,10 +497,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   UseScratchRegisterScope temps(this);
-  MemOperand src_op =
-      liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+  MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
+                                        offset_imm, i64_offset);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
     case LoadType::kI32Load8U:
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 9f35b5efc3f63c..0cd8ddea5713d7 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -390,7 +390,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index b0439dc4e10bc2..ab82b4fec84c33 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -675,7 +675,7 @@ class LiftoffAssembler : public TurboAssembler {
   inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                    uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
                    uint32_t* protected_load_pc = nullptr,
-                   bool is_load_mem = false);
+                   bool is_load_mem = false, bool i64_offset = false);
   inline void Store(Register dst_addr, Register offset_reg,
                     uintptr_t offset_imm, LiftoffRegister src, StoreType type,
                     LiftoffRegList pinned,
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 10d5527777f38b..3204f69675fcf7 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -2792,6 +2792,7 @@ class LiftoffCompiler {
     // Only look at the slot, do not pop it yet (will happen in PopToRegister
     // below, if this is not a statically-in-bounds index).
     auto& index_slot = __ cache_state()->stack_state.back();
+    bool i64_offset = index_val.type == kWasmI64;
     if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
       __ cache_state()->stack_state.pop_back();
       DEBUG_CODE_COMMENT("load from memory (constant offset)");
@@ -2799,7 +2800,8 @@
       Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
       LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
       LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
-      __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true);
+      __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
+              i64_offset);
       __ PushRegister(kind, value);
     } else {
       LiftoffRegister full_index = __ PopToRegister();
@@ -2818,8 +2820,8 @@
 
       LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
       uint32_t protected_load_pc = 0;
-      __ Load(value, mem, index, offset, type, pinned, &protected_load_pc,
-              true);
+      __ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
+              i64_offset);
       if (env_->use_trap_handler) {
         AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
                          protected_load_pc);
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 3da9656b42c60e..016a090eed9f89 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -395,7 +395,11 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) {
+    AssertZeroExtended(offset_reg);
+  }
   Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-11809.js b/deps/v8/test/mjsunit/regress/wasm/regress-11809.js
new file mode 100644
index 00000000000000..890e26c609e151
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-11809.js
@@ -0,0 +1,58 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --enable-testing-opcode-in-wasm --nowasm-tier-up --wasm-tier-mask-for-testing=2
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var instance = (function () {
+  var builder = new WasmModuleBuilder();
+  builder.addMemory(1, 1, false /* exported */);
+
+  var sig_index = builder.addType(makeSig(
+      [kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+       kWasmI32],
+      [kWasmI32]));
+  var sig_three = builder.addType(makeSig(
+      [kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64,
+       kWasmI64],
+      []));
+
+  var zero = builder.addFunction("zero", kSig_i_i);
+  var one = builder.addFunction("one", sig_index);
+  var two = builder.addFunction("two", kSig_v_i);
+  var three = builder.addFunction("three", sig_three).addBody([]);
+
+  zero.addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0]);
+
+  one.addBody([
+    kExprLocalGet, 7,
+    kExprCallFunction, zero.index]);
+
+  two.addBody([
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+    kExprCallFunction, three.index,
+    kExprI32Const, 0,
+    kExprI32Const, 0,
+    kExprI32Const, 0,
+    kExprI32Const, 0,
+    kExprI32Const, 0,
+    kExprI32Const, 0,
+    kExprI32Const, 0,
+    kExprI32Const, 0,
+    kExprCallFunction, one.index,
+    kExprDrop,
+  ]).exportFunc();
+
+  return builder.instantiate({});
+})();
+
+instance.exports.two();
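
The failure mode, in short: Liftoff computes the effective address of a memory access as mem_start + offset_reg + offset_imm. The explicit bounds-check path already zero-extends a 32-bit index held in a 64-bit register, but under trap-handler bounds checking nothing cleared the upper half, so stale high bits could place the access outside the guard region the handler relies on. The following is a minimal illustrative sketch, not V8 code: effective_address and its constants are invented for the example, with the dirty register value mirroring the 0x100000001 that the regression test's i64 LEB constants encode.

    // sketch.cc - models the Liftoff address computation; illustrative only.
    #include <cassert>
    #include <cstdint>

    uint64_t effective_address(uint64_t mem_start, uint64_t offset_reg,
                               uint64_t offset_imm, bool zero_extend) {
      if (zero_extend) {
        // What the patch guarantees for 32-bit indices: only the low 32 bits
        // of the register reach the address computation (arm64 via the UXTW
        // extend mode, x64 via the zero-extension invariant it now asserts).
        offset_reg = static_cast<uint32_t>(offset_reg);
      }
      return mem_start + offset_reg + offset_imm;
    }

    int main() {
      const uint64_t mem_start = 0x7f0000000000;  // hypothetical memory base
      // A 32-bit wasm index of 1 in a 64-bit register whose upper half still
      // holds a stale bit from an earlier i64 operation.
      const uint64_t dirty_reg = 0x100000001;
      // Without zero-extension the stale bit adds 4GB to the address,
      // escaping the trap handler's guard region.
      assert(effective_address(mem_start, dirty_reg, 0, false) ==
             mem_start + 0x100000001);
      // With zero-extension only the real index (1) is used.
      assert(effective_address(mem_start, dirty_reg, 0, true) == mem_start + 1);
      return 0;
    }

This mirrors the two halves of the patch: on x64, Load now asserts (in debug builds) that a 32-bit index register is already zero-extended, while on arm64, GetMemOp selects the W view of the offset register with a UXTW extend so the hardware drops the stale bits itself.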