deps: patch V8 to 7.4.288.21 #27265

Closed · wants to merge 2 commits
2 changes: 1 addition & 1 deletion deps/v8/include/v8-version.h
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 7
 #define V8_MINOR_VERSION 4
 #define V8_BUILD_NUMBER 288
-#define V8_PATCH_LEVEL 18
+#define V8_PATCH_LEVEL 21

// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
23 changes: 11 additions & 12 deletions deps/v8/src/arm/macro-assembler-arm.cc
@@ -332,31 +332,30 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,

   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
     UseScratchRegisterScope temps(this);
     Register scratch = temps.Acquire();
 
     DCHECK(!AreAliased(destination, scratch));
     DCHECK(!AreAliased(code_object, scratch));
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+    tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+    b(ne, &if_code_is_off_heap);
 
-    ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-    cmp(scratch, Operand(Builtins::kNoBuiltinId));
-    b(ne, &if_code_is_builtin);
-
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline, the entry point is at
     // Code::raw_instruction_start().
     add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
     jmp(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    // The builtin index is loaded in scratch.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
+    ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
     lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
     add(destination, destination, kRootRegister);
     ldr(destination,
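Note: the same logic change repeats in the arm64, ia32, and x64 ports below. As a reading aid (a sketch, not V8 source), the patched `LoadCodeObjectEntry` is equivalent to the following plain C++; the offsets, the tag value, and the `ReadField` helper are simplified stand-ins for V8's real Code object layout:

```cpp
#include <cstdint>
#include <cstring>

// Stand-in constants; the real values come from V8's Code object layout.
constexpr int kFlagsOffset = 0;
constexpr int kBuiltinIndexOffset = 4;
constexpr uint32_t kIsOffHeapTrampolineMask = 1u << 0;
constexpr int kHeaderSize = 8;
constexpr int kHeapObjectTag = 1;  // heap pointers carry a tag bit

// Untag the object pointer and read a field at the given offset.
template <typename T>
T ReadField(const uint8_t* tagged_object, int offset) {
  T value;
  std::memcpy(&value, tagged_object - kHeapObjectTag + offset, sizeof(value));
  return value;
}

// The patched flow: test the IsOffHeapTrampoline flag, and only an off-heap
// trampoline is redirected through the builtin entry table. The old code
// branched on "has a builtin index" instead, as the removed lines show.
const void* CodeObjectEntry(const uint8_t* code_object,
                            const uintptr_t* builtin_entry_table) {
  uint32_t flags = ReadField<uint32_t>(code_object, kFlagsOffset);
  if (flags & kIsOffHeapTrampolineMask) {
    int32_t index = ReadField<int32_t>(code_object, kBuiltinIndexOffset);
    return reinterpret_cast<const void*>(builtin_entry_table[index]);
  }
  // A regular Code object: the entry point sits right after the header.
  return code_object - kHeapObjectTag + kHeaderSize;
}
```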
22 changes: 11 additions & 11 deletions deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -2054,31 +2054,31 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,

   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
     UseScratchRegisterScope temps(this);
     Register scratch = temps.AcquireX();
 
     DCHECK(!AreAliased(destination, scratch));
     DCHECK(!AreAliased(code_object, scratch));
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
 
-    Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-    Cmp(scratch, Operand(Builtins::kNoBuiltinId));
-    B(ne, &if_code_is_builtin);
+    Ldrsw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+    Tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+    B(ne, &if_code_is_off_heap);
 
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline object, the entry point is at
     // Code::raw_instruction_start().
     Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
     B(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    // The builtin index is loaded in scratch.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
+    Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
     Lsl(destination, scratch, kSystemPointerSizeLog2);
     Add(destination, destination, kRootRegister);
     Ldr(destination,
20 changes: 10 additions & 10 deletions deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1905,24 +1905,24 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,

   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
-    cmp(FieldOperand(code_object, Code::kBuiltinIndexOffset),
-        Immediate(Builtins::kNoBuiltinId));
-    j(not_equal, &if_code_is_builtin);
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    test(FieldOperand(code_object, Code::kFlagsOffset),
+         Immediate(Code::IsOffHeapTrampoline::kMask));
+    j(not_equal, &if_code_is_off_heap);
 
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline, the entry point is at
     // Code::raw_instruction_start().
     Move(destination, code_object);
     add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
     jmp(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
     mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
     mov(destination,
         Operand(kRootRegister, destination, times_system_pointer_size,
91 changes: 53 additions & 38 deletions deps/v8/src/wasm/module-compiler.cc
@@ -129,7 +129,8 @@ class CompilationStateImpl {
   void SetNumberOfFunctionsToCompile(int num_functions);
 
   // Add the callback function to be called on compilation events. Needs to be
-  // set before {AddCompilationUnits} is run.
+  // set before {AddCompilationUnits} is run to ensure that it receives all
+  // events. The callback object must support being deleted from any thread.
   void AddCallback(CompilationState::callback_t);
 
   // Inserts new functions to compile and kicks off compilation.
@@ -153,7 +154,7 @@
   }
 
   bool baseline_compilation_finished() const {
-    base::MutexGuard guard(&mutex_);
+    base::MutexGuard guard(&callbacks_mutex_);
     return outstanding_baseline_units_ == 0 ||
            (compile_mode_ == CompileMode::kTiering &&
             outstanding_tiering_units_ == 0);
@@ -203,8 +204,6 @@
         : func_index(func_index), error(std::move(error)) {}
   };
 
-  void NotifyOnEvent(CompilationEvent event);
-
   NativeModule* const native_module_;
   const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
   const CompileMode compile_mode_;
@@ -236,16 +235,26 @@
   // compiling.
   std::shared_ptr<WireBytesStorage> wire_bytes_storage_;
 
-  int outstanding_baseline_units_ = 0;
-  int outstanding_tiering_units_ = 0;
-
   // End of fields protected by {mutex_}.
   //////////////////////////////////////////////////////////////////////////////
 
-  // Callback functions to be called on compilation events. Only accessible from
-  // the foreground thread.
+  // This mutex protects the callbacks vector, and the counters used to
+  // determine which callbacks to call. The counters plus the callbacks
+  // themselves need to be synchronized to ensure correct order of events.
+  mutable base::Mutex callbacks_mutex_;
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Protected by {callbacks_mutex_}:
+
+  // Callback functions to be called on compilation events.
   std::vector<CompilationState::callback_t> callbacks_;
 
+  int outstanding_baseline_units_ = 0;
+  int outstanding_tiering_units_ = 0;
+
+  // End of fields protected by {callbacks_mutex_}.
+  //////////////////////////////////////////////////////////////////////////////
+
   const int max_background_tasks_ = 0;
 };

@@ -852,6 +861,7 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
 }
 
 AsyncCompileJob::~AsyncCompileJob() {
+  // Note: This destructor always runs on the foreground thread of the isolate.
   background_task_manager_.CancelAndWait();
   // If the runtime objects were not created yet, then initial compilation did
   // not finish yet. In this case we can abort compilation.
@@ -1473,12 +1483,13 @@ CompilationStateImpl::~CompilationStateImpl() {
 void CompilationStateImpl::AbortCompilation() {
   background_compile_token_->Cancel();
   // No more callbacks after abort.
+  base::MutexGuard callbacks_guard(&callbacks_mutex_);
   callbacks_.clear();
 }
 
 void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
   DCHECK(!failed());
-  base::MutexGuard guard(&mutex_);
+  base::MutexGuard guard(&callbacks_mutex_);
   outstanding_baseline_units_ = num_functions;
 
   if (compile_mode_ == CompileMode::kTiering) {
@@ -1487,6 +1498,7 @@ void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
 }
 
 void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
+  base::MutexGuard callbacks_guard(&callbacks_mutex_);
   callbacks_.emplace_back(std::move(callback));
 }
 
@@ -1536,7 +1548,7 @@ CompilationStateImpl::GetNextCompilationUnit() {

 void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
   // This mutex guarantees that events happen in the right order.
-  base::MutexGuard guard(&mutex_);
+  base::MutexGuard guard(&callbacks_mutex_);
 
   // If we are *not* compiling in tiering mode, then all units are counted as
   // baseline units.
@@ -1547,28 +1559,36 @@ void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
   // tiering units.
   DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
 
+  bool baseline_finished = false;
+  bool tiering_finished = false;
   if (is_tiering_unit) {
     DCHECK_LT(0, outstanding_tiering_units_);
     --outstanding_tiering_units_;
-    if (outstanding_tiering_units_ == 0) {
-      // If baseline compilation has not finished yet, then also trigger
-      // {kFinishedBaselineCompilation}.
-      if (outstanding_baseline_units_ > 0) {
-        NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation);
-      }
-      NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation);
-    }
+    tiering_finished = outstanding_tiering_units_ == 0;
+    // If baseline compilation has not finished yet, then also trigger
+    // {kFinishedBaselineCompilation}.
+    baseline_finished = tiering_finished && outstanding_baseline_units_ > 0;
   } else {
     DCHECK_LT(0, outstanding_baseline_units_);
     --outstanding_baseline_units_;
-    if (outstanding_baseline_units_ == 0) {
-      NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation);
-      // If we are not tiering, then we also trigger the "top tier finished"
-      // event when baseline compilation is finished.
-      if (!is_tiering_mode) {
-        NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation);
-      }
-    }
+    // If we are in tiering mode and tiering finished before, then do not
+    // trigger baseline finished.
+    baseline_finished = outstanding_baseline_units_ == 0 &&
+                        (!is_tiering_mode || outstanding_tiering_units_ > 0);
+    // If we are not tiering, then we also trigger the "top tier finished"
+    // event when baseline compilation is finished.
+    tiering_finished = baseline_finished && !is_tiering_mode;
   }
 
+  if (baseline_finished) {
+    for (auto& callback : callbacks_)
+      callback(CompilationEvent::kFinishedBaselineCompilation);
+  }
+  if (tiering_finished) {
+    for (auto& callback : callbacks_)
+      callback(CompilationEvent::kFinishedTopTierCompilation);
+    // Clear the callbacks because no more events will be delivered.
+    callbacks_.clear();
+  }
+
   if (code != nullptr) native_module_->engine()->LogCode(code);
@@ -1648,17 +1668,12 @@ void CompilationStateImpl::SetError(uint32_t func_index,
   if (!set) return;
   // If set successfully, give up ownership.
   compile_error.release();
-  // Schedule a foreground task to call the callback and notify users about the
-  // compile error.
-  NotifyOnEvent(CompilationEvent::kFailedCompilation);
-}
-
-void CompilationStateImpl::NotifyOnEvent(CompilationEvent event) {
-  for (auto& callback : callbacks_) callback(event);
-  // If no more events are expected after this one, clear the callbacks to free
-  // memory. We can safely do this here, as this method is only called from
-  // foreground tasks.
-  if (event >= CompilationEvent::kFirstFinalEvent) callbacks_.clear();
+  base::MutexGuard callbacks_guard(&callbacks_mutex_);
+  for (auto& callback : callbacks_) {
+    callback(CompilationEvent::kFailedCompilation);
+  }
+  // No more callbacks after an error.
+  callbacks_.clear();
 }
 
 void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
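Note: the net effect of the module-compiler.cc changes is that the callback list and the outstanding-unit counters now share one dedicated callbacks_mutex_, and callbacks run directly under that lock (the foreground-only NotifyOnEvent helper is deleted). A minimal sketch of the pattern, with std:: primitives and invented names standing in for V8's base::Mutex and real interfaces:

```cpp
#include <functional>
#include <mutex>
#include <vector>

enum class CompilationEvent { kFinishedBaselineCompilation, kFailedCompilation };

class CompilationTracker {
 public:
  // Register before adding work so the callback cannot miss an event.
  void AddCallback(std::function<void(CompilationEvent)> callback) {
    std::lock_guard<std::mutex> guard(callbacks_mutex_);
    callbacks_.push_back(std::move(callback));
  }

  void SetNumberOfUnits(int n) {
    std::lock_guard<std::mutex> guard(callbacks_mutex_);
    outstanding_units_ = n;
  }

  // Called from any compilation thread when one unit finishes; the caller
  // must call it exactly once per unit. Holding the same mutex for the
  // counter and the callbacks guarantees that events are delivered exactly
  // once and in a consistent order, no matter which thread finishes last.
  void OnUnitFinished() {
    std::lock_guard<std::mutex> guard(callbacks_mutex_);
    if (--outstanding_units_ > 0) return;
    for (auto& callback : callbacks_) {
      callback(CompilationEvent::kFinishedBaselineCompilation);
    }
    callbacks_.clear();  // no more events after the final one
  }

 private:
  std::mutex callbacks_mutex_;  // protects callbacks_ and outstanding_units_
  std::vector<std::function<void(CompilationEvent)>> callbacks_;
  int outstanding_units_ = 0;
};
```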
20 changes: 10 additions & 10 deletions deps/v8/src/x64/macro-assembler-x64.cc
@@ -1588,24 +1588,24 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,

   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
-    cmpl(FieldOperand(code_object, Code::kBuiltinIndexOffset),
-         Immediate(Builtins::kNoBuiltinId));
-    j(not_equal, &if_code_is_builtin);
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    testl(FieldOperand(code_object, Code::kFlagsOffset),
+          Immediate(Code::IsOffHeapTrampoline::kMask));
+    j(not_equal, &if_code_is_off_heap);
 
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline, the entry point is at
     // Code::raw_instruction_start().
     Move(destination, code_object);
     addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
     jmp(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
     movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
     movq(destination,
          Operand(kRootRegister, destination, times_system_pointer_size,
2 changes: 1 addition & 1 deletion deps/v8/src/zone/accounting-allocator.cc
@@ -24,7 +24,7 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   if (memory == nullptr) return nullptr;
 
   size_t current =
-      current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed);
+      current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed) + bytes;
   size_t max = max_memory_usage_.load(std::memory_order_relaxed);
   while (current > max && !max_memory_usage_.compare_exchange_weak(
                               max, current, std::memory_order_relaxed)) {
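Note: this fix works because std::atomic's fetch_add returns the value the atomic held before the addition. Without the trailing "+ bytes", current was the pre-allocation usage, so the max watermark lagged one allocation behind. A standalone demonstration of the semantics (values are illustrative):

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>

int main() {
  std::atomic<size_t> current_memory_usage{100};
  const size_t bytes = 32;

  // fetch_add returns the PREVIOUS value, not the updated one.
  size_t previous =
      current_memory_usage.fetch_add(bytes, std::memory_order_relaxed);
  assert(previous == 100);                    // value before the addition
  assert(current_memory_usage.load() == 132); // the stored value did grow

  // The usage after this allocation is therefore the return value plus the
  // amount just added, which is exactly the "+ bytes" the patch appends.
  size_t current = previous + bytes;
  assert(current == 132);
  return 0;
}
```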
4 changes: 4 additions & 0 deletions deps/v8/test/cctest/cctest.status
@@ -615,6 +615,10 @@
   'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchThrow': [SKIP],
   'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchTrapTypeError': [SKIP],
 
+  # --interpreted-frames-native-stack tests
+  'test-log/ExternalCodeEventListenerWithInterpretedFramesNativeStack': [SKIP],
+  'test-log/LogInterpretedFramesNativeStack': [SKIP],
+
   # Crashes on native arm.
   'test-macro-assembler-arm/ExtractLane': [PASS, ['arch == arm and not simulator_run', SKIP]],
   'test-macro-assembler-arm/LoadAndStoreWithRepresentation': [PASS, ['arch == arm and not simulator_run', SKIP]],
28 changes: 28 additions & 0 deletions deps/v8/test/cctest/test-allocation.cc
@@ -105,6 +105,34 @@ TEST(AccountingAllocatorOOM) {
   CHECK_EQ(result == nullptr, platform.oom_callback_called);
 }
 
+TEST(AccountingAllocatorCurrentAndMax) {
+  AllocationPlatform platform;
+  v8::internal::AccountingAllocator allocator;
+  static constexpr size_t kAllocationSizes[] = {51, 231, 27};
+  std::vector<v8::internal::Segment*> segments;
+  CHECK_EQ(0, allocator.GetCurrentMemoryUsage());
+  CHECK_EQ(0, allocator.GetMaxMemoryUsage());
+  size_t expected_current = 0;
+  size_t expected_max = 0;
+  for (size_t size : kAllocationSizes) {
+    segments.push_back(allocator.AllocateSegment(size));
+    CHECK_NOT_NULL(segments.back());
+    CHECK_EQ(size, segments.back()->total_size());
+    expected_current += size;
+    if (expected_current > expected_max) expected_max = expected_current;
+    CHECK_EQ(expected_current, allocator.GetCurrentMemoryUsage());
+    CHECK_EQ(expected_max, allocator.GetMaxMemoryUsage());
+  }
+  for (auto* segment : segments) {
+    expected_current -= segment->total_size();
+    allocator.ReturnSegment(segment);
+    CHECK_EQ(expected_current, allocator.GetCurrentMemoryUsage());
+  }
+  CHECK_EQ(expected_max, allocator.GetMaxMemoryUsage());
+  CHECK_EQ(0, allocator.GetCurrentMemoryUsage());
+  CHECK(!platform.oom_callback_called);
+}
+
 TEST(MallocedOperatorNewOOM) {
   AllocationPlatform platform;
   CHECK(!platform.oom_callback_called);