From ec023a7a793f2e2e457e811e4c6ab9e81d5feeb0 Mon Sep 17 00:00:00 2001
From: Keyhan Vakil
Date: Sun, 12 Nov 2023 00:14:48 +0000
Subject: [PATCH] deps: V8: cherry-pick 475c8cdf9a95
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Original commit message:

    [ptr-compr] Fix multi-cage mode

    This CL introduces PtrComprCageAccessScope which sets/restores current
    thread's pointer compression cage base values. It's supposed to be
    used by V8 jobs accessing V8 heap outside of v8::Isolate::Scope or
    i::LocalHeap or i::LocalIsolate scopes (they already ensure that the
    cage base values are properly initialized).

    For all other build modes PtrComprCageAccessScope is a no-op.

    For simplicity reasons the multi-cage mode is made incompatible with
    external code space.

    Bug: v8:13788, v8:14292
    Change-Id: I06c2d19a1eb7254fa7af07a17617e22d98abea9f
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4846592
    Reviewed-by: Jakob Linke
    Reviewed-by: Jakob Kummerow
    Commit-Queue: Igor Sheludko
    Reviewed-by: Dominik Inführ
    Cr-Commit-Position: refs/heads/main@{#90075}

Refs: https://github.com/v8/v8/commit/475c8cdf9a951bb06da3084794a0f659f8ef36c2
PR-URL: https://github.com/nodejs/node/pull/50680
Refs: https://bugs.chromium.org/p/v8/issues/detail?id=14292
Reviewed-By: Ben Noordhuis
Reviewed-By: Darshan Sen
Reviewed-By: Matteo Collina
Reviewed-By: Joyee Cheung
Reviewed-By: James M Snell
---
 deps/v8/BUILD.gn | 7 ++
 deps/v8/src/api/api.cc | 16 ++++
 deps/v8/src/common/ptr-compr-inl.h | 31 +++++-
 deps/v8/src/common/ptr-compr.h | 27 +++++-
 deps/v8/src/execution/isolate.cc | 8 ++
 deps/v8/src/execution/v8threads.cc | 12 +++
 deps/v8/src/heap/collection-barrier.cc | 7 +-
 deps/v8/src/heap/concurrent-marking.cc | 10 ++
 deps/v8/src/heap/incremental-marking-job.cc | 3 +
 deps/v8/src/heap/local-heap.cc | 1 +
 deps/v8/src/heap/local-heap.h | 2 +
 deps/v8/src/heap/mark-compact.cc | 14 +++
 deps/v8/src/heap/mark-compact.h | 2 +
 deps/v8/src/heap/scavenger.cc | 4 +
 deps/v8/src/heap/sweeper.cc | 10 ++
 deps/v8/src/wasm/c-api.cc | 64 ++++++++++---
 deps/v8/src/wasm/module-compiler.cc | 3 +
 .../cctest/heap/test-concurrent-allocation.cc | 96 ++++++++++---------
 deps/v8/test/cctest/heap/test-heap.cc | 51 ++++++----
 deps/v8/test/cctest/test-debug-helper.cc | 4 +
 deps/v8/test/cctest/test-serialize.cc | 4 +-
 deps/v8/test/fuzzer/wasm-async.cc | 3 +-
 deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 7 +-
 deps/v8/test/fuzzer/wasm.cc | 3 +-
 deps/v8/test/wasm-api-tests/serialize.cc | 20 ++--
 .../debug_helper/debug-helper-internal.cc | 4 +-
 .../debug_helper/get-object-properties.cc | 4 +-
 27 files changed, 316 insertions(+), 101 deletions(-)

diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index dd97c4f922c81a..7e1e4b833d9c3c 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -500,6 +500,7 @@ if (v8_enable_short_builtin_calls == "") {
 if (v8_enable_external_code_space == "") {
   v8_enable_external_code_space =
       v8_enable_pointer_compression &&
+      v8_enable_pointer_compression_shared_cage &&
       (v8_current_cpu == "x64" || v8_current_cpu == "arm64")
 }
 if (v8_enable_maglev == "") {
@@ -683,6 +684,12 @@ assert(
     !v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
     "Can't share a pointer compression cage if pointers aren't compressed")
 
+assert(
+    !v8_enable_pointer_compression ||
+        v8_enable_pointer_compression_shared_cage ||
+        !v8_enable_external_code_space,
+    "Multi-cage pointer compression mode is not compatible with external code space")
+
 assert(
     !v8_enable_pointer_compression_shared_cage ||
v8_current_cpu == "x64" || v8_current_cpu == "arm64" || v8_current_cpu == "riscv64" || diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index ddf93efc1bd8da..2708ad9b2e55c7 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -9227,6 +9227,14 @@ void Isolate::TerminateExecution() { bool Isolate::IsExecutionTerminating() { i::Isolate* i_isolate = reinterpret_cast(this); +#ifdef DEBUG + // This method might be called on a thread that's not bound to any Isolate + // and thus pointer compression schemes might have cage base value unset. + // Read-only roots accessors contain type DCHECKs which require access to + // V8 heap in order to check the object type. So, allow heap access here + // to let the checks work. + i::PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); +#endif // DEBUG return i_isolate->is_execution_terminating(); } @@ -9898,6 +9906,14 @@ void Isolate::LowMemoryNotification() { i::NestedTimedHistogramScope idle_notification_scope( i_isolate->counters()->gc_low_memory_notification()); TRACE_EVENT0("v8", "V8.GCLowMemoryNotification"); +#ifdef DEBUG + // This method might be called on a thread that's not bound to any Isolate + // and thus pointer compression schemes might have cage base value unset. + // Read-only roots accessors contain type DCHECKs which require access to + // V8 heap in order to check the object type. So, allow heap access here + // to let the checks work. + i::PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); +#endif // DEBUG i_isolate->heap()->CollectAllAvailableGarbage( i::GarbageCollectionReason::kLowMemoryNotification); } diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h index abba4502a29c65..f3b3a280a52c2a 100644 --- a/deps/v8/src/common/ptr-compr-inl.h +++ b/deps/v8/src/common/ptr-compr-inl.h @@ -91,11 +91,16 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) { template Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr, Tagged_t raw_value) { -#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) +#ifdef V8_COMPRESS_POINTERS Address cage_base = base(); +#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE + DCHECK_WITH_MSG(cage_base != kNullAddress, + "V8HeapCompressionScheme::base is not initialized for " + "current thread"); +#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE #else Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr); -#endif +#endif // V8_COMPRESS_POINTERS Address result = cage_base + static_cast
   V8_ASSUME(static_cast<Tagged_t>(result) == raw_value);
   return result;
@@ -191,11 +196,16 @@ Address ExternalCodeCompressionScheme::DecompressTaggedSigned(
 template <typename TOnHeapAddress>
 Address ExternalCodeCompressionScheme::DecompressTagged(
     TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
-#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+#ifdef V8_COMPRESS_POINTERS
   Address cage_base = base();
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+  DCHECK_WITH_MSG(cage_base != kNullAddress,
+                  "ExternalCodeCompressionScheme::base is not initialized for "
+                  "current thread");
+#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
 #else
   Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
-#endif
+#endif  // V8_COMPRESS_POINTERS
   Address result = cage_base + static_cast<Address>(raw_value);
   V8_ASSUME(static_cast<Tagged_t>(result) == raw_value);
   return result;
@@ -275,6 +285,19 @@ V8_INLINE PtrComprCageBase GetPtrComprCageBase(Tagged<HeapObject> object) {
   return GetPtrComprCageBaseFromOnHeapAddress(object.ptr());
 }
 
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+
+PtrComprCageAccessScope::PtrComprCageAccessScope(Isolate* isolate)
+    : cage_base_(V8HeapCompressionScheme::base()) {
+  V8HeapCompressionScheme::InitBase(isolate->cage_base());
+}
+
+PtrComprCageAccessScope::~PtrComprCageAccessScope() {
+  V8HeapCompressionScheme::InitBase(cage_base_);
+}
+
+#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index ca5f0e1f9b328d..c60e66eca500a7 100644
--- a/deps/v8/src/common/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -55,8 +55,8 @@ class V8HeapCompressionScheme {
  private:
   // These non-inlined accessors to base_ field are used in component builds
   // where cross-component access to thread local variables is not allowed.
-  static Address base_non_inlined();
-  static void set_base_non_inlined(Address base);
+  static V8_EXPORT_PRIVATE Address base_non_inlined();
+  static V8_EXPORT_PRIVATE void set_base_non_inlined(Address base);
 
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   static V8_EXPORT_PRIVATE uintptr_t base_ V8_CONSTINIT;
@@ -156,6 +156,29 @@ static inline void WriteMaybeUnalignedValue(Address p, V value) {
   }
 }
 
+// When multi-cage pointer compression mode is enabled this scope object
+// saves current cage's base values and sets them according to given Isolate.
+// For all other configurations this scope object is a no-op.
+class PtrComprCageAccessScope final {
+ public:
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+  V8_INLINE explicit PtrComprCageAccessScope(Isolate* isolate);
+  V8_INLINE ~PtrComprCageAccessScope();
+#else
+  V8_INLINE explicit PtrComprCageAccessScope(Isolate* isolate) {}
+  V8_INLINE ~PtrComprCageAccessScope() {}
+#endif
+
+ private:
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+  const Address cage_base_;
+#ifdef V8_EXTERNAL_CODE_SPACE
+// In case this configuration is necessary the code cage base must be saved too.
+#error Multi-cage pointer compression with external code space is not supported
+#endif  // V8_EXTERNAL_CODE_SPACE
+#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+};
+
 }  // namespace v8::internal
 
 #endif  // V8_COMMON_PTR_COMPR_H_
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index cbe353e95f5584..9663fb8f65bbe1 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -3933,6 +3933,14 @@ Isolate::~Isolate() {
 
 void Isolate::InitializeThreadLocal() {
   thread_local_top()->Initialize(this);
+#ifdef DEBUG
+  // This method might be called on a thread that's not bound to any Isolate
+  // and thus pointer compression schemes might have cage base value unset.
+  // Read-only roots accessors contain type DCHECKs which require access to
+  // V8 heap in order to check the object type. So, allow heap access here
+  // to let the checks work.
+ i::PtrComprCageAccessScope ptr_compr_cage_access_scope(this); +#endif // DEBUG clear_pending_exception(); clear_pending_message(); clear_scheduled_exception(); diff --git a/deps/v8/src/execution/v8threads.cc b/deps/v8/src/execution/v8threads.cc index be4f4a7f209e3e..9d4437c75e62cd 100644 --- a/deps/v8/src/execution/v8threads.cc +++ b/deps/v8/src/execution/v8threads.cc @@ -124,6 +124,10 @@ bool ThreadManager::RestoreThread() { InitThread(access); return false; } + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate_); + ThreadState* state = per_thread->thread_state(); char* from = state->data(); from = isolate_->handle_scope_implementer()->RestoreThread(from); @@ -274,6 +278,14 @@ void ThreadManager::EagerlyArchiveThread() { } void ThreadManager::FreeThreadResources() { +#ifdef DEBUG + // This method might be called on a thread that's not bound to any Isolate + // and thus pointer compression schemes might have cage base value unset. + // Read-only roots accessors contain type DCHECKs which require access to + // V8 heap in order to check the object type. So, allow heap access here + // to let the checks work. + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate_); +#endif // DEBUG DCHECK(!isolate_->has_pending_exception()); DCHECK(!isolate_->external_caught_exception()); DCHECK_NULL(isolate_->try_catch_handler()); diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc index 1bc9a972fab5a3..9cafa96eaaaac2 100644 --- a/deps/v8/src/heap/collection-barrier.cc +++ b/deps/v8/src/heap/collection-barrier.cc @@ -51,7 +51,12 @@ class BackgroundCollectionInterruptTask : public CancelableTask { private: // v8::internal::CancelableTask overrides. - void RunInternal() override { heap_->CheckCollectionRequested(); } + void RunInternal() override { + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope(heap_->isolate()); + heap_->CheckCollectionRequested(); + } Heap* heap_; }; diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc index 14cf52df6e9b8a..37ab77f0747ee7 100644 --- a/deps/v8/src/heap/concurrent-marking.cc +++ b/deps/v8/src/heap/concurrent-marking.cc @@ -153,6 +153,11 @@ class ConcurrentMarking::JobTaskMajor : public v8::JobTask { // v8::JobTask overrides. void Run(JobDelegate* delegate) override { + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope( + concurrent_marking_->heap_->isolate()); + if (delegate->IsJoiningThread()) { // TRACE_GC is not needed here because the caller opens the right scope. concurrent_marking_->RunMajor(delegate, code_flush_mode_, @@ -197,6 +202,11 @@ class ConcurrentMarking::JobTaskMinor : public v8::JobTask { // v8::JobTask overrides. void Run(JobDelegate* delegate) override { + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. 
+ PtrComprCageAccessScope ptr_compr_cage_access_scope( + concurrent_marking_->heap_->isolate()); + if (delegate->IsJoiningThread()) { TRACE_GC_WITH_FLOW(concurrent_marking_->heap_->tracer(), GCTracer::Scope::MINOR_MS_MARK_PARALLEL, trace_id_, diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc index 0b0b5de2013b54..03be503d3ba92f 100644 --- a/deps/v8/src/heap/incremental-marking-job.cc +++ b/deps/v8/src/heap/incremental-marking-job.cc @@ -91,6 +91,9 @@ void IncrementalMarkingJob::Task::RunInternal() { VMState state(isolate()); TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.IncrementalMarkingJob.Task"); + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate()); isolate()->stack_guard()->ClearStartIncrementalMarking(); diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc index 426466a48caa33..12f1dfe8c70be8 100644 --- a/deps/v8/src/heap/local-heap.cc +++ b/deps/v8/src/heap/local-heap.cc @@ -48,6 +48,7 @@ void LocalHeap::VerifyCurrent() const { LocalHeap::LocalHeap(Heap* heap, ThreadKind kind, std::unique_ptr persistent_handles) : heap_(heap), + ptr_compr_cage_access_scope_(heap->isolate()), is_main_thread_(kind == ThreadKind::kMain), state_(ThreadState::Parked()), allocation_failed_(false), diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h index 1062f87b928c6c..9868c1ca29b0c0 100644 --- a/deps/v8/src/heap/local-heap.h +++ b/deps/v8/src/heap/local-heap.h @@ -13,6 +13,7 @@ #include "src/base/platform/condition-variable.h" #include "src/base/platform/mutex.h" #include "src/common/assert-scope.h" +#include "src/common/ptr-compr.h" #include "src/execution/isolate.h" #include "src/handles/global-handles.h" #include "src/handles/persistent-handles.h" @@ -348,6 +349,7 @@ class V8_EXPORT_PRIVATE LocalHeap { void SetUpSharedMarking(); Heap* heap_; + V8_NO_UNIQUE_ADDRESS PtrComprCageAccessScope ptr_compr_cage_access_scope_; bool is_main_thread_; AtomicThreadState state_; diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc index 189e023cc147f2..20e381ae768d7c 100644 --- a/deps/v8/src/heap/mark-compact.cc +++ b/deps/v8/src/heap/mark-compact.cc @@ -2522,6 +2522,10 @@ class ClearStringTableJobItem final : public ParallelClearingJob::ClearingItem { GCTracer::Scope::MC_CLEAR_STRING_TABLE)) {} void Run(JobDelegate* delegate) final { + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate_); + if (isolate_->OwnsStringTables()) { TRACE_GC1_WITH_FLOW(isolate_->heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE, @@ -4135,6 +4139,11 @@ class PageEvacuationJob : public v8::JobTask { tracer_->CurrentEpoch(GCTracer::Scope::MC_EVACUATE)) {} void Run(JobDelegate* delegate) override { + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. 
+ PtrComprCageAccessScope ptr_compr_cage_access_scope( + collector_->heap()->isolate()); + Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get(); if (delegate->IsJoiningThread()) { TRACE_GC_WITH_FLOW(tracer_, GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL, @@ -4471,6 +4480,11 @@ class PointersUpdatingJob : public v8::JobTask { tracer_->CurrentEpoch(GCTracer::Scope::MC_EVACUATE)) {} void Run(JobDelegate* delegate) override { + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope( + collector_->heap()->isolate()); + if (delegate->IsJoiningThread()) { TRACE_GC_WITH_FLOW(tracer_, GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL, diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h index ceeb596edac85e..a8a56db74b7442 100644 --- a/deps/v8/src/heap/mark-compact.h +++ b/deps/v8/src/heap/mark-compact.h @@ -161,6 +161,8 @@ class MarkCompactCollector final { return use_background_threads_in_cycle_; } + Heap* heap() { return heap_; } + explicit MarkCompactCollector(Heap* heap); ~MarkCompactCollector(); diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc index 7760a6a540e1e5..cd2d9a49546fe3 100644 --- a/deps/v8/src/heap/scavenger.cc +++ b/deps/v8/src/heap/scavenger.cc @@ -200,6 +200,10 @@ ScavengerCollector::JobTask::JobTask( void ScavengerCollector::JobTask::Run(JobDelegate* delegate) { DCHECK_LT(delegate->GetTaskId(), scavengers_->size()); + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope(outer_->heap_->isolate()); + Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get(); if (delegate->IsJoiningThread()) { TRACE_GC_WITH_FLOW(outer_->heap_->tracer(), diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc index c8649bfa21a9ec..7a712cf200aeed 100644 --- a/deps/v8/src/heap/sweeper.cc +++ b/deps/v8/src/heap/sweeper.cc @@ -144,6 +144,11 @@ class Sweeper::MajorSweeperJob final : public JobTask { private: void RunImpl(JobDelegate* delegate, bool is_joining_thread) { + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope( + sweeper_->heap_->isolate()); + DCHECK(sweeper_->major_sweeping_in_progress()); const int offset = delegate->GetTaskId(); DCHECK_LT(offset, concurrent_sweepers.size()); @@ -213,6 +218,11 @@ class Sweeper::MinorSweeperJob final : public JobTask { tracer_, sweeper_->GetTracingScope(NEW_SPACE, is_joining_thread), is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground, trace_id_, TRACE_EVENT_FLAG_FLOW_IN); + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. + PtrComprCageAccessScope ptr_compr_cage_access_scope( + sweeper_->heap_->isolate()); + if (!concurrent_sweeper.ConcurrentSweepSpace(delegate)) return; concurrent_sweeper.ConcurrentSweepPromotedPages(delegate); } diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc index 333708e85d0e45..0f259ee1ca4c45 100644 --- a/deps/v8/src/wasm/c-api.cc +++ b/deps/v8/src/wasm/c-api.cc @@ -59,6 +59,17 @@ namespace wasm { namespace { +// Multi-cage pointer compression mode related note. 
+// Wasm C-Api is allowed to be used from a thread that's not bound to any +// Isolate. As a result, in a multi-cage pointer compression mode it's not +// guaranteed that current pointer compression cage base value is initialized +// for current thread (see V8HeapCompressionScheme::base_) which makes it +// impossible to read compressed pointers from V8 heap objects. +// This scope ensures that the pointer compression base value is set according +// to respective Wasm C-Api object. +// For all other configurations this scope is a no-op. +using PtrComprCageAccessScope = i::PtrComprCageAccessScope; + auto ReadLebU64(const byte_t** pos) -> uint64_t { uint64_t n = 0; uint64_t shift = 0; @@ -414,12 +425,16 @@ void CheckAndHandleInterrupts(i::Isolate* isolate) { // Stores StoreImpl::~StoreImpl() { + { #ifdef DEBUG - reinterpret_cast(isolate_)->heap()->PreciseCollectAllGarbage( - i::GCFlag::kForced, i::GarbageCollectionReason::kTesting, - v8::kNoGCCallbackFlags); + i::Isolate* i_isolate = reinterpret_cast(isolate_); + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); + i_isolate->heap()->PreciseCollectAllGarbage( + i::GCFlag::kForced, i::GarbageCollectionReason::kTesting, + v8::kNoGCCallbackFlags); #endif - context()->Exit(); + context()->Exit(); + } isolate_->Dispose(); delete create_params_.array_buffer_allocator; } @@ -451,6 +466,7 @@ void StoreImpl::SetHostInfo(i::Handle object, void* info, } void* StoreImpl::GetHostInfo(i::Handle key) { + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate()); i::Tagged raw = i::EphemeronHashTable::cast(host_info_map_->table())->Lookup(key); if (IsTheHole(raw, i_isolate())) return nullptr; @@ -909,7 +925,12 @@ class RefImpl { i::Isolate* isolate() const { return val_->GetIsolate(); } - i::Handle v8_object() const { return i::Handle::cast(val_); } + i::Handle v8_object() const { +#ifdef DEBUG + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate()); +#endif // DEBUG + return i::Handle::cast(val_); + } void* get_host_info() const { return store()->GetHostInfo(v8_object()); } @@ -1024,7 +1045,7 @@ auto Trap::make(Store* store_abs, const Message& message) -> own { auto Trap::message() const -> Message { auto isolate = impl(this)->isolate(); - v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate()); + v8::Isolate::Scope isolate_scope(reinterpret_cast(isolate)); i::HandleScope handle_scope(isolate); i::Handle message = @@ -1044,6 +1065,7 @@ own GetInstance(StoreImpl* store, own CreateFrameFromInternal(i::Handle frames, int index, i::Isolate* isolate, StoreImpl* store) { + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); i::Handle frame(i::CallSiteInfo::cast(frames->get(index)), isolate); i::Handle instance(frame->GetWasmInstance(), isolate); @@ -1059,6 +1081,7 @@ own CreateFrameFromInternal(i::Handle frames, int index, own Trap::origin() const { i::Isolate* isolate = impl(this)->isolate(); + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::HandleScope handle_scope(isolate); i::Handle frames = @@ -1071,6 +1094,7 @@ own Trap::origin() const { ownvec Trap::trace() const { i::Isolate* isolate = impl(this)->isolate(); + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); i::HandleScope handle_scope(isolate); i::Handle frames = @@ -1122,6 +1146,7 @@ auto Module::validate(Store* store_abs, const vec& binary) -> bool { i::wasm::ModuleWireBytes bytes( {reinterpret_cast(binary.get()), binary.size()}); i::Isolate* isolate = impl(store_abs)->i_isolate(); + PtrComprCageAccessScope 
ptr_compr_cage_access_scope(isolate); i::HandleScope scope(isolate); i::wasm::WasmFeatures features = i::wasm::WasmFeatures::FromIsolate(isolate); return i::wasm::GetWasmEngine()->SyncValidate(isolate, features, bytes); @@ -1185,6 +1210,7 @@ ownvec ExportsImpl(i::Handle module_obj) { } auto Module::exports() const -> ownvec { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); return ExportsImpl(impl(this)->v8_object()); } @@ -1192,6 +1218,7 @@ auto Module::exports() const -> ownvec { // If no TurboFan code existed before calling this function, then the call to // {serialize} may take a long time. auto Module::serialize() const -> vec { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::wasm::NativeModule* native_module = impl(this)->v8_object()->native_module(); native_module->compilation_state()->TierUpAllFunctions(); @@ -1291,6 +1318,7 @@ Extern::~Extern() = default; auto Extern::copy() const -> own { return impl(this)->copy(); } auto Extern::kind() const -> ExternKind { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::Handle obj = impl(this)->v8_object(); if (i::WasmExportedFunction::IsWasmExportedFunction(*obj)) { return wasm::EXTERN_FUNC; @@ -1469,6 +1497,7 @@ auto Func::make(Store* store, const FuncType* type, callback_with_env callback, } auto Func::type() const -> own { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::Handle func = impl(this)->v8_object(); if (i::WasmCapiFunction::IsWasmCapiFunction(*func)) { return SignatureHelper::Deserialize(SignatureHelper::GetSig(func)); @@ -1483,6 +1512,7 @@ auto Func::type() const -> own { } auto Func::param_arity() const -> size_t { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::Handle func = impl(this)->v8_object(); if (i::WasmCapiFunction::IsWasmCapiFunction(*func)) { return i::wasm::SerializedSignatureHelper::ParamCount( @@ -1497,6 +1527,7 @@ auto Func::param_arity() const -> size_t { } auto Func::result_arity() const -> size_t { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::Handle func = impl(this)->v8_object(); if (i::WasmCapiFunction::IsWasmCapiFunction(*func)) { return i::wasm::SerializedSignatureHelper::ReturnCount( @@ -1853,6 +1884,7 @@ auto Global::type() const -> own { } auto Global::get() const -> Val { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::Handle v8_global = impl(this)->v8_object(); switch (v8_global->type().kind()) { case i::wasm::kI32: @@ -2008,7 +2040,8 @@ auto Table::type() const -> own { auto Table::get(size_t index) const -> own { i::Handle table = impl(this)->v8_object(); if (index >= static_cast(table->current_length())) return own(); - i::Isolate* isolate = table->GetIsolate(); + i::Isolate* isolate = impl(this)->isolate(); + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); i::HandleScope handle_scope(isolate); i::Handle result = i::WasmTableObject::Get(isolate, table, static_cast(index)); @@ -2026,8 +2059,8 @@ auto Table::get(size_t index) const -> own { auto Table::set(size_t index, const Ref* ref) -> bool { i::Handle table = impl(this)->v8_object(); if (index >= static_cast(table->current_length())) return false; - i::Isolate* isolate = table->GetIsolate(); - v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate()); + i::Isolate* isolate = impl(this)->isolate(); + v8::Isolate::Scope isolate_scope(reinterpret_cast(isolate)); i::HandleScope handle_scope(isolate); 
i::Handle obj = WasmRefToV8(isolate, ref); const char* error_message; @@ -2042,13 +2075,14 @@ auto Table::set(size_t index, const Ref* ref) -> bool { // TODO(jkummerow): Having Table::size_t shadowing "std" size_t is ugly. auto Table::size() const -> size_t { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); return impl(this)->v8_object()->current_length(); } auto Table::grow(size_t delta, const Ref* ref) -> bool { i::Handle table = impl(this)->v8_object(); - i::Isolate* isolate = table->GetIsolate(); - v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate()); + i::Isolate* isolate = impl(this)->isolate(); + v8::Isolate::Scope isolate_scope(reinterpret_cast(isolate)); i::HandleScope scope(isolate); i::Handle obj = WasmRefToV8(isolate, ref); const char* error_message; @@ -2101,6 +2135,7 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own { } auto Memory::type() const -> own { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); i::Handle memory = impl(this)->v8_object(); uint32_t min = static_cast(memory->array_buffer()->byte_length() / i::wasm::kWasmPageSize); @@ -2110,15 +2145,18 @@ auto Memory::type() const -> own { } auto Memory::data() const -> byte_t* { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); return reinterpret_cast( impl(this)->v8_object()->array_buffer()->backing_store()); } auto Memory::data_size() const -> size_t { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); return impl(this)->v8_object()->array_buffer()->byte_length(); } auto Memory::size() const -> pages_t { + PtrComprCageAccessScope ptr_compr_cage_access_scope(impl(this)->isolate()); return static_cast( impl(this)->v8_object()->array_buffer()->byte_length() / i::wasm::kWasmPageSize); @@ -2126,8 +2164,8 @@ auto Memory::size() const -> pages_t { auto Memory::grow(pages_t delta) -> bool { i::Handle memory = impl(this)->v8_object(); - i::Isolate* isolate = memory->GetIsolate(); - v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate()); + i::Isolate* isolate = impl(this)->isolate(); + v8::Isolate::Scope isolate_scope(reinterpret_cast(isolate)); i::HandleScope handle_scope(isolate); int32_t old = i::WasmMemoryObject::Grow(isolate, memory, delta); return old != -1; diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc index f6ac958579af5b..bbb29f7f47b299 100644 --- a/deps/v8/src/wasm/module-compiler.cc +++ b/deps/v8/src/wasm/module-compiler.cc @@ -1983,6 +1983,9 @@ class AsyncCompileJSToWasmWrapperJob final } TRACE_EVENT0("v8.wasm", "wasm.JSToWasmWrapperCompilation"); + // In case multi-cage pointer compression mode is enabled ensure that + // current thread's cage base values are properly initialized. 
+ PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); while (true) { DCHECK_EQ(isolate, wrapper_unit->isolate()); wrapper_unit->Execute(); diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc index c46814e15ca465..12143306d24ab7 100644 --- a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc +++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc @@ -133,8 +133,10 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) { v8::Isolate* isolate = v8::Isolate::New(create_params); Isolate* i_isolate = reinterpret_cast(isolate); - AllocateSomeObjects(i_isolate->main_thread_local_heap()); - + { + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); + AllocateSomeObjects(i_isolate->main_thread_local_heap()); + } isolate->Dispose(); } @@ -383,36 +385,38 @@ UNINITIALIZED_TEST(ConcurrentBlackAllocation) { v8::Isolate* isolate = v8::Isolate::New(create_params); Isolate* i_isolate = reinterpret_cast(isolate); Heap* heap = i_isolate->heap(); + { + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); - std::vector
objects; + std::vector
objects; - base::Semaphore sema_white(0); - base::Semaphore sema_marking_started(0); + base::Semaphore sema_white(0); + base::Semaphore sema_marking_started(0); - auto thread = std::make_unique( - heap, &objects, &sema_white, &sema_marking_started); - CHECK(thread->Start()); + auto thread = std::make_unique( + heap, &objects, &sema_white, &sema_marking_started); + CHECK(thread->Start()); - sema_white.Wait(); - heap->StartIncrementalMarking(i::GCFlag::kNoFlags, - i::GarbageCollectionReason::kTesting); - sema_marking_started.Signal(); + sema_white.Wait(); + heap->StartIncrementalMarking(i::GCFlag::kNoFlags, + i::GarbageCollectionReason::kTesting); + sema_marking_started.Signal(); - thread->Join(); + thread->Join(); - const int kObjectsAllocatedPerIteration = 2; + const int kObjectsAllocatedPerIteration = 2; - for (int i = 0; i < kNumIterations * kObjectsAllocatedPerIteration; i++) { - Address address = objects[i]; - Tagged object = HeapObject::FromAddress(address); + for (int i = 0; i < kNumIterations * kObjectsAllocatedPerIteration; i++) { + Address address = objects[i]; + Tagged object = HeapObject::FromAddress(address); - if (i < kWhiteIterations * kObjectsAllocatedPerIteration) { - CHECK(heap->marking_state()->IsUnmarked(object)); - } else { - CHECK(heap->marking_state()->IsMarked(object)); + if (i < kWhiteIterations * kObjectsAllocatedPerIteration) { + CHECK(heap->marking_state()->IsUnmarked(object)); + } else { + CHECK(heap->marking_state()->IsMarked(object)); + } } } - isolate->Dispose(); } @@ -449,34 +453,35 @@ UNINITIALIZED_TEST(ConcurrentWriteBarrier) { v8::Isolate* isolate = v8::Isolate::New(create_params); Isolate* i_isolate = reinterpret_cast(isolate); Heap* heap = i_isolate->heap(); - - Tagged fixed_array; - Tagged value; { - HandleScope handle_scope(i_isolate); - Handle fixed_array_handle( - i_isolate->factory()->NewFixedArray(1)); - Handle value_handle( - i_isolate->factory()->NewHeapNumber(1.1)); - fixed_array = *fixed_array_handle; - value = *value_handle; - } - heap->StartIncrementalMarking(i::GCFlag::kNoFlags, - i::GarbageCollectionReason::kTesting); - CHECK(heap->marking_state()->IsUnmarked(value)); - - // Mark host |fixed_array| to trigger the barrier. - heap->marking_state()->TryMarkAndAccountLiveBytes(fixed_array); + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); + Tagged fixed_array; + Tagged value; + { + HandleScope handle_scope(i_isolate); + Handle fixed_array_handle( + i_isolate->factory()->NewFixedArray(1)); + Handle value_handle( + i_isolate->factory()->NewHeapNumber(1.1)); + fixed_array = *fixed_array_handle; + value = *value_handle; + } + heap->StartIncrementalMarking(i::GCFlag::kNoFlags, + i::GarbageCollectionReason::kTesting); + CHECK(heap->marking_state()->IsUnmarked(value)); - auto thread = - std::make_unique(heap, fixed_array, value); - CHECK(thread->Start()); + // Mark host |fixed_array| to trigger the barrier. 
+ heap->marking_state()->TryMarkAndAccountLiveBytes(fixed_array); - thread->Join(); + auto thread = std::make_unique( + heap, fixed_array, value); + CHECK(thread->Start()); - CHECK(heap->marking_state()->IsMarked(value)); - heap::InvokeMajorGC(heap); + thread->Join(); + CHECK(heap->marking_state()->IsMarked(value)); + heap::InvokeMajorGC(heap); + } isolate->Dispose(); } @@ -525,6 +530,7 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) { Isolate* i_isolate = reinterpret_cast(isolate); Heap* heap = i_isolate->heap(); { + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); Tagged code; Tagged value; { diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc index bcf0106da2c1a7..edbfb112d26d5e 100644 --- a/deps/v8/test/cctest/heap/test-heap.cc +++ b/deps/v8/test/cctest/heap/test-heap.cc @@ -1301,6 +1301,7 @@ UNINITIALIZED_TEST(Regress10843) { &callback_was_invoked); { + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); HandleScope scope(i_isolate); std::vector> arrays; for (int i = 0; i < 140; i++) { @@ -6504,6 +6505,7 @@ UNINITIALIZED_TEST(OutOfMemory) { oom_isolate = i_isolate; isolate->SetOOMErrorHandler(OOMCallback); { + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); Factory* factory = i_isolate->factory(); HandleScope handle_scope(i_isolate); while (true) { @@ -6530,8 +6532,10 @@ UNINITIALIZED_TEST(OutOfMemoryIneffectiveGC) { isolate->SetOOMErrorHandler(OOMCallback); Factory* factory = i_isolate->factory(); Heap* heap = i_isolate->heap(); - heap::InvokeMajorGC(heap); { + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); + heap::InvokeMajorGC(heap); + HandleScope scope(i_isolate); while (heap->OldGenerationSizeOfObjects() < heap->MaxOldGenerationSize() * 0.9) { @@ -6685,6 +6689,8 @@ UNINITIALIZED_TEST(OutOfMemorySmallObjects) { state.oom_triggered = false; heap->AddNearHeapLimitCallback(NearHeapLimitCallback, &state); { + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); + HandleScope handle_scope(isolate); while (!state.oom_triggered) { factory->NewFixedArray(100); @@ -6718,26 +6724,29 @@ UNINITIALIZED_TEST(OutOfMemoryLargeObjects) { state.heap = heap; state.oom_triggered = false; heap->AddNearHeapLimitCallback(NearHeapLimitCallback, &state); - const int kFixedArrayLength = 1000000; { - HandleScope handle_scope(isolate); - while (!state.oom_triggered) { - factory->NewFixedArray(kFixedArrayLength); + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); + const int kFixedArrayLength = 1000000; + { + HandleScope handle_scope(isolate); + while (!state.oom_triggered) { + factory->NewFixedArray(kFixedArrayLength); + } } + CHECK_LE(state.old_generation_capacity_at_oom, + kOldGenerationLimit + state.new_space_capacity_at_oom + + state.new_lo_space_size_at_oom + + FixedArray::SizeFor(kFixedArrayLength)); + CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom + + state.new_space_capacity_at_oom + + state.new_lo_space_size_at_oom + + FixedArray::SizeFor(kFixedArrayLength)); + CHECK_LE(state.memory_allocator_size_at_oom, + MemoryAllocatorSizeFromHeapCapacity( + state.old_generation_capacity_at_oom + + 2 * state.new_space_capacity_at_oom + + state.new_lo_space_size_at_oom)); } - CHECK_LE(state.old_generation_capacity_at_oom, - kOldGenerationLimit + state.new_space_capacity_at_oom + - state.new_lo_space_size_at_oom + - FixedArray::SizeFor(kFixedArrayLength)); - CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom + - state.new_space_capacity_at_oom + - 
state.new_lo_space_size_at_oom + - FixedArray::SizeFor(kFixedArrayLength)); - CHECK_LE( - state.memory_allocator_size_at_oom, - MemoryAllocatorSizeFromHeapCapacity(state.old_generation_capacity_at_oom + - 2 * state.new_space_capacity_at_oom + - state.new_lo_space_size_at_oom)); reinterpret_cast(isolate)->Dispose(); } @@ -6757,6 +6766,7 @@ UNINITIALIZED_TEST(RestoreHeapLimit) { Factory* factory = isolate->factory(); { + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap); OutOfMemoryState state; @@ -7060,8 +7070,9 @@ UNINITIALIZED_HEAP_TEST(CodeLargeObjectSpace64k) { v8::Isolate::CreateParams create_params; create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); - - Heap* heap = reinterpret_cast(isolate)->heap(); + i::Isolate* i_isolate = reinterpret_cast(isolate); + Heap* heap = i_isolate->heap(); + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); // Allocate a regular code object. { diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc index 809a6b9ba6fbe8..4d986c87b8e432 100644 --- a/deps/v8/test/cctest/test-debug-helper.cc +++ b/deps/v8/test/cctest/test-debug-helper.cc @@ -126,6 +126,7 @@ TEST(GetObjectProperties) { CcTest::InitializeVM(); v8::Isolate* isolate = CcTest::isolate(); i::Isolate* i_isolate = reinterpret_cast(isolate); + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); v8::HandleScope scope(isolate); LocalContext context; // Claim we don't know anything about the heap layout. @@ -470,6 +471,8 @@ static void FrameIterationCheck( THREADED_TEST(GetFrameStack) { LocalContext env; v8::Isolate* isolate = env->GetIsolate(); + i::Isolate* i_isolate = reinterpret_cast(isolate); + PtrComprCageAccessScope ptr_compr_cage_access_scope(i_isolate); v8::HandleScope scope(isolate); v8::Local obj = v8::ObjectTemplate::New(isolate); obj->SetAccessor(v8_str("xxx"), FrameIterationCheck); @@ -490,6 +493,7 @@ TEST(SmallOrderedHashSetGetObjectProperties) { LocalContext context; Isolate* isolate = reinterpret_cast((*context)->GetIsolate()); Factory* factory = isolate->factory(); + PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); HandleScope scope(isolate); Handle set = factory->NewSmallOrderedHashSet(); diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc index 36f131630d6fd4..bf346454f16ab6 100644 --- a/deps/v8/test/cctest/test-serialize.cc +++ b/deps/v8/test/cctest/test-serialize.cc @@ -4262,10 +4262,10 @@ UNINITIALIZED_TEST(ReinitializeHashSeedJSCollectionRehashable) { create_params.snapshot_blob = &blob; v8::Isolate* isolate = v8::Isolate::New(create_params); { + v8::Isolate::Scope isolate_scope(isolate); // Check that rehashing has been performed. CHECK_EQ(static_cast(1337), HashSeed(reinterpret_cast(isolate))); - v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Local context = v8::Context::New(isolate); CHECK(!context.IsEmpty()); @@ -4330,10 +4330,10 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) { create_params.snapshot_blob = &blob; v8::Isolate* isolate = v8::Isolate::New(create_params); { + v8::Isolate::Scope isolate_scope(isolate); // Check that rehashing has been performed. 
CHECK_EQ(static_cast(1337), HashSeed(reinterpret_cast(isolate))); - v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Local context = v8::Context::New(isolate); CHECK(!context.IsEmpty()); diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc index 488047d1d7326c..63176aa4fb85af 100644 --- a/deps/v8/test/fuzzer/wasm-async.cc +++ b/deps/v8/test/fuzzer/wasm-async.cc @@ -54,12 +54,13 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { Isolate* i_isolate = reinterpret_cast(isolate); + v8::Isolate::Scope isolate_scope(isolate); + // Clear any pending exceptions from a prior run. if (i_isolate->has_pending_exception()) { i_isolate->clear_pending_exception(); } - v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Context::Scope context_scope(support->GetContext()); diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc index ff00334f87ac21..281da1edc8ba3e 100644 --- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc +++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc @@ -811,10 +811,13 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector data, Isolate* i_isolate = reinterpret_cast(isolate); + v8::Isolate::Scope isolate_scope(isolate); + // Clear any pending exceptions from a prior run. - i_isolate->clear_pending_exception(); + if (i_isolate->has_pending_exception()) { + i_isolate->clear_pending_exception(); + } - v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Context::Scope context_scope(support->GetContext()); diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc index ad4d5163d001df..b99b30168f93d2 100644 --- a/deps/v8/test/fuzzer/wasm.cc +++ b/deps/v8/test/fuzzer/wasm.cc @@ -32,12 +32,13 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { Isolate* i_isolate = reinterpret_cast(isolate); + v8::Isolate::Scope isolate_scope(isolate); + // Clear any pending exceptions from a prior run. if (i_isolate->has_pending_exception()) { i_isolate->clear_pending_exception(); } - v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Context::Scope context_scope(support->GetContext()); diff --git a/deps/v8/test/wasm-api-tests/serialize.cc b/deps/v8/test/wasm-api-tests/serialize.cc index 5916ce9abe9995..be1716e28bb6ff 100644 --- a/deps/v8/test/wasm-api-tests/serialize.cc +++ b/deps/v8/test/wasm-api-tests/serialize.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/common/ptr-compr-inl.h" #include "src/execution/isolate.h" #include "src/wasm/c-api.h" #include "test/wasm-api-tests/wasm-api-test.h" @@ -35,12 +36,19 @@ TEST_F(WasmCapiTest, Serialize) { // We reset the module and collect it to make sure the NativeModuleCache does // not contain it anymore. Otherwise deserialization will not happen. ResetModule(); - Heap* heap = - reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate()->heap(); - heap->PreciseCollectAllGarbage(GCFlag::kForced, - GarbageCollectionReason::kTesting); - heap->PreciseCollectAllGarbage(GCFlag::kForced, - GarbageCollectionReason::kTesting); + { + Isolate* isolate = + reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate(); + // This method might be called on a thread that's not bound to any Isolate + // and thus pointer compression schemes might have cage base value unset. 
+ // Ensure cage bases are initialized so that the V8 heap can be accessed. + i::PtrComprCageAccessScope ptr_compr_cage_access_scope(isolate); + Heap* heap = isolate->heap(); + heap->PreciseCollectAllGarbage(GCFlag::kForced, + GarbageCollectionReason::kTesting); + heap->PreciseCollectAllGarbage(GCFlag::kForced, + GarbageCollectionReason::kTesting); + } own deserialized = Module::deserialize(store(), serialized); // Try to serialize the module again. This can fail if deserialization does diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.cc b/deps/v8/tools/debug_helper/debug-helper-internal.cc index 8ff3f29f5cd2d3..65098d13f0f8b3 100644 --- a/deps/v8/tools/debug_helper/debug-helper-internal.cc +++ b/deps/v8/tools/debug_helper/debug-helper-internal.cc @@ -23,13 +23,13 @@ bool IsPointerCompressed(uintptr_t address) { uintptr_t EnsureDecompressed(uintptr_t address, uintptr_t any_uncompressed_ptr) { if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address; -#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE +#ifdef V8_COMPRESS_POINTERS Address base = V8HeapCompressionScheme::GetPtrComprCageBaseAddress(any_uncompressed_ptr); if (base != V8HeapCompressionScheme::base()) { V8HeapCompressionScheme::InitBase(base); } -#endif +#endif // V8_COMPRESS_POINTERS // TODO(v8:11880): ExternalCodeCompressionScheme might be needed here for // decompressing Code pointers from external code space. return i::V8HeapCompressionScheme::DecompressTagged( diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc index ac1d7a8af04c18..9d14e01093ca74 100644 --- a/deps/v8/tools/debug_helper/get-object-properties.cc +++ b/deps/v8/tools/debug_helper/get-object-properties.cc @@ -659,13 +659,13 @@ std::unique_ptr GetHeapObjectPropertiesMaybeCompressed( any_uncompressed_ptr = heap_addresses.old_space_first_page; if (any_uncompressed_ptr == 0) any_uncompressed_ptr = heap_addresses.read_only_space_first_page; -#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE +#ifdef V8_COMPRESS_POINTERS Address base = V8HeapCompressionScheme::GetPtrComprCageBaseAddress(any_uncompressed_ptr); if (base != V8HeapCompressionScheme::base()) { V8HeapCompressionScheme::InitBase(base); } -#endif +#endif // V8_COMPRESS_POINTERS FillInUnknownHeapAddresses(&heap_addresses, any_uncompressed_ptr); if (any_uncompressed_ptr == 0) { // We can't figure out the heap range. Just check for known objects.