diff --git a/common.gypi b/common.gypi
index 812f6506c5ac46..4459d4f9d923f3 100644
--- a/common.gypi
+++ b/common.gypi
@@ -27,7 +27,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.4',
+    'v8_embedder_string': '-node.5',
 
     # Enable disassembler for `--print-code` v8 options
     'v8_enable_disassembler': 1,
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index bf359d69e93069..e8fa690660af53 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -782,22 +782,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ cmp(
             optimized_code_entry,
             Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
         __ Assert(eq, kExpectedOptimizationSentinel);
       }
-      // Checking whether the queued function is ready for install is
-      // optional, since we come across interrupts and stack checks elsewhere.
-      // However, not checking may delay installing ready functions, and
-      // always checking would be quite expensive. A good compromise is to
-      // first check against stack limit as a cue for an interrupt signal.
-      __ LoadRoot(scratch2, Heap::kStackLimitRootIndex);
-      __ cmp(sp, Operand(scratch2));
-      __ b(hs, &fallthrough);
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ jmp(&fallthrough);
     }
   }
 
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index b1d5d32b9a4e59..7aaa2d0003363a 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -788,22 +788,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ Cmp(
             optimized_code_entry,
             Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
         __ Assert(eq, kExpectedOptimizationSentinel);
       }
-
-      // Checking whether the queued function is ready for install is optional,
-      // since we come across interrupts and stack checks elsewhere. However,
-      // not checking may delay installing ready functions, and always checking
-      // would be quite expensive. A good compromise is to first check against
-      // stack limit as a cue for an interrupt signal.
-      __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
-      __ B(hs, &fallthrough);
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ B(&fallthrough);
     }
   }
 
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index ee150255209e2d..a689c3131d67e1 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -698,24 +698,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ cmp(
             optimized_code_entry,
             Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
         __ Assert(equal, kExpectedOptimizationSentinel);
       }
-
-      // Checking whether the queued function is ready for install is optional,
-      // since we come across interrupts and stack checks elsewhere. However,
-      // not checking may delay installing ready functions, and always checking
-      // would be quite expensive. A good compromise is to first check against
-      // stack limit as a cue for an interrupt signal.
-      ExternalReference stack_limit =
-          ExternalReference::address_of_stack_limit(masm->isolate());
-      __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(above_equal, &fallthrough);
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ jmp(&fallthrough);
     }
   }
 
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index e8f846c10a5891..4835fb0b1bd2f3 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -760,21 +760,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ Assert(
             eq, kExpectedOptimizationSentinel, optimized_code_entry,
             Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
       }
-
-      // Checking whether the queued function is ready for install is optional,
-      // since we come across interrupts and stack checks elsewhere. However,
-      // not checking may delay installing ready functions, and always checking
-      // would be quite expensive. A good compromise is to first check against
-      // stack limit as a cue for an interrupt signal.
-      __ LoadRoot(at, Heap::kStackLimitRootIndex);
-      __ Branch(&fallthrough, hs, sp, Operand(at));
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ jmp(&fallthrough);
     }
   }
 
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index f62750b06106a4..2584444f1f6cc2 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -760,21 +760,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ Assert(
             eq, kExpectedOptimizationSentinel, optimized_code_entry,
             Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
       }
-
-      // Checking whether the queued function is ready for install is optional,
-      // since we come across interrupts and stack checks elsewhere. However,
-      // not checking may delay installing ready functions, and always checking
-      // would be quite expensive. A good compromise is to first check against
-      // stack limit as a cue for an interrupt signal.
-      __ LoadRoot(t0, Heap::kStackLimitRootIndex);
-      __ Branch(&fallthrough, hs, sp, Operand(t0));
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ jmp(&fallthrough);
     }
   }
 
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 3ed3eb686de585..c242be5cf822bc 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -780,23 +780,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ CmpSmiLiteral(
             optimized_code_entry,
             Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
         __ Assert(eq, kExpectedOptimizationSentinel);
       }
-
-      // Checking whether the queued function is ready for install is optional,
-      // since we come across interrupts and stack checks elsewhere. However,
-      // not checking may delay installing ready functions, and always checking
-      // would be quite expensive. A good compromise is to first check against
-      // stack limit as a cue for an interrupt signal.
-      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-      __ cmpl(sp, ip);
-      __ bge(&fallthrough);
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ b(&fallthrough);
     }
   }
 
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index e9ef390c695f19..aa9e62f2174e41 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -783,22 +783,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ CmpSmiLiteral(
             optimized_code_entry,
             Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
         __ Assert(eq, kExpectedOptimizationSentinel);
       }
-
-      // Checking whether the queued function is ready for install is optional,
-      // since we come across interrupts and stack checks elsewhere. However,
-      // not checking may delay installing ready functions, and always checking
-      // would be quite expensive. A good compromise is to first check against
-      // stack limit as a cue for an interrupt signal.
-      __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
-      __ bge(&fallthrough, Label::kNear);
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ b(&fallthrough, Label::kNear);
     }
   }
 
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 713475cd34e1f5..81c92681d50af9 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -781,21 +781,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
         Runtime::kCompileOptimized_Concurrent);
 
     {
-      // Otherwise, the marker is InOptimizationQueue.
+      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+      // that an interrupt will eventually update the slot with optimized code.
       if (FLAG_debug_code) {
         __ SmiCompare(optimized_code_entry,
                       Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
         __ Assert(equal, kExpectedOptimizationSentinel);
       }
-
-      // Checking whether the queued function is ready for install is optional,
-      // since we come across interrupts and stack checks elsewhere. However,
-      // not checking may delay installing ready functions, and always checking
-      // would be quite expensive. A good compromise is to first check against
-      // stack limit as a cue for an interrupt signal.
-      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-      __ j(above_equal, &fallthrough);
-      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+      __ jmp(&fallthrough);
     }
   }
 
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 1cc00f5b7e4182..b445037d08671e 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -302,27 +302,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
   return NULL;
 }
 
-
-RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
-  // First check if this is a real stack overflow.
-  StackLimitCheck check(isolate);
-  if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
-    return isolate->StackOverflow();
-  }
-
-  // Only try to install optimized functions if the interrupt was InstallCode.
-  if (isolate->stack_guard()->CheckAndClearInstallCode()) {
-    isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
-  }
-
-  return (function->IsOptimized()) ? function->code()
-                                   : function->shared()->code();
-}
-
 static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
                                  Handle<SharedFunctionInfo> outer_info,
                                  LanguageMode language_mode,
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index e7084a8ccaa969..a11d274d25ad7f 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -120,7 +120,6 @@ namespace internal {
   F(NotifyStubFailure, 0, 1)            \
   F(NotifyDeoptimized, 0, 1)            \
   F(CompileForOnStackReplacement, 1, 1) \
-  F(TryInstallOptimizedCode, 1, 1)      \
   F(ResolvePossiblyDirectEval, 6, 1)    \
   F(InstantiateAsmJs, 4, 1)