diff --git a/Build/NuGet/.pack-version b/Build/NuGet/.pack-version
index a7ee35a3ea7..bfa363e76ed 100644
--- a/Build/NuGet/.pack-version
+++ b/Build/NuGet/.pack-version
@@ -1 +1 @@
-1.8.3
+1.8.4
diff --git a/lib/Backend/BackwardPass.cpp b/lib/Backend/BackwardPass.cpp
index 949c8c572e9..a3249f680d8 100644
--- a/lib/Backend/BackwardPass.cpp
+++ b/lib/Backend/BackwardPass.cpp
@@ -4124,8 +4124,9 @@ BackwardPass::UpdateArrayBailOutKind(IR::Instr *const instr)
     IR::BailOutKind includeBailOutKinds = IR::BailOutInvalid;
     if(!baseValueType.IsNotNativeArray() &&
-        (!baseValueType.IsLikelyNativeArray() || instr->GetSrc1()->IsVar()) &&
-        !currentBlock->noImplicitCallNativeArrayUses->IsEmpty())
+        (!baseValueType.IsLikelyNativeArray() || !instr->GetSrc1()->IsInt32()) &&
+        !currentBlock->noImplicitCallNativeArrayUses->IsEmpty() &&
+        !(instr->GetBailOutKind() & IR::BailOutOnArrayAccessHelperCall))
     {
         // There is an upwards-exposed use of a native array. Since the array referenced by this instruction can be aliased,
         // this instruction needs to bail out if it converts the native array even if this array specifically is not
@@ -4231,6 +4232,11 @@ BackwardPass::ProcessStackSymUse(StackSym * stackSym, BOOLEAN isNonByteCodeUse)
         return true;
     }

+    if (this->DoMarkTempNumbers())
+    {
+        Assert((block->loop != nullptr) == block->tempNumberTracker->HasTempTransferDependencies());
+        block->tempNumberTracker->ProcessUse(stackSym, this);
+    }
     if (this->DoMarkTempObjects())
     {
         Assert((block->loop != nullptr) == block->tempObjectTracker->HasTempTransferDependencies());
@@ -4293,17 +4299,7 @@ BackwardPass::ProcessSymUse(Sym * sym, bool isRegOpndUse, BOOLEAN isNonByteCodeU
         }
     }

-    StackSym * stackSym = sym->AsStackSym();
-    bool isUsed = ProcessStackSymUse(stackSym, isNonByteCodeUse);
-
-    if (!IsCollectionPass() && isRegOpndUse && this->DoMarkTempNumbers())
-    {
-        // Collect mark temp number information
-        Assert((block->loop != nullptr) == block->tempNumberTracker->HasTempTransferDependencies());
-        block->tempNumberTracker->ProcessUse(stackSym, this);
-    }
-
-    return isUsed;
+    return ProcessStackSymUse(sym->AsStackSym(), isNonByteCodeUse);
 }

 bool
diff --git a/lib/Backend/GlobOpt.cpp b/lib/Backend/GlobOpt.cpp
index 76daa7f7e78..a1dba18ed77 100644
--- a/lib/Backend/GlobOpt.cpp
+++ b/lib/Backend/GlobOpt.cpp
@@ -6482,6 +6482,8 @@ GlobOpt::OptConstPeep(IR::Instr *instr, IR::Opnd *constSrc, Value **pDstVal, Val
     instr->m_opcode = Js::OpCode::Ld_A;

+    InvalidateInductionVariables(instr);
+
     return true;
 }

@@ -7088,16 +7090,7 @@ GlobOpt::OptConstFoldUnary(
         }
     }

-    // If this is an induction variable, then treat it the way the prepass would have if it had seen
-    // the assignment and the resulting change to the value number, and mark it as indeterminate.
-    for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
-    {
-        InductionVariable *iv = nullptr;
-        if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
-        {
-            iv->SetChangeIsIndeterminate();
-        }
-    }
+    InvalidateInductionVariables(instr);

     return true;
 }
@@ -12422,16 +12415,7 @@ GlobOpt::OptConstFoldBinary(
         this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock);
     }

-    // If this is an induction variable, then treat it the way the prepass would have if it had seen
-    // the assignment and the resulting change to the value number, and mark it as indeterminate.
-    for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
-    {
-        InductionVariable *iv = nullptr;
-        if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
-        {
-            iv->SetChangeIsIndeterminate();
-        }
-    }
+    InvalidateInductionVariables(instr);

     return true;
 }
diff --git a/lib/Backend/GlobOpt.h b/lib/Backend/GlobOpt.h
index 0c0d2b8f0a0..116f61ec73d 100644
--- a/lib/Backend/GlobOpt.h
+++ b/lib/Backend/GlobOpt.h
@@ -685,6 +685,7 @@ class GlobOpt
     void DetectUnknownChangesToInductionVariables(GlobOptBlockData *const blockData);
     void SetInductionVariableValueNumbers(GlobOptBlockData *const blockData);
     void FinalizeInductionVariables(Loop *const loop, GlobOptBlockData *const headerData);
+    void InvalidateInductionVariables(IR::Instr * instr);
     enum class SymBoundType {OFFSET, VALUE, UNKNOWN};
     SymBoundType DetermineSymBoundOffsetOrValueRelativeToLandingPad(StackSym *const sym, const bool landingPadValueIsLowerBound, ValueInfo *const valueInfo, const IntBounds *const bounds, GlobOptBlockData *const landingPadGlobOptBlockData, int *const boundOffsetOrValueRef);
diff --git a/lib/Backend/GlobOptIntBounds.cpp b/lib/Backend/GlobOptIntBounds.cpp
index 19f2b200e5f..9d456eeb262 100644
--- a/lib/Backend/GlobOptIntBounds.cpp
+++ b/lib/Backend/GlobOptIntBounds.cpp
@@ -1262,6 +1262,30 @@ void GlobOpt::FinalizeInductionVariables(Loop *const loop, GlobOptBlockData *con
     }
 }

+void
+GlobOpt::InvalidateInductionVariables(IR::Instr * instr)
+{
+    Assert(instr->GetDst() != nullptr && instr->GetDst()->IsRegOpnd());
+
+    // Induction variables are always var syms.
+    StackSym * dstSym = instr->GetDst()->AsRegOpnd()->m_sym;
+    if (!dstSym->IsVar())
+    {
+        dstSym = dstSym->GetVarEquivSym(this->func);
+    }
+
+    // If this is an induction variable, then treat it the way the prepass would have if it had seen
+    // the assignment and the resulting change to the value number, and mark it as indeterminate.
+    for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
+    {
+        InductionVariable *iv = nullptr;
+        if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
+        {
+            iv->SetChangeIsIndeterminate();
+        }
+    }
+}
+
 GlobOpt::SymBoundType GlobOpt::DetermineSymBoundOffsetOrValueRelativeToLandingPad(
     StackSym *const sym,
     const bool landingPadValueIsLowerBound,
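[Editor's note — not part of the patch] OptConstPeep previously performed no induction-variable invalidation at all, while OptConstFoldUnary and OptConstFoldBinary each carried a private copy of the loop now centralized in this helper; the helper also normalizes a type-specialized dst sym back to its var sym before the lookup, since the tracking table is keyed by var syms. A hedged, illustrative sketch of the hazard being closed (assumed sym names):

    // The loop prepass records s1 as an induction variable (say, s1 grows by
    // a fixed step per iteration), and bounds-check elimination relies on
    // that summary. If an assignment to s1 is later const-folded or peeped
    // into a plain load, the summary no longer describes every write to s1;
    // marking the change indeterminate makes the optimizer stop trusting it
    // rather than eliminate array bounds checks that no longer hold.

[End note]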
diff --git a/lib/Backend/Inline.cpp b/lib/Backend/Inline.cpp
index fdde8527c20..5fc88cd7165 100644
--- a/lib/Backend/Inline.cpp
+++ b/lib/Backend/Inline.cpp
@@ -2089,11 +2089,11 @@ Inline::InlineBuiltInFunction(IR::Instr *callInstr, const FunctionJITTimeInfo *
             callInstr->m_opcode = inlineCallOpCode;
             SetupInlineInstrForCallDirect(builtInFunctionId, callInstr, argoutInstr);

+            WrapArgsOutWithCoerse(builtInFunctionId, callInstr);
+
             // Generate ByteCodeArgOutCaptures and move the ArgOut_A/ArgOut_A_Inline close to the call instruction
             callInstr->MoveArgs(/*generateByteCodeCapture*/ true);

-            WrapArgsOutWithCoerse(builtInFunctionId, callInstr);
-
             inlineBuiltInEndInstr = callInstr;
         }
         else
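[Editor's note — not part of the patch] The only change here is ordering: the coercion wrappers (the helper name WrapArgsOutWithCoerse is spelled that way in the codebase) are now emitted before MoveArgs relocates the ArgOuts and generates their byte-code captures, which appears intended so that the captures record the already-coerced values. A hedged, illustrative sketch of the resulting instruction order:

    // before this change:              after this change:
    //   ArgOut_A s1                      ArgOut_A s1
    //   <byte-code capture of s1>        <coerce s1>                <- wrapper first
    //   <coerce s1>                      <byte-code capture of s1>  <- capture sees coerced value

[End note]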
diff --git a/lib/Backend/Lower.cpp b/lib/Backend/Lower.cpp
index e09754158de..e956c5edb93 100644
--- a/lib/Backend/Lower.cpp
+++ b/lib/Backend/Lower.cpp
@@ -5110,7 +5110,7 @@ Lowerer::LowerUpdateNewScObjectCache(IR::Instr * insertInstr, IR::Opnd *dst, IR:
         indirOpnd = IR::IndirOpnd::New(r1, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func);
         IR::IntConstOpnd *intOpnd = IR::IntConstOpnd::New(Js::TypeIds_Function, TyInt32, this->m_func, true);
         IR::BranchInstr* branchInstr = InsertCompareBranch(indirOpnd, intOpnd, Js::OpCode::BrNeq_A, labelFallThru, insertInstr);
-        InsertObjectPoison(src1RegOpnd, branchInstr, insertInstr);
+        InsertObjectPoison(src1RegOpnd, branchInstr, insertInstr, false);
     }

     // Every function has a constructor cache, even if only the default blank one.
@@ -5304,13 +5304,15 @@ Lowerer::LowerNewScObjArray(IR::Instr *newObjInstr)
     newObjInstr->SetSrc1(IR::HelperCallOpnd::New(helperMethod, func));
     newObjInstr = GenerateDirectCall(newObjInstr, targetOpnd, Js::CallFlags_New);

-    InsertCompareBranch(
+    IR::BranchInstr* branchInstr = InsertCompareBranch(
         IR::IndirOpnd::New(resultObjOpnd, 0, TyMachPtr, func),
         LoadVTableValueOpnd(insertInstr, VTableValue::VtableJavascriptArray),
         Js::OpCode::BrEq_A,
         true,
         labelDone,
         insertInstr);
+
+    InsertObjectPoison(resultObjOpnd, branchInstr, insertInstr, true);

     // We know we have a native array, so store the weak ref and call site index.
     InsertMove(
         IR::IndirOpnd::New(resultObjOpnd, Js::JavascriptNativeArray::GetOffsetOfArrayCallSiteIndex(), TyUint16, func),
@@ -7198,7 +7200,7 @@ Lowerer::GenerateCachedTypeCheck(IR::Instr *instrChk, IR::PropertySymOpnd *prope
             IR::LabelInstr* labelCheckEquivalentType = IR::LabelInstr::New(Js::OpCode::Label, func, true);
             IR::BranchInstr* branchInstr = InsertCompareBranch(typeOpnd, expectedTypeOpnd, Js::OpCode::BrNeq_A, labelCheckEquivalentType, instrChk);
-            InsertObjectPoison(regOpnd, branchInstr, instrChk);
+            InsertObjectPoison(regOpnd, branchInstr, instrChk, false);

             IR::LabelInstr *labelTypeCheckSucceeded = IR::LabelInstr::New(Js::OpCode::Label, func, false);
             InsertBranch(Js::OpCode::Br, labelTypeCheckSucceeded, instrChk);
@@ -7249,7 +7251,7 @@ Lowerer::GenerateCachedTypeCheck(IR::Instr *instrChk, IR::PropertySymOpnd *prope
     else
     {
         IR::BranchInstr* branchInstr = InsertCompareBranch(typeOpnd, expectedTypeOpnd, Js::OpCode::BrNeq_A, labelSecondChance != nullptr ? labelSecondChance : labelTypeCheckFailed, instrChk);
-        InsertObjectPoison(regOpnd, branchInstr, instrChk);
+        InsertObjectPoison(regOpnd, branchInstr, instrChk, false);
     }

     // Don't pin the type for polymorphic operations. The code can successfully execute even if this type is no longer referenced by any objects,
@@ -7265,10 +7267,10 @@
 }

 void
-Lowerer::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr)
+Lowerer::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr, bool isForStore)
 {
 #ifndef _M_ARM
-    LowererMD::InsertObjectPoison(poisonedOpnd, branchInstr, insertInstr);
+    LowererMD::InsertObjectPoison(poisonedOpnd, branchInstr, insertInstr, isForStore);
 #endif
 }

@@ -7627,7 +7629,7 @@ Lowerer::GeneratePropertyGuardCheck(IR::Instr *insertPointInstr, IR::PropertySym
         IR::MemRefOpnd* guardOpnd = IR::MemRefOpnd::New(guard, TyMachPtr, this->m_func, IR::AddrOpndKindDynamicGuardValueRef);
         IR::BranchInstr *branchInstr = InsertCompareBranch(guardOpnd, zeroOpnd, Js::OpCode::BrEq_A, labelBailOut, insertPointInstr);
         IR::RegOpnd *objPtrReg = IR::RegOpnd::New(propertySymOpnd->GetObjectSym(), TyMachPtr, m_func);
-        InsertObjectPoison(objPtrReg, branchInstr, insertPointInstr);
+        InsertObjectPoison(objPtrReg, branchInstr, insertPointInstr, false);
     }
     else
     {
@@ -8341,7 +8343,7 @@ Lowerer::LowerAddLeftDeadForString(IR::Instr *instr)
         labelHelper,
         insertBeforeInstr);

-    InsertObjectPoison(opndLeft->AsRegOpnd(), branchInstr, insertBeforeInstr);
+    InsertObjectPoison(opndLeft->AsRegOpnd(), branchInstr, insertBeforeInstr, false);

     GenerateStringTest(opndRight->AsRegOpnd(), insertBeforeInstr, labelHelper);

@@ -13842,7 +13844,7 @@ void Lowerer::GenerateObjectTypeTest(IR::RegOpnd *srcReg, IR::Instr *instrInsert
         labelHelper,
         instrInsert);

-    InsertObjectPoison(srcReg, branchInstr, instrInsert);
+    InsertObjectPoison(srcReg, branchInstr, instrInsert, false);
 }

 const VTableValue Lowerer::VtableAddresses[static_cast(ObjectType::Count)] =
@@ -14339,10 +14341,7 @@ IR::RegOpnd *Lowerer::GenerateArrayTest(
             goodArrayLabel,
             insertBeforeInstr);

-        if (!isStore)
-        {
-            InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr);
-        }
+        InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore);

         IR::LabelInstr *notFloatArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true);
         insertBeforeInstr->InsertBefore(notFloatArrayLabel);
@@ -14366,10 +14365,7 @@ IR::RegOpnd *Lowerer::GenerateArrayTest(
             isNotArrayLabel,
             insertBeforeInstr);

-        if (!isStore)
-        {
-            InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr);
-        }
+        InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore);

         m_lowererMD.LoadHelperArgument(insertBeforeInstr, arrayOpnd);

@@ -14404,10 +14400,7 @@ IR::RegOpnd *Lowerer::GenerateArrayTest(
                 isNotArrayLabel,
                 insertBeforeInstr);

-            if (!isStore)
-            {
-                InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr);
-            }
+            InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore);
             insertBeforeInstr->InsertBefore(goodArrayLabel);
         }
         else
@@ -14419,10 +14412,7 @@ IR::RegOpnd *Lowerer::GenerateArrayTest(
                 isNotArrayLabel,
                 insertBeforeInstr);

-            if (!isStore)
-            {
-                InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr);
-            }
+            InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore);
         }
     }

@@ -15260,12 +15250,10 @@ Lowerer::GenerateFastElemIStringIndexCommon(IR::Instr * instrInsert, bool isStor
         IR::IndirOpnd::New(indexOpnd, 0, TyMachPtr, m_func),
         LoadVTableValueOpnd(instrInsert, VTableValue::VtablePropertyString),
         Js::OpCode::BrNeq_A, notPropStrLabel, instrInsert);
-    InsertBranch(Js::OpCode::Br, propStrLoadedLabel, instrInsert);

-    if (!isStore)
-    {
-        InsertObjectPoison(indexOpnd, branchInstr, instrInsert);
-    }
+    InsertObjectPoison(indexOpnd, branchInstr, instrInsert, isStore);
+
+    InsertBranch(Js::OpCode::Br, propStrLoadedLabel, instrInsert);

     instrInsert->InsertBefore(notPropStrLabel);

@@ -15274,10 +15262,7 @@ Lowerer::GenerateFastElemIStringIndexCommon(IR::Instr * instrInsert, bool isStor
         LoadVTableValueOpnd(instrInsert, VTableValue::VtableLiteralStringWithPropertyStringPtr),
         Js::OpCode::BrNeq_A, labelHelper, instrInsert);

-    if (!isStore)
-    {
-        InsertObjectPoison(indexOpnd, branchInstr, instrInsert);
-    }
+    InsertObjectPoison(indexOpnd, branchInstr, instrInsert, isStore);

     IR::IndirOpnd * propStrOpnd = IR::IndirOpnd::New(indexOpnd, Js::LiteralStringWithPropertyStringPtr::GetOffsetOfPropertyString(), TyMachPtr, m_func);
     InsertCompareBranch(propStrOpnd, IR::IntConstOpnd::New(NULL, TyMachPtr, m_func), Js::OpCode::BrNeq_A, labelHelper, instrInsert);
@@ -15506,7 +15491,8 @@ Lowerer::GenerateFastElemIIntIndexCommon(
         if(arrayRegOpnd->HeadSegmentLengthSym())
         {
             headSegmentLengthOpnd = IR::RegOpnd::New(arrayRegOpnd->HeadSegmentLengthSym(), TyUint32, m_func);
-            DebugOnly(headSegmentLengthOpnd->AsRegOpnd()->FreezeSymValue());
+            // This value can change over the course of this function
+            //DebugOnly(headSegmentLengthOpnd->AsRegOpnd()->FreezeSymValue());
             autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func);
         }
         if (arrayRegOpnd->EliminatedLowerBoundCheck())
@@ -15638,6 +15624,8 @@ Lowerer::GenerateFastElemIIntIndexCommon(
     const bool needBailOutToHelper = !!(bailOutKind & (IR::BailOutOnArrayAccessHelperCall));
     const bool needBailOutOnSegmentLengthCompare = needBailOutToHelper || needBailOutOnInvalidLength;

+    bool usingSegmentLengthIncreasedLabel = false;
+
     if(indexIsLessThanHeadSegmentLength || needBailOutOnSegmentLengthCompare)
     {
         if (needBailOutOnSegmentLengthCompare)
@@ -15715,6 +15703,7 @@ Lowerer::GenerateFastElemIIntIndexCommon(
         else if (isStore && !baseValueType.IsLikelyTypedArray()) // #if (opcode == StElemI_A)
         {
             IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func);
+            LABELNAME(labelDone);
             IR::LabelInstr *labelSegmentLengthIncreased = nullptr;

             const bool isPush = instr->m_opcode != Js::OpCode::StElemI_A && instr->m_opcode != Js::OpCode::StElemI_A_Strict;
@@ -15738,7 +15727,11 @@ Lowerer::GenerateFastElemIIntIndexCommon(
                 // for the case where the length was increased (index >= length), and pass it back to GenerateFastStElemI, which
                 // will fill in the rest.
                 labelSegmentLengthIncreased = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelperBlock);
+                LABELNAME(labelSegmentLengthIncreased);
                 *pLabelSegmentLengthIncreased = labelSegmentLengthIncreased;
+
+                // Since this is effectively a separate exit point, we need to do the spectre mitigations in this place as well.
+                usingSegmentLengthIncreasedLabel = true;
             }
             else
             {
@@ -15787,16 +15780,24 @@
                 //     jae $helper
                 //     and [array + offsetOf(objectArrayOrFlags)], ~Js::DynamicObjectFlags::HasNoMissingValues
                 // indexLessThanSize:
+                //     if(!index->IsConstOpnd()) {
+                //         sub temp, index, [headSegment + offset(size)]
+                //         sar temp, 31
+                //         and index, temp
+                //     }
                 IR::LabelInstr *const indexGreaterThanLengthLabel = InsertLabel(true /* isHelper */, instr);
+                LABELNAME(indexGreaterThanLengthLabel);
                 IR::LabelInstr *const indexLessThanSizeLabel = InsertLabel(isInHelperBlock, instr);
+                LABELNAME(indexLessThanSizeLabel);

                 // jne indexGreaterThanLength // branch for (cmp index, headSegmentLength)
+                InsertBranch(Js::OpCode::BrNeq_A, indexGreaterThanLengthLabel, indexGreaterThanLengthLabel);
+
                 // cmp index, [headSegment + offset(size)]
                 // jae $helper
                 // jmp indexLessThanSize
                 // indexGreaterThanLength:
-                InsertBranch(Js::OpCode::BrNeq_A, indexGreaterThanLengthLabel, indexGreaterThanLengthLabel);
                 InsertCompareBranch(
                     indexValueOpnd,
                     IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, size), TyUint32, m_func),
@@ -15804,6 +15805,7 @@
                     true /* isUnsigned */,
                     labelHelper,
                     indexGreaterThanLengthLabel);
+                InsertBranch(Js::OpCode::Br, indexLessThanSizeLabel, indexGreaterThanLengthLabel);

                 // indexGreaterThanLength:
@@ -15818,6 +15820,7 @@
                     true /* isUnsigned */,
                     labelHelper,
                     indexLessThanSizeLabel);
+
                 CompileAssert(
                     static_cast(static_cast(Js::DynamicObjectFlags::HasNoMissingValues)) ==
                     Js::DynamicObjectFlags::HasNoMissingValues);
@@ -15831,7 +15834,36 @@
                         true),
                     indexLessThanSizeLabel);

+                // In speculative cases, we want to avoid a write to an array setting the length to something huge, which
+                // would then allow subsequent reads to hit arbitrary memory (in the speculative path). This is done with
+                // a mask generated from the difference between the index and the size. Since we should have already gone
+                // to the helper in any case where this would execute, it's a functional no-op.
+                // indexLessThanSize:
+                //     if(!index->IsConstOpnd()) {
+                //         sub temp, index, [headSegment + offset(size)]
+                //         sar temp, 31
+                //         and index, temp
+                //     }
+                if (!indexValueOpnd->IsConstOpnd()
+                    && (baseValueType.IsLikelyTypedArray()
+                        ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore)
+                        : ((indirType == TyVar && CONFIG_FLAG_RELEASE(PoisonVarArrayStore))
+                            || (IRType_IsNativeInt(indirType) && CONFIG_FLAG_RELEASE(PoisonIntArrayStore))
+                            || (IRType_IsFloat(indirType) && CONFIG_FLAG_RELEASE(PoisonFloatArrayStore)))
+                        )
+                    )
+                {
+                    IR::RegOpnd* temp = IR::RegOpnd::New(TyUint32, m_func);
+                    InsertSub(
+                        false,
+                        temp,
+                        indexValueOpnd,
+                        IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, size), TyUint32, m_func),
+                        instr);
+                    InsertShift(Js::OpCode::Shr_A, false, temp, temp, IR::IntConstOpnd::New(31, TyInt8, m_func), instr);
+                    InsertAnd(indexValueOpnd, indexValueOpnd, temp, instr);
+                }
                 break;
             }
         }
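[Editor's note — not part of the patch] A minimal compilable sketch of the masking sequence the block above emits (sub / arithmetic shift / and), assuming a 32-bit unsigned index and segment size:

    #include <stdint.h>

    // Functional no-op on the architectural path (index >= size already went
    // to the helper); on a mispredicted speculative path it forces the store
    // index to 0 instead of an attacker-chosen out-of-bounds value.
    static inline uint32_t SpectreClampStoreIndex(uint32_t index, uint32_t size)
    {
        uint32_t mask = (uint32_t)((int32_t)(index - size) >> 31); // all ones iff index < size
        return index & mask;
    }

[End note]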
@@ -15845,6 +15877,7 @@
     if(isPush)
     {
         IR::LabelInstr *const updateLengthLabel = InsertLabel(isInHelperBlock, instr);
+        LABELNAME(updateLengthLabel);

         if(!doUpperBoundCheck && !headSegmentLengthOpnd)
         {
@@ -15921,6 +15954,12 @@
     indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, length), TyUint32, this->m_func);
     InsertMove(indirOpnd, newLengthOpnd, instr);

+    // We've changed the head segment length, so we may need to change the head segment length opnd
+    if (headSegmentLengthOpnd != nullptr && !headSegmentLengthOpnd->IsIndirOpnd())
+    {
+        InsertMove(headSegmentLengthOpnd, newLengthOpnd, instr);
+    }
+
     if (checkArrayLengthOverflow)
     {
         // CMP newLength, [base + offset(length)]
@@ -15960,6 +15999,7 @@
         InsertMove(instr->GetDst(), newLengthOpnd, instr);
     }

+    // Calling code assumes that indirOpnd is initialized before labelSegmentLengthIncreased is reached
     if(labelSegmentLengthIncreased && labelSegmentLengthIncreased != labelDone)
     {
         // labelSegmentLengthIncreased:
@@ -15976,6 +16016,7 @@
     if (*pIsTypedArrayElement && isStore)
     {
         IR::LabelInstr *labelInlineSet = IR::LabelInstr::New(Js::OpCode::Label, this->m_func);
+        LABELNAME(labelInlineSet);

         //For positive index beyond length or negative index its essentially nop for typed array store
         InsertBranch(
@@ -15989,6 +16030,7 @@
         {
             // Enter an ophelper block
             IR::LabelInstr * opHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true);
+            LABELNAME(opHelper);
             instr->InsertBefore(opHelper);

             IR::Instr *toNumberInstr = IR::Instr::New(Js::OpCode::Call, this->m_func);
@@ -16092,6 +16134,12 @@
             IR::IndirOpnd *lengthIndirOpnd = IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, length), TyUint32, this->m_func);
             InsertMove(lengthIndirOpnd, newLengthOpnd, instr);

+            // We've changed the head segment length, so we may need to change the head segment length opnd
+            if (headSegmentLengthOpnd != nullptr && !headSegmentLengthOpnd->IsIndirOpnd())
+            {
+                InsertMove(headSegmentLengthOpnd, newLengthOpnd, instr);
+            }
+
             // MOV [base + offset(length)], newLength
             lengthIndirOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), TyUint32, this->m_func);
             InsertMove(lengthIndirOpnd, newLengthOpnd, instr);
@@ -16107,12 +16155,40 @@
         }
     }
     // #endif

+    // Should we poison the load of the address to/from which the store/load happens?
     bool shouldPoisonLoad = maskOpnd != nullptr
-        && (baseValueType.IsLikelyTypedArray()
-            ? CONFIG_FLAG_RELEASE(PoisonTypedArrayLoad)
-            : ((indirType == TyVar && CONFIG_FLAG_RELEASE(PoisonVarArrayLoad))
-                || (IRType_IsNativeInt(indirType) && CONFIG_FLAG_RELEASE(PoisonIntArrayLoad))
-                || (IRType_IsFloat(indirType) && CONFIG_FLAG_RELEASE(PoisonFloatArrayLoad))));
+        && (
+            (!isStore &&
+                (baseValueType.IsLikelyTypedArray()
+                    ? CONFIG_FLAG_RELEASE(PoisonTypedArrayLoad)
+                    : ((indirType == TyVar && CONFIG_FLAG_RELEASE(PoisonVarArrayLoad))
+                        || (IRType_IsNativeInt(indirType) && CONFIG_FLAG_RELEASE(PoisonIntArrayLoad))
+                        || (IRType_IsFloat(indirType) && CONFIG_FLAG_RELEASE(PoisonFloatArrayLoad)))
+                )
+            )
+            ||
+            (isStore &&
+                (baseValueType.IsLikelyTypedArray()
+                    ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore)
+                    : ((indirType == TyVar && CONFIG_FLAG_RELEASE(PoisonVarArrayStore))
+                        || (IRType_IsNativeInt(indirType) && CONFIG_FLAG_RELEASE(PoisonIntArrayStore))
+                        || (IRType_IsFloat(indirType) && CONFIG_FLAG_RELEASE(PoisonFloatArrayStore)))
+                )
+            )
+        )
+        ;
+
+    // We have two exit paths for this function in the store case when we might grow the head
+    // segment, due to tracking for missing elements. This unfortunately means that we need a
+    // copy of the poisoning code on the other exit path, since the determination of the path
+    // and the use of the path determination to decide whether we found the missing value are
+    // things that have to happen on opposite sides of the poisoning.
+    IR::Instr* insertForSegmentLengthIncreased = nullptr;
+    if (shouldPoisonLoad && usingSegmentLengthIncreasedLabel)
+    {
+        insertForSegmentLengthIncreased = (*pLabelSegmentLengthIncreased)->m_next;
+    }
+
 #if TARGET_32
     if (shouldPoisonLoad)
     {
@@ -16125,6 +16201,10 @@
         {
             IR::RegOpnd* newIndexValueOpnd = IR::RegOpnd::New(TyUint32, m_func);
             InsertAnd(newIndexValueOpnd, indexValueOpnd, IR::IntConstOpnd::New(INT32_MAX, TyUint32, m_func), instr);
+            if(insertForSegmentLengthIncreased != nullptr)
+            {
+                InsertAnd(newIndexValueOpnd, indexValueOpnd, IR::IntConstOpnd::New(INT32_MAX, TyUint32, m_func), insertForSegmentLengthIncreased);
+            }
             indexValueOpnd = newIndexValueOpnd;
         }
     }
@@ -16140,7 +16220,12 @@
         indirOpnd = IR::IndirOpnd::New(arrayOpnd, bufferOffset, TyMachPtr, this->m_func);
         headSegmentOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func);
         autoReuseHeadSegmentOpnd.Initialize(headSegmentOpnd, m_func);
+        IR::AutoReuseOpnd reuseIndir(indirOpnd, m_func);
         InsertMove(headSegmentOpnd, indirOpnd, instr);
+        if(insertForSegmentLengthIncreased != nullptr)
+        {
+            InsertMove(headSegmentOpnd, indirOpnd, insertForSegmentLengthIncreased);
+        }
     }

     // indirOpnd = [headSegment + index]
@@ -16181,7 +16266,7 @@
     if (baseValueType.IsLikelyTypedArray())
     {
         int lengthOffset;
-        lengthOffset = Js::Float64Array::GetOffsetOfLength();
+        lengthOffset = GetArrayOffsetOfHeadSegment(baseValueType);
         headSegmentLengthOpnd = IR::IndirOpnd::New(arrayOpnd, lengthOffset, TyUint32, m_func);
         autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func);
     }
@@ -16208,6 +16293,12 @@
         IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, lengthOpnd, headSegmentLengthOpnd, m_func);
         instr->InsertBefore(instrMov);
         LowererMD::Legalize(instrMov);
+        if (insertForSegmentLengthIncreased != nullptr)
+        {
+            IR::Instr * instrMov2 = IR::Instr::New(Js::OpCode::MOV_TRUNC, lengthOpnd, headSegmentLengthOpnd, m_func);
+            insertForSegmentLengthIncreased->InsertBefore(instrMov2);
+            LowererMD::Legalize(instrMov2);
+        }

         if (lengthOpnd->GetSize() != MachPtr)
         {
@@ -16223,6 +16314,13 @@
         instr->InsertBefore(instrMov);
         LowererMD::Legalize(instrMov);

+        if (insertForSegmentLengthIncreased != nullptr)
+        {
+            IR::Instr * instrMov2 = IR::Instr::New(Js::OpCode::MOV_TRUNC, indexValueRegOpnd, indexValueOpnd, m_func);
+            insertForSegmentLengthIncreased->InsertBefore(instrMov2);
+            LowererMD::Legalize(instrMov2);
+        }
+
         if (indexValueRegOpnd->GetSize() != MachPtr)
         {
             indexValueRegOpnd = indexValueRegOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd();
@@ -16231,13 +16329,23 @@
         localMaskOpnd = IR::RegOpnd::New(TyMachPtr, m_func);
         InsertSub(false, localMaskOpnd, indexValueRegOpnd, lengthOpnd, instr);
         InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(63, TyInt8, m_func), instr);
+        if (insertForSegmentLengthIncreased != nullptr)
+        {
+            InsertSub(false, localMaskOpnd, indexValueRegOpnd, lengthOpnd, insertForSegmentLengthIncreased);
+            InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(63, TyInt8, m_func), insertForSegmentLengthIncreased);
+        }
 #else
         localMaskOpnd = IR::RegOpnd::New(TyInt32, m_func);
         InsertSub(false, localMaskOpnd, indexValueOpnd, headSegmentLengthOpnd, instr);
         InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(31, TyInt8, m_func), instr);
+        if (insertForSegmentLengthIncreased != nullptr)
+        {
+            InsertSub(false, localMaskOpnd, indexValueOpnd, headSegmentLengthOpnd, insertForSegmentLengthIncreased);
+            InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(31, TyInt8, m_func), insertForSegmentLengthIncreased);
+        }
 #endif

-        if (IRType_IsNativeInt(indirType) || indirType == TyVar)
+        if ((IRType_IsNativeInt(indirType) || indirType == TyVar) && !isStore)
         {
             *maskOpnd = localMaskOpnd;
         }
@@ -16253,6 +16361,10 @@
             IR::RegOpnd* fullIndexOpnd = IR::RegOpnd::New(indirOpnd->GetIndexOpnd()->GetType(), m_func);
             InsertShift(Js::OpCode::Shl_A, false, fullIndexOpnd, indirOpnd->GetIndexOpnd(), IR::IntConstOpnd::New(indirOpnd->GetScale(), TyInt8, m_func), instr);
             IR::IndirOpnd* newIndir = IR::IndirOpnd::New(indirOpnd->GetBaseOpnd(), fullIndexOpnd, indirType, m_func);
+            if (insertForSegmentLengthIncreased != nullptr)
+            {
+                InsertShift(Js::OpCode::Shl_A, false, fullIndexOpnd, indirOpnd->GetIndexOpnd(), IR::IntConstOpnd::New(indirOpnd->GetScale(), TyInt8, m_func), insertForSegmentLengthIncreased);
+            }
             if (indirOpnd->GetOffset() != 0)
             {
                 newIndir->SetOffset(indirOpnd->GetOffset());
@@ -16260,9 +16372,22 @@
             indirOpnd = newIndir;
         }
 #endif
+        IR::AutoReuseOpnd reuseIndir(indirOpnd, m_func);
         InsertLea(loadAddr, indirOpnd, instr);
         InsertAnd(loadAddr, loadAddr, localMaskOpnd, instr);

+        if (insertForSegmentLengthIncreased != nullptr)
+        {
+            InsertLea(loadAddr, indirOpnd, insertForSegmentLengthIncreased);
+            InsertAnd(loadAddr, loadAddr, localMaskOpnd, insertForSegmentLengthIncreased);
+
+            // We want to export a segmentLengthIncreasedLabel to the caller that is after the poisoning
+            // code, since that's also the code that generates indirOpnd in this case.
+            IR::LabelInstr* exportedSegmentLengthIncreasedLabel = IR::LabelInstr::New(Js::OpCode::Label, insertForSegmentLengthIncreased->m_func, (*pLabelSegmentLengthIncreased)->isOpHelper);
+            LABELNAME(exportedSegmentLengthIncreasedLabel);
+            insertForSegmentLengthIncreased->InsertBefore(exportedSegmentLengthIncreasedLabel);
+            *pLabelSegmentLengthIncreased = exportedSegmentLengthIncreasedLabel;
+        }
         indirOpnd = IR::IndirOpnd::New(loadAddr, 0, indirType, m_func);
     }
 }
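[Editor's note — not part of the patch] A hedged sketch of the code shape this arranges on the grow-the-segment exit path (x64 case; illustrative names, IR-style three-operand ops):

    // $segmentLengthIncreased:           <- internal label, target of the grow path
    //     Sub   mask, index, headSegmentLength
    //     Shr_A mask, 63                 <- arithmetic shift: all ones iff index < length
    //     Lea   addr, [headSegment + index * scale]
    //     And   addr, addr, mask         <- poisoned element address
    // $exportedSegmentLengthIncreased:   <- what *pLabelSegmentLengthIncreased now names;
    //                                       GenerateFastStElemI emits its store after this label

[End note]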
@@ -16998,7 +17123,7 @@ Lowerer::GenerateFastStElemI(IR::Instr *& stElem, bool *instrIsInHelperBlockRef)
     //     TEST index, 1                  -- index tagged int
     //     JEQ $helper
     //     MOV r2, index
-    //     SAR r2, Js::VarTag_Shift       -- remote atom tag
+    //     SAR r2, Js::VarTag_Shift       -- remove atom tag
     //     JS $helper                     -- exclude negative index
     //     MOV r4, [base + offset(head)]
     //     CMP r2, [r4 + offset(length)]  -- bounds check
@@ -17042,6 +17167,7 @@ Lowerer::GenerateFastStElemI(IR::Instr *& stElem, bool *instrIsInHelperBlockRef)
     }

     bool isTypedArrayElement, isStringIndex, indirOpndOverflowed = false;
+    IR::Opnd* maskOpnd = nullptr;

     indirOpnd = GenerateFastElemICommon(
         stElem,
@@ -17053,7 +17179,7 @@
         &isTypedArrayElement,
         &isStringIndex,
         &emitBailout,
-        nullptr,
+        &maskOpnd,
         &labelSegmentLengthIncreased,
         true, /* checkArrayLengthOverflow */
         false, /* forceGenerateFastPath */
@@ -17423,7 +17549,34 @@
         IR::Opnd *indexOpnd = indirOpnd->GetIndexOpnd();
         if (indexOpnd == nullptr)
         {
-            indexOpnd = IR::IntConstOpnd::New(indirOpnd->GetOffset(), TyInt32, this->m_func);
+            if (indirOpnd->GetOffset() == 0)
+            {
+                // There are two ways that we can get an indirOpnd with no index and 0 offset.
+                // The first is that we're storing to element 0 in the array by constant offset.
+                // The second is that we got a pointer back that has spectre masking, so it's going
+                // to not have the appropriate index into the array. In that case, we need to regen
+                // the index.
+                // The plan is
+                // 1. get the backing buffer pointer
+                // 2. subtract that from the indexOpnd to get the numeric index
+                // This is unfortunately slightly worse perf for constant writes of vars to index 0
+                // of Uint8ClampedArrays, but that's hopefully uncommon enough that the impact will
+                // be minimal
+
+                // MOV backingBufferOpnd, [base + offset(arrayBuffer)]
+                // SUB indexOpnd, backingBufferOpnd
+                int bufferOffset = GetArrayOffsetOfHeadSegment(baseValueType);
+                IR::IndirOpnd* arrayBufferOpnd = IR::IndirOpnd::New(stElem->GetDst()->AsIndirOpnd()->GetBaseOpnd(), bufferOffset, TyMachPtr, this->m_func);
+                IR::RegOpnd* backingBufferOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func);
+                InsertMove(backingBufferOpnd, arrayBufferOpnd, instr);
+                IR::RegOpnd* tempIndexOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func);
+                InsertSub(false, tempIndexOpnd, indirOpnd->GetBaseOpnd(), backingBufferOpnd, instr);
+                indexOpnd = tempIndexOpnd->UseWithNewType(TyInt32, this->m_func);
+            }
+            else
+            {
+                indexOpnd = IR::IntConstOpnd::New(indirOpnd->GetOffset(), TyInt32, this->m_func);
+            }
         }
         else
         {
@@ -17556,6 +17709,9 @@
     {
         IR::Instr *const insertBeforeInstr = labelSegmentLengthIncreased->m_next;

+        // We might be changing the array to have missing values here, or we might be
+        // changing it to extend it; in either case, we're not going to make it _not_
+        // have missing values after this operation, so just write and fallthrough.
         // labelSegmentLengthIncreased:
         //     mov [segment + index], src
         //     jmp $fallThru
@@ -23335,7 +23491,7 @@ Lowerer::GenerateLdHomeObj(IR::Instr* instr)
         IR::BranchInstr* branchInstr = InsertCompareBranch(IR::IndirOpnd::New(instanceRegOpnd, 0, TyMachPtr, func), vtableAddressOpnd,
             Js::OpCode::BrEq_A, true, labelDone, instr);
-        InsertObjectPoison(instanceRegOpnd, branchInstr, instr);
+        InsertObjectPoison(instanceRegOpnd, branchInstr, instr, false);

         IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::ScriptFunction::GetOffsetOfHomeObj(), TyMachPtr, func);
         Lowerer::InsertMove(instanceRegOpnd, indirOpnd, instr);
@@ -23510,7 +23666,7 @@ Lowerer::GenerateSetHomeObj(IR::Instr* instrInsert)
         IR::BranchInstr *branchInstr = InsertCompareBranch(IR::IndirOpnd::New(funcObjRegOpnd, 0, TyMachPtr, func), vtableAddressOpnd,
             Js::OpCode::BrNeq_A, true, labelScriptFunction, instrInsert);
-        InsertObjectPoison(funcObjRegOpnd, branchInstr, instrInsert);
+        InsertObjectPoison(funcObjRegOpnd, branchInstr, instrInsert, false);

         instrInsert->InsertBefore(labelForGeneratorScriptFunction);

@@ -23598,7 +23754,7 @@ Lowerer::GenerateGetCurrentFunctionObject(IR::Instr * instr)
     IR::LabelInstr * labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false);
     IR::BranchInstr *branchInstr = InsertCompareBranch(IR::IndirOpnd::New(functionObjectOpnd, 0, TyMachPtr, func), vtableAddressOpnd,
         Js::OpCode::BrNeq_A, true, labelDone, insertBeforeInstr);
-    InsertObjectPoison(functionObjectOpnd, branchInstr, insertBeforeInstr);
+    InsertObjectPoison(functionObjectOpnd, branchInstr, insertBeforeInstr, false);

     IR::RegOpnd * boxedFunctionObjectOpnd = IR::RegOpnd::New(TyMachPtr, func);
     InsertMove(boxedFunctionObjectOpnd, IR::IndirOpnd::New(functionObjectOpnd, Js::StackScriptFunction::GetOffsetOfBoxedScriptFunction(), TyMachPtr, func), insertBeforeInstr);
@@ -25568,7 +25724,7 @@ Lowerer::GenerateStringTest(IR::RegOpnd *srcReg, IR::Instr *insertInstr, IR::Lab
         {
             branchInstr = InsertCompareBranch(src1, src2, Js::OpCode::BrNeq_A, labelHelper, insertInstr);
         }
-        InsertObjectPoison(srcReg, branchInstr, insertInstr);
+        InsertObjectPoison(srcReg, branchInstr, insertInstr, false);
     }
 }
diff --git a/lib/Backend/Lower.h b/lib/Backend/Lower.h
index 4eb5c807e0a..ac23a9de031 100644
--- a/lib/Backend/Lower.h
+++ b/lib/Backend/Lower.h
@@ -649,7 +649,7 @@ class Lowerer
     IR::Instr * LowerSlotArrayCheck(IR::Instr * instr);
     void InsertSlotArrayCheck(IR::Instr * instr, StackSym * dstSym, uint32 slotId);
     void InsertFrameDisplayCheck(IR::Instr * instr, StackSym * dstSym, FrameDisplayCheckRecord * record);
-    static void InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr);
+    static void InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr, bool isForStore);

     IR::RegOpnd * LoadIndexFromLikelyFloat(IR::RegOpnd *indexOpnd, const bool skipNegativeCheck, IR::LabelInstr *const notTaggedIntLabel, IR::LabelInstr *const negativeLabel, IR::Instr *const insertBeforeInstr);
diff --git a/lib/Backend/LowerMDShared.cpp b/lib/Backend/LowerMDShared.cpp
index 1449a00bbd2..7c0eb7fdb17 100644
--- a/lib/Backend/LowerMDShared.cpp
+++ b/lib/Backend/LowerMDShared.cpp
@@ -7149,7 +7149,7 @@ bool LowererMD::GenerateObjectTest(IR::Opnd * opndSrc, IR::Instr * insertInstr,
         // JNE $labelHelper
         IR::BranchInstr* branchInstr = IR::BranchInstr::New(Js::OpCode::JNE, labelTarget, this->m_func);
         insertInstr->InsertBefore(branchInstr);
-        InsertObjectPoison(opndSrc, branchInstr, insertInstr);
+        InsertObjectPoison(opndSrc, branchInstr, insertInstr, false);
     }
     return true;
 }
@@ -9464,9 +9464,9 @@ LowererMD::LowerTypeof(IR::Instr * typeOfInstr)
 }

 void
-LowererMD::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr)
+LowererMD::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr, bool isForStore)
 {
-    if (CONFIG_FLAG_RELEASE(PoisonObjects))
+    if ((isForStore && CONFIG_FLAG_RELEASE(PoisonObjectsForStores)) || (!isForStore && CONFIG_FLAG_RELEASE(PoisonObjectsForLoads)))
     {
         Js::OpCode opcode;
         if (branchInstr->m_opcode == Js::OpCode::JNE)
@@ -9475,7 +9475,7 @@ LowererMD::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchIns
         }
         else
         {
-            AssertOrFailFast(branchInstr->m_opcode == Js::OpCode::JEQ);
+            AssertOrFailFastMsg(branchInstr->m_opcode == Js::OpCode::JEQ, "Unexpected branch type in InsertObjectPoison preceding instruction");
             opcode = Js::OpCode::CMOVE;
         }
         AssertOrFailFast(branchInstr->m_prev->m_opcode == Js::OpCode::CMP || branchInstr->m_prev->m_opcode == Js::OpCode::TEST);
diff --git a/lib/Backend/LowerMDShared.h b/lib/Backend/LowerMDShared.h
index 5e9efaaf64c..8c155a93a1d 100644
--- a/lib/Backend/LowerMDShared.h
+++ b/lib/Backend/LowerMDShared.h
@@ -248,7 +248,7 @@ class LowererMD
     void GenerateIsJsObjectTest(IR::RegOpnd* instanceReg, IR::Instr* insertInstr, IR::LabelInstr* labelHelper);
     void LowerTypeof(IR::Instr * typeOfInstr);

-    static void InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr);
+    static void InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr, bool isForStore);
 public:
     //
     // These methods are simply forwarded to lowererMDArch
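[Editor's note — not part of the patch] The poison itself (x64 shown; the ARM64 lowerer uses CSEL variants analogously) is a conditional move placed immediately after the guarding compare-and-branch, with the move condition equal to the branch-taken condition, so on a mispredicted fall-through the object pointer is zeroed before any dependent speculative load can leak through a cache side channel. A hedged sketch of the emitted pattern:

    //     cmp    [obj + typeOffset], expectedType
    //     jne    $typeCheckFailed    <- architectural mismatch leaves here
    //     cmovne obj, zeroReg        <- same condition: a speculative fall-through
    //                                   with ZF=0 nulls obj, so dependent loads read
    //                                   relative to null, not an attacker-chosen address

[End note]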
diff --git a/lib/Backend/NativeCodeGenerator.cpp b/lib/Backend/NativeCodeGenerator.cpp
index 36e493d0435..ca2326b4535 100644
--- a/lib/Backend/NativeCodeGenerator.cpp
+++ b/lib/Backend/NativeCodeGenerator.cpp
@@ -3257,8 +3257,7 @@ NativeCodeGenerator::FreeNativeCodeGenAllocation(void* codeAddress)
 #if PDATA_ENABLED && defined(_WIN32)
         DelayDeletingFunctionTable::Clear();
 #endif
-        ThreadContext * context = this->scriptContext->GetThreadContext();
-        HRESULT hr = JITManager::GetJITManager()->FreeAllocation(context->GetRemoteThreadContextAddr(), (intptr_t)codeAddress);
+        HRESULT hr = JITManager::GetJITManager()->FreeAllocation(this->scriptContext->GetRemoteScriptAddr(), (intptr_t)codeAddress);
         JITManager::HandleServerCallResult(hr, RemoteCallType::MemFree);
     }
     else if(this->backgroundAllocators)
diff --git a/lib/Backend/ServerScriptContext.cpp b/lib/Backend/ServerScriptContext.cpp
index 25d58a6eac7..40b34ba3989 100644
--- a/lib/Backend/ServerScriptContext.cpp
+++ b/lib/Backend/ServerScriptContext.cpp
@@ -26,6 +26,7 @@ ServerScriptContext::ServerScriptContext(ScriptContextDataIDL * contextData, Ser
     m_asmJsInterpreterThunkBufferManager(&m_sourceCodeArena, threadContextInfo->GetThunkPageAllocators(), nullptr, threadContextInfo, _u("Asm.js interpreter thunk buffer"), GetThreadContext()->GetProcessHandle()),
     m_domFastPathHelperMap(nullptr),
     m_moduleRecords(&HeapAllocator::Instance),
+    m_codeGenAlloc(nullptr, nullptr, threadContextInfo, threadContextInfo->GetCodePageAllocators(), threadContextInfo->GetProcessHandle()),
     m_globalThisAddr(0),
 #ifdef PROFILE_EXEC
     m_codeGenProfiler(nullptr),
 #endif
     m_refCount(0),
     m_isClosed(false)
 {
+
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
+    m_codeGenAlloc.canCreatePreReservedSegment = threadContextInfo->CanCreatePreReservedSegment();
+#endif
+
 #ifdef PROFILE_EXEC
     if (Js::Configuration::Global.flags.IsEnabled(Js::ProfileFlag))
     {
@@ -357,7 +363,9 @@ ServerScriptContext::Close()
 {
     Assert(!IsClosed());
     m_isClosed = true;
-
+
+    m_codeGenAlloc.emitBufferManager.Decommit();
+
 #ifdef STACK_BACK_TRACE
     ServerContextManager::RecordCloseContext(this);
 #endif
@@ -381,6 +389,12 @@ ServerScriptContext::Release()
     }
 }

+OOPCodeGenAllocators *
+ServerScriptContext::GetCodeGenAllocators()
+{
+    return &m_codeGenAlloc;
+}
+
 Field(Js::Var)*
 ServerScriptContext::GetModuleExportSlotArrayAddress(uint moduleIndex, uint slotIndex)
 {
diff --git a/lib/Backend/ServerScriptContext.h b/lib/Backend/ServerScriptContext.h
index 926ff9d9384..2a9eefc59fc 100644
--- a/lib/Backend/ServerScriptContext.h
+++ b/lib/Backend/ServerScriptContext.h
@@ -87,6 +87,7 @@ class ServerScriptContext : public ScriptContextInfo
     Js::ScriptContextProfiler * GetCodeGenProfiler() const;
     ServerThreadContext* GetThreadContext() { return threadContextHolder.threadContextInfo; }
+    OOPCodeGenAllocators * GetCodeGenAllocators();
     ArenaAllocator * GetSourceCodeArena();
     void Close();
     void AddRef();
@@ -107,6 +108,8 @@ class ServerScriptContext : public ScriptContextInfo

     uint m_refCount;

+    OOPCodeGenAllocators m_codeGenAlloc;
+
     bool m_isPRNGSeeded;
     bool m_isClosed;
 #endif
diff --git a/lib/Backend/ServerThreadContext.cpp b/lib/Backend/ServerThreadContext.cpp
index ac74156f64a..1ffad6f83a0 100644
--- a/lib/Backend/ServerThreadContext.cpp
+++ b/lib/Backend/ServerThreadContext.cpp
@@ -14,24 +14,21 @@ ServerThreadContext::ServerThreadContext(ThreadContextDataIDL* data, ProcessCont
     m_numericPropertyBV(nullptr),
     m_preReservedSectionAllocator(processContext->processHandle),
     m_sectionAllocator(processContext->processHandle),
-    m_thunkPageAllocators(nullptr, /* allocXData */ false, &m_sectionAllocator, nullptr, processContext->processHandle),
     m_codePageAllocators(nullptr, ALLOC_XDATA, &m_sectionAllocator, &m_preReservedSectionAllocator, processContext->processHandle),
+    m_thunkPageAllocators(nullptr, /* allocXData */ false, &m_sectionAllocator, nullptr, processContext->processHandle),
 #if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     m_jitThunkEmitter(this, &m_sectionAllocator, processContext->processHandle),
 #endif
-    m_codeGenAlloc(nullptr, nullptr, this, &m_codePageAllocators, processContext->processHandle),
     m_pageAlloc(nullptr, Js::Configuration::Global.flags, PageAllocatorType_BGJIT,
         AutoSystemInfo::Data.IsLowMemoryProcess() ?
             PageAllocator::DefaultLowMaxFreePageCount :
             PageAllocator::DefaultMaxFreePageCount
     ),
-    processContext(processContext)
+    processContext(processContext),
+    m_canCreatePreReservedSegment(data->allowPrereserveAlloc != FALSE)
 {
     m_pid = GetProcessId(processContext->processHandle);

-#if !TARGET_64 && _CONTROL_FLOW_GUARD
-    m_codeGenAlloc.canCreatePreReservedSegment = data->allowPrereserveAlloc != FALSE;
-#endif
     m_numericPropertyBV = HeapNew(BVSparse, &HeapAllocator::Instance);
 }

@@ -121,22 +118,16 @@ ServerThreadContext::GetThunkPageAllocators()
     return &m_thunkPageAllocators;
 }

-CustomHeap::OOPCodePageAllocators *
-ServerThreadContext::GetCodePageAllocators()
-{
-    return &m_codePageAllocators;
-}
-
 SectionAllocWrapper *
 ServerThreadContext::GetSectionAllocator()
 {
     return &m_sectionAllocator;
 }

-OOPCodeGenAllocators *
-ServerThreadContext::GetCodeGenAllocators()
+CustomHeap::OOPCodePageAllocators *
+ServerThreadContext::GetCodePageAllocators()
 {
-    return &m_codeGenAlloc;
+    return &m_codePageAllocators;
 }

 #if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
@@ -172,6 +163,12 @@ ServerThreadContext::GetForegroundPageAllocator()
     return &m_pageAlloc;
 }

+bool
+ServerThreadContext::CanCreatePreReservedSegment() const
+{
+    return m_canCreatePreReservedSegment;
+}
+
 bool
 ServerThreadContext::IsNumericProperty(Js::PropertyId propertyId)
 {
diff --git a/lib/Backend/ServerThreadContext.h b/lib/Backend/ServerThreadContext.h
index b58db17834e..f8f764a67a2 100644
--- a/lib/Backend/ServerThreadContext.h
+++ b/lib/Backend/ServerThreadContext.h
@@ -55,7 +55,6 @@ class ServerThreadContext : public ThreadContextInfo
     virtual ptrdiff_t GetChakraBaseAddressDifference() const override;
     virtual ptrdiff_t GetCRTBaseAddressDifference() const override;

-    OOPCodeGenAllocators * GetCodeGenAllocators();
 #if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     OOPJITThunkEmitter * GetJITThunkEmitter();
 #endif
@@ -72,6 +71,7 @@ class ServerThreadContext : public ThreadContextInfo
     intptr_t GetRuntimeChakraBaseAddress() const;
     intptr_t GetRuntimeCRTBaseAddress() const;
+    bool CanCreatePreReservedSegment() const;

     static intptr_t GetJITCRTBaseAddress();

@@ -84,7 +84,6 @@ class ServerThreadContext : public ThreadContextInfo
     SectionAllocWrapper m_sectionAllocator;
     CustomHeap::OOPCodePageAllocators m_thunkPageAllocators;
     CustomHeap::OOPCodePageAllocators m_codePageAllocators;
-    OOPCodeGenAllocators m_codeGenAlloc;
 #if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     OOPJITThunkEmitter m_jitThunkEmitter;
 #endif
@@ -96,5 +95,6 @@ class ServerThreadContext : public ThreadContextInfo
     CriticalSection m_cs;
     uint m_refCount;

+    bool m_canCreatePreReservedSegment;
 };
 #endif
diff --git a/lib/Backend/arm64/LowerMD.cpp b/lib/Backend/arm64/LowerMD.cpp
index 75384d8410c..fb5b13e74b4 100644
--- a/lib/Backend/arm64/LowerMD.cpp
+++ b/lib/Backend/arm64/LowerMD.cpp
@@ -7954,9 +7954,9 @@ LowererMD::LowerTypeof(IR::Instr* typeOfInstr)
 }

 void
-LowererMD::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr)
+LowererMD::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr, bool isForStore)
 {
-    if (CONFIG_FLAG_RELEASE(PoisonObjects))
+    if ((isForStore && CONFIG_FLAG_RELEASE(PoisonObjectsForStores)) || (!isForStore && CONFIG_FLAG_RELEASE(PoisonObjectsForLoads)))
     {
         Js::OpCode opcode;
         if (branchInstr->m_opcode == Js::OpCode::BNE)
@@ -7965,7 +7965,7 @@ LowererMD::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchIns
         }
         else
         {
-            AssertOrFailFast(branchInstr->m_opcode == Js::OpCode::BEQ);
+            AssertOrFailFastMsg(branchInstr->m_opcode == Js::OpCode::BEQ, "Unexpected branch type in InsertObjectPoison preceding instruction");
             opcode = Js::OpCode::CSELNE;
         }
         AssertOrFailFast(branchInstr->m_prev->m_opcode == Js::OpCode::SUBS || branchInstr->m_prev->m_opcode == Js::OpCode::ANDS);
diff --git a/lib/Backend/arm64/LowerMD.h b/lib/Backend/arm64/LowerMD.h
index 9aab46dc0dc..0ca2539a8cc 100644
--- a/lib/Backend/arm64/LowerMD.h
+++ b/lib/Backend/arm64/LowerMD.h
@@ -270,7 +270,8 @@ class LowererMD
     void GenerateMemInit(IR::RegOpnd * opnd, int32 offset, size_t value, IR::Instr * insertBeforeInstr, bool isZeroed = false);

-    static void InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr);
+    static void InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr, bool isForStore);
+
 private:
     static IR::Instr * ChangeToAssign(IR::Instr * instr, IRType destType);
     void GenerateFlagInlineCacheCheckForGetterSetter(
diff --git a/lib/Common/ChakraCoreVersion.h b/lib/Common/ChakraCoreVersion.h
index 43adb163722..9503cf10387 100644
--- a/lib/Common/ChakraCoreVersion.h
+++ b/lib/Common/ChakraCoreVersion.h
@@ -17,7 +17,7 @@
 // ChakraCore version number definitions (used in ChakraCore binary metadata)
 #define CHAKRA_CORE_MAJOR_VERSION 1
 #define CHAKRA_CORE_MINOR_VERSION 8
-#define CHAKRA_CORE_PATCH_VERSION 3
+#define CHAKRA_CORE_PATCH_VERSION 4
 #define CHAKRA_CORE_VERSION_RELEASE_QFE 0 // Redundant with PATCH_VERSION. Keep this value set to 0.
 // -------------
diff --git a/lib/Common/ConfigFlagsList.h b/lib/Common/ConfigFlagsList.h
index 8d7137d1eed..19e8b3ffe69 100644
--- a/lib/Common/ConfigFlagsList.h
+++ b/lib/Common/ConfigFlagsList.h
@@ -495,7 +495,13 @@ PHASE(All)
 #define DEFAULT_CONFIG_PoisonFloatArrayLoad (true)
 #define DEFAULT_CONFIG_PoisonTypedArrayLoad (true)
 #define DEFAULT_CONFIG_PoisonStringLoad (true)
-#define DEFAULT_CONFIG_PoisonObjects (true)
+#define DEFAULT_CONFIG_PoisonObjectsForLoads (true)
+
+#define DEFAULT_CONFIG_PoisonVarArrayStore (true)
+#define DEFAULT_CONFIG_PoisonIntArrayStore (true)
+#define DEFAULT_CONFIG_PoisonFloatArrayStore (true)
+#define DEFAULT_CONFIG_PoisonTypedArrayStore (true)
+#define DEFAULT_CONFIG_PoisonObjectsForStores (true)

 #ifdef RECYCLER_PAGE_HEAP
 #define DEFAULT_CONFIG_PageHeap ((Js::Number) PageHeapMode::PageHeapModeOff)
@@ -1244,7 +1250,13 @@ FLAGPR(Boolean, MitigateSpectre, PoisonIntArrayLoad, "Poison loads from Int arra
 FLAGPR(Boolean, MitigateSpectre, PoisonFloatArrayLoad, "Poison loads from Float arrays", DEFAULT_CONFIG_PoisonFloatArrayLoad)
 FLAGPR(Boolean, MitigateSpectre, PoisonTypedArrayLoad, "Poison loads from TypedArrays", DEFAULT_CONFIG_PoisonTypedArrayLoad)
 FLAGPR(Boolean, MitigateSpectre, PoisonStringLoad, "Poison indexed loads from strings", DEFAULT_CONFIG_PoisonStringLoad)
-FLAGPR(Boolean, MitigateSpectre, PoisonObjects, "Poison objects after type checks", DEFAULT_CONFIG_PoisonObjects)
+FLAGPR(Boolean, MitigateSpectre, PoisonObjectsForLoads, "Poison objects after type checks", DEFAULT_CONFIG_PoisonObjectsForLoads)
+
+FLAGPR(Boolean, MitigateSpectre, PoisonVarArrayStore, "Poison stores to Var arrays", DEFAULT_CONFIG_PoisonVarArrayStore)
+FLAGPR(Boolean, MitigateSpectre, PoisonIntArrayStore, "Poison stores to Int arrays", DEFAULT_CONFIG_PoisonIntArrayStore)
+FLAGPR(Boolean, MitigateSpectre, PoisonFloatArrayStore, "Poison stores to Float arrays", DEFAULT_CONFIG_PoisonFloatArrayStore)
+FLAGPR(Boolean, MitigateSpectre, PoisonTypedArrayStore, "Poison stores to TypedArrays", DEFAULT_CONFIG_PoisonTypedArrayStore)
+FLAGPR(Boolean, MitigateSpectre, PoisonObjectsForStores, "Poison objects after type checks", DEFAULT_CONFIG_PoisonObjectsForStores)

 FLAGNR(Number, MinInterpretCount , "Minimum number of times a function must be interpreted", 0)
 FLAGNR(Number, MinSimpleJitRunCount , "Minimum number of times a function must be run in simple jit", 0)
diff --git a/lib/JITClient/JITManager.cpp b/lib/JITClient/JITManager.cpp
index b5923208d8c..147f71445c2 100644
--- a/lib/JITClient/JITManager.cpp
+++ b/lib/JITClient/JITManager.cpp
@@ -586,7 +586,7 @@ JITManager::CloseScriptContext(

 HRESULT
 JITManager::FreeAllocation(
-    __in PTHREADCONTEXT_HANDLE threadContextInfoAddress,
+    __in PSCRIPTCONTEXT_HANDLE scriptContextInfoAddress,
     __in intptr_t codeAddress)
 {
     Assert(IsOOPJITEnabled());

     HRESULT hr = E_FAIL;
     RpcTryExcept
     {
-        hr = ClientFreeAllocation(m_rpcBindingHandle, threadContextInfoAddress, codeAddress);
+        hr = ClientFreeAllocation(m_rpcBindingHandle, scriptContextInfoAddress, codeAddress);
     }
     RpcExcept(RpcExceptionFilter(RpcExceptionCode()))
     {
diff --git a/lib/JITClient/JITManager.h b/lib/JITClient/JITManager.h
index 699424b2dfb..984d7c6ffb3 100644
--- a/lib/JITClient/JITManager.h
+++ b/lib/JITClient/JITManager.h
@@ -79,7 +79,7 @@ class JITManager
             __in PSCRIPTCONTEXT_HANDLE scriptContextInfoAddress);

     HRESULT FreeAllocation(
-        __in PTHREADCONTEXT_HANDLE threadContextInfoAddress,
+        __in PSCRIPTCONTEXT_HANDLE scriptContextInfoAddress,
         __in intptr_t codeAddress);

     HRESULT SetIsPRNGSeeded(
@@ -213,7 +213,7 @@ class JITManager
     { Assert(false); return E_FAIL; }

     HRESULT FreeAllocation(
-        __in PTHREADCONTEXT_HANDLE threadContextInfoAddress,
+        __in PSCRIPTCONTEXT_HANDLE scriptContextInfoAddress,
         __in intptr_t codeAddress)
     { Assert(false); return E_FAIL; }

diff --git a/lib/JITIDL/ChakraJIT.idl b/lib/JITIDL/ChakraJIT.idl
index e3d5a8e01cc..5a816a700be 100644
--- a/lib/JITIDL/ChakraJIT.idl
+++ b/lib/JITIDL/ChakraJIT.idl
@@ -85,7 +85,7 @@ interface IChakraJIT
     HRESULT FreeAllocation(
         [in] handle_t binding,
-        [in] PTHREADCONTEXT_HANDLE threadContextInfoAddress,
+        [in] PSCRIPTCONTEXT_HANDLE scriptContextInfoAddress,
         [in] CHAKRA_PTR codeAddress);

     HRESULT NewInterpreterThunkBlock(
diff --git a/lib/JITServer/JITServer.cpp b/lib/JITServer/JITServer.cpp
index 869a880867b..12fd170ead5 100644
--- a/lib/JITServer/JITServer.cpp
+++ b/lib/JITServer/JITServer.cpp
@@ -676,10 +676,10 @@ ServerIsInterpreterThunkAddr(
 HRESULT
 ServerFreeAllocation(
     /* [in] */ handle_t binding,
-    /* [in] */ __RPC__in PTHREADCONTEXT_HANDLE threadContextInfo,
+    /* [in] */ __RPC__in PSCRIPTCONTEXT_HANDLE scriptContextInfo,
     /* [in] */ intptr_t codeAddress)
 {
-    ServerThreadContext * context = (ServerThreadContext*)DecodePointer(threadContextInfo);
+    ServerScriptContext* context = (ServerScriptContext*)DecodePointer(scriptContextInfo);

     if (context == nullptr)
     {
@@ -709,7 +709,7 @@ ServerIsNativeAddr(

     *result = false;

-    ServerThreadContext * context = (ServerThreadContext*)DecodePointer(threadContextInfo);
+    ServerThreadContext* context = (ServerThreadContext*)DecodePointer(threadContextInfo);
     if (context == nullptr)
     {
         Assert(false);
@@ -850,7 +850,7 @@ ServerRemoteCodeGen(
         nullptr,
         nullptr,
         jitWorkItem->GetPolymorphicInlineCacheInfo(),
-        threadContextInfo->GetCodeGenAllocators(),
+        scriptContextInfo->GetCodeGenAllocators(),
 #if !FLOATVAR
         nullptr, // number allocator
 #endif
diff --git a/lib/Runtime/Base/CrossSite.cpp b/lib/Runtime/Base/CrossSite.cpp
index db1da4fdfb5..66a62280952 100644
--- a/lib/Runtime/Base/CrossSite.cpp
+++ b/lib/Runtime/Base/CrossSite.cpp
@@ -228,6 +228,9 @@ namespace Js
         }
 #endif

+        // Marshaling should not cause any re-entrancy.
+        JS_REENTRANCY_LOCK(jsReentLock, scriptContext->GetThreadContext());
+
 #if ENABLE_COPYONACCESS_ARRAY
         JavascriptLibrary::CheckAndConvertCopyOnAccessNativeIntArray(object);
 #endif
@@ -297,9 +300,18 @@ namespace Js
         {
             if (!dynamicObject->IsCrossSiteObject())
             {
-                TTD_XSITE_LOG(object->GetScriptContext(), "MarshalDynamicObjectAndPrototype", object);
+                if (JavascriptProxy::Is(dynamicObject))
+                {
+                    // We don't need to marshal the prototype chain in the case of Proxy. Otherwise we will go to the user code.
+                    TTD_XSITE_LOG(object->GetScriptContext(), "MarshalDynamicObject", object);
+                    MarshalDynamicObject(scriptContext, dynamicObject);
+                }
+                else
+                {
+                    TTD_XSITE_LOG(object->GetScriptContext(), "MarshalDynamicObjectAndPrototype", object);

-                MarshalDynamicObjectAndPrototype(scriptContext, dynamicObject);
+                    MarshalDynamicObjectAndPrototype(scriptContext, dynamicObject);
+                }
             }
         }
         else
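[Editor's note — not part of the patch] MarshalDynamicObjectAndPrototype marshals an object and then walks up its prototype chain; for a JavascriptProxy, fetching the prototype dispatches the proxy's getPrototypeOf trap — user script — which the re-entrancy lock added at the top of this marshaling routine now forbids. A hedged sketch of the distinction (assumed shapes):

    // ordinary object: trap-free prototype walk
    //     for (RecyclableObject* o = dynamicObject; DynamicObject::Is(o); o = o->GetPrototype())
    //         MarshalDynamicObject(scriptContext, o);
    //
    // proxy: marshal only the proxy itself; its prototype is produced by
    // user code (the getPrototypeOf trap), so the walk must not run here.

[End note]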
diff --git a/lib/Runtime/Base/ScriptContext.cpp b/lib/Runtime/Base/ScriptContext.cpp
index b310f190608..549f9c4c258 100644
--- a/lib/Runtime/Base/ScriptContext.cpp
+++ b/lib/Runtime/Base/ScriptContext.cpp
@@ -701,6 +701,13 @@ namespace Js
         }
 #endif

+#if ENABLE_NATIVE_CODEGEN
+        if (m_remoteScriptContextAddr)
+        {
+            JITManager::GetJITManager()->CloseScriptContext(m_remoteScriptContextAddr);
+        }
+#endif
+
 #ifdef ENABLE_SCRIPT_PROFILING
         // Stop profiling if present
         DeRegisterProfileProbe(S_OK, nullptr);
@@ -1452,12 +1459,6 @@ namespace Js
 #endif
             }

-#if ENABLE_NATIVE_CODEGEN
-            if (m_remoteScriptContextAddr)
-            {
-                JITManager::GetJITManager()->CloseScriptContext(m_remoteScriptContextAddr);
-            }
-#endif
             this->PrintStats();
         }
     }
diff --git a/lib/Runtime/Base/ThreadContext.cpp b/lib/Runtime/Base/ThreadContext.cpp
index eace63e2a00..31812ac5f13 100644
--- a/lib/Runtime/Base/ThreadContext.cpp
+++ b/lib/Runtime/Base/ThreadContext.cpp
@@ -2604,6 +2604,44 @@ ThreadContext::PreSweepCallback()
     this->dynamicObjectEnumeratorCacheMap.Clear();
 }

+void
+ThreadContext::PreRescanMarkCallback()
+{
+    // If this feature is turned off or if we're already in profile collection mode, do nothing
+    // We also do nothing if expiration is explicitly disabled by someone lower down the stack
+    if (!PHASE_OFF1(Js::ExpirableCollectPhase) && InExpirableCollectMode() && !this->disableExpiration)
+    {
+        this->DoExpirableCollectModeStackWalk();
+    }
+}
+
+void
+ThreadContext::DoExpirableCollectModeStackWalk()
+{
+    if (this->entryExitRecord != nullptr)
+    {
+        // If we're in script, we will do a stack walk, find the JavascriptFunction's on the stack
+        // and mark their entry points as being used so that we don't prematurely expire them
+
+        Js::ScriptContext* topScriptContext = this->entryExitRecord->scriptContext;
+        Js::JavascriptStackWalker walker(topScriptContext, TRUE);
+
+        Js::JavascriptFunction* javascriptFunction = nullptr;
+        while (walker.GetCallerWithoutInlinedFrames(&javascriptFunction))
+        {
+            if (javascriptFunction != nullptr && Js::ScriptFunction::Test(javascriptFunction))
+            {
+                Js::ScriptFunction* scriptFunction = (Js::ScriptFunction*) javascriptFunction;
+                Js::FunctionEntryPointInfo* entryPointInfo = scriptFunction->GetFunctionEntryPointInfo();
+                entryPointInfo->SetIsObjectUsed();
+                scriptFunction->GetFunctionBody()->MapEntryPoints([](int index, Js::FunctionEntryPointInfo* entryPoint){
+                    entryPoint->SetIsObjectUsed();
+                });
+            }
+        }
+    }
+}
+
 void
 ThreadContext::CollectionCallBack(RecyclerCollectCallBackFlags flags)
 {
@@ -2998,30 +3036,6 @@ ThreadContext::TryEnterExpirableCollectMode()
             Assert(object);
             object->EnterExpirableCollectMode();
         }
-
-        if (this->entryExitRecord != nullptr)
-        {
-            // If we're in script, we will do a stack walk, find the JavascriptFunction's on the stack
-            // and mark their entry points as being used so that we don't prematurely expire them
-
-            Js::ScriptContext* topScriptContext = this->entryExitRecord->scriptContext;
-            Js::JavascriptStackWalker walker(topScriptContext, TRUE);
-
-            Js::JavascriptFunction* javascriptFunction = nullptr;
-            while (walker.GetCallerWithoutInlinedFrames(&javascriptFunction))
-            {
-                if (javascriptFunction != nullptr && Js::ScriptFunction::Test(javascriptFunction))
-                {
-                    Js::ScriptFunction* scriptFunction = (Js::ScriptFunction*) javascriptFunction;
-                    Js::FunctionEntryPointInfo* entryPointInfo = scriptFunction->GetFunctionEntryPointInfo();
-                    entryPointInfo->SetIsObjectUsed();
-                    scriptFunction->GetFunctionBody()->MapEntryPoints([](int index, Js::FunctionEntryPointInfo* entryPoint){
-                        entryPoint->SetIsObjectUsed();
-                    });
-                }
-            }
-
-        }
     }
 }

diff --git a/lib/Runtime/Base/ThreadContext.h b/lib/Runtime/Base/ThreadContext.h
index ed89c7ffdcc..74622aa2837 100644
--- a/lib/Runtime/Base/ThreadContext.h
+++ b/lib/Runtime/Base/ThreadContext.h
@@ -1674,6 +1674,7 @@ class ThreadContext sealed :
     // DefaultCollectWrapper
     virtual void PreCollectionCallBack(CollectionFlags flags) override;
     virtual void PreSweepCallback() override;
+    virtual void PreRescanMarkCallback() override;
     virtual void WaitCollectionCallBack() override;
     virtual void PostCollectionCallBack() override;
     virtual BOOL ExecuteRecyclerCollectionFunction(Recycler * recycler, CollectionFunction function, CollectionFlags flags) override;
@@ -1683,6 +1684,8 @@ class ThreadContext sealed :
     virtual void DisposeObjects(Recycler * recycler) override;
     virtual void PreDisposeObjectsCallBack() override;

+    void DoExpirableCollectModeStackWalk();
+
     typedef DList ExpirableObjectList;
     ExpirableObjectList* expirableObjectList;
     ExpirableObjectList* expirableObjectDisposeList;
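[Editor's note — not part of the patch] The entry-point stack walk moves from TryEnterExpirableCollectMode (run once, when the mode is entered) to the new PreRescanMarkCallback, so it now runs each time the recycler is about to rescan/mark while expirable-collect mode is active. Roughly (assumed callback ordering):

    // PreCollectionCallBack -> PreRescanMarkCallback -> mark/rescan
    //     -> PreSweepCallback -> sweep -> PostCollectionCallBack
    //
    // Walking the JS stack at PreRescanMarkCallback time marks the entry
    // points of functions currently on the stack as used, so a collection
    // that overlaps execution cannot expire (and free) code that is running.

[End note]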
diff --git a/lib/Runtime/Library/BoundFunction.cpp b/lib/Runtime/Library/BoundFunction.cpp
index d7332bd8034..3e108b90bf2 100644
--- a/lib/Runtime/Library/BoundFunction.cpp
+++ b/lib/Runtime/Library/BoundFunction.cpp
@@ -155,12 +155,12 @@ namespace Js
         // OACR thinks that this can change between here and the check in the for loop below
         const unsigned int argCount = args.Info.Count;
 
-        if ((boundFunction->count + argCount) > CallInfo::kMaxCountArgs)
+        if ((boundFunction->count + args.GetArgCountWithExtraArgs()) > CallInfo::kMaxCountArgs)
         {
             JavascriptError::ThrowRangeError(scriptContext, JSERR_ArgListTooLarge);
         }
 
-        Field(Var) *newValues = RecyclerNewArray(scriptContext->GetRecycler(), Field(Var), boundFunction->count + argCount);
+        Field(Var) *newValues = RecyclerNewArray(scriptContext->GetRecycler(), Field(Var), boundFunction->count + args.GetArgCountWithExtraArgs());
 
         uint index = 0;
 
@@ -188,8 +188,15 @@ namespace Js
             newValues[index++] = args[i];
         }
 
+        if (args.HasExtraArg())
+        {
+            newValues[index++] = args.Values[argCount];
+        }
+
         actualArgs = Arguments(args.Info, unsafe_write_barrier_cast<Var*>(newValues));
         actualArgs.Info.Count = boundFunction->count + argCount;
+
+        Assert(index == actualArgs.GetArgCountWithExtraArgs());
     }
     else
     {
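The BoundFunction fix sizes newValues by args.GetArgCountWithExtraArgs() rather than args.Info.Count, and copies the trailing hidden argument (args.Values[argCount]) when one is present. A self-contained arithmetic sketch of why the old sizing under-allocated, assuming a construct-style call that carries one hidden extra argument (e.g. new.target) beyond Info.Count; the concrete counts are illustrative:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t boundCount = 3;             // bound 'this' + two bound args
        uint32_t infoCount  = 2;             // args.Info.Count: 'this' + one call arg
        uint32_t withExtra  = infoCount + 1; // args.GetArgCountWithExtraArgs(): + hidden arg

        uint32_t oldSlots = boundCount + infoCount;  // 5 slots allocated
        uint32_t newSlots = boundCount + withExtra;  // 6 slots actually written
        assert(newSlots == oldSlots + 1);            // old sizing was one short: a heap overrun
        return 0;
    }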
diff --git a/lib/Runtime/Library/DataView.cpp b/lib/Runtime/Library/DataView.cpp
index 487ca8fd501..28c779d6b14 100644
--- a/lib/Runtime/Library/DataView.cpp
+++ b/lib/Runtime/Library/DataView.cpp
@@ -597,8 +597,7 @@ namespace Js
         {
             JavascriptError::ThrowTypeError(scriptContext, JSERR_NeedArrayBufferObject);
         }
-
-        return arrayBuffer;
+        return CrossSite::MarshalVar(scriptContext, arrayBuffer);
     }
 
     Var DataView::EntryGetterByteLength(RecyclableObject* function, CallInfo callInfo, ...)
diff --git a/lib/Runtime/Library/JavascriptArray.inl b/lib/Runtime/Library/JavascriptArray.inl
index e66fb990b45..d7cd4813fdd 100644
--- a/lib/Runtime/Library/JavascriptArray.inl
+++ b/lib/Runtime/Library/JavascriptArray.inl
@@ -155,6 +155,7 @@ namespace Js
         DetermineInlineHeadSegmentPointer(array);
         if(wasZeroAllocated)
         {
+            AssertOrFailFast(size <= SparseArraySegmentBase::INLINE_CHUNK_SIZE);
             if(length != 0)
             {
                 head->length = length;
@@ -238,6 +239,14 @@ namespace Js
             DetermineAllocationSize(length, &allocationPlusSize, &alignedInlineElementSlots);
         }
 
+        // alignedInlineElementSlots is actually the 'size' of the segment. The size of the segment should not be greater
+        // than the inline head segment limit, otherwise the segment may not be interpreted as an inline segment once the
+        // length extends to the size. The size could increase because of alignment.
+        // Update the size so that it does not exceed SparseArraySegmentBase::INLINE_CHUNK_SIZE.
+
+        uint inlineChunkSize = SparseArraySegmentBase::INLINE_CHUNK_SIZE;
+        uint size = min(alignedInlineElementSlots, inlineChunkSize);
+
         array = RecyclerNewPlusZ(recycler, allocationPlusSize, className, length, arrayType);
 
         // A new array's head segment length is initialized to zero despite the array length being nonzero because the segment
@@ -250,9 +259,9 @@ namespace Js
         // a variable until it is fully initialized, there is no way for script code to use the array while it still has missing
         // values.
         SparseArraySegment *head =
-            InitArrayAndHeadSegment(array, length, alignedInlineElementSlots, true);
+            InitArrayAndHeadSegment(array, length, size, true);
 
-        head->FillSegmentBuffer(length, alignedInlineElementSlots);
+        head->FillSegmentBuffer(length, size);
 
         Assert(array->HasNoMissingValues());
         return array;
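The JavascriptArray.inl change clamps the head segment's size to SparseArraySegmentBase::INLINE_CHUNK_SIZE: DetermineAllocationSize can round alignedInlineElementSlots up for alignment, and a head segment whose recorded size exceeds the inline-chunk limit can stop being recognized as an inline segment once its length grows into the padded slots. A self-contained sketch of the clamp; the concrete numbers are assumptions for illustration only, not the real constants:

    #include <algorithm>
    #include <cstdint>

    int main()
    {
        uint32_t INLINE_CHUNK_SIZE = 16;           // stand-in value, not the real constant
        uint32_t alignedInlineElementSlots = 18;   // rounded up by allocation alignment

        // Without the clamp the segment would report size 18 > 16 and could be
        // misclassified as non-inline once its length grows past the limit.
        uint32_t size = std::min(alignedInlineElementSlots, INLINE_CHUNK_SIZE);
        return size == 16 ? 0 : 1;
    }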
diff --git a/lib/Runtime/Library/JavascriptFunction.cpp b/lib/Runtime/Library/JavascriptFunction.cpp
index 250c0e37ca4..6c32f9d1030 100644
--- a/lib/Runtime/Library/JavascriptFunction.cpp
+++ b/lib/Runtime/Library/JavascriptFunction.cpp
@@ -1601,16 +1601,8 @@ void __cdecl _alloca_probe_16()
 
         Assert(functionInfo);
 
-        ScriptFunctionWithInlineCache * funcObjectWithInlineCache = ScriptFunctionWithInlineCache::Is(*functionRef) ? ScriptFunctionWithInlineCache::FromVar(*functionRef) : nullptr;
         if (functionInfo->IsDeferredParseFunction())
         {
-            if (funcObjectWithInlineCache)
-            {
-                // If inline caches were populated from a function body that has been redeferred, the caches have been cleaned up,
-                // so clear the pointers. REVIEW: Is this a perf loss in some cases?
-                funcObjectWithInlineCache->ClearBorrowedInlineCacheOnFunctionObject();
-            }
-
             funcBody = functionInfo->Parse(functionRef);
             fParsed = funcBody->IsFunctionParsed() ? TRUE : FALSE;
 
@@ -1636,18 +1628,6 @@ void __cdecl _alloca_probe_16()
 
         JavascriptMethod thunkEntryPoint = (*functionRef)->UpdateUndeferredBody(funcBody);
 
-        if (funcObjectWithInlineCache && !funcObjectWithInlineCache->GetHasOwnInlineCaches())
-        {
-            // If the function object needs to use the inline caches from the function body, point them to the
-            // function body's caches. This is required in two redeferral cases:
-            //
-            // 1. We might have cleared the caches on the function object (ClearBorrowedInlineCacheOnFunctionObject)
-            //    above if the function body was redeferred.
-            // 2. Another function object could have been called before and undeferred the function body, thereby creating
-            //    new inline caches. This function object would still be pointing to the old ones and needs updating.
-            funcObjectWithInlineCache->SetInlineCachesFromFunctionBody();
-        }
-
         return thunkEntryPoint;
     }
diff --git a/lib/Runtime/Library/ScriptFunction.cpp b/lib/Runtime/Library/ScriptFunction.cpp
index b3df730e5a2..75e46710fc9 100644
--- a/lib/Runtime/Library/ScriptFunction.cpp
+++ b/lib/Runtime/Library/ScriptFunction.cpp
@@ -83,50 +83,47 @@ namespace Js
 
         bool hasSuperReference = functionProxy->HasSuperReference();
 
+        ScriptFunction * pfuncScript = nullptr;
+
         if (functionProxy->IsFunctionBody() && functionProxy->GetFunctionBody()->GetInlineCachesOnFunctionObject())
         {
-            Js::FunctionBody * functionBody = functionProxy->GetFunctionBody();
-            ScriptFunctionWithInlineCache* pfuncScriptWithInlineCache = scriptContext->GetLibrary()->CreateScriptFunctionWithInlineCache(functionProxy);
-            pfuncScriptWithInlineCache->SetEnvironment(environment);
-            JS_ETW(EventWriteJSCRIPT_RECYCLER_ALLOCATE_FUNCTION(pfuncScriptWithInlineCache, EtwTrace::GetFunctionId(functionProxy)));
-
-            Assert(functionBody->GetInlineCacheCount() + functionBody->GetIsInstInlineCacheCount());
-
+            FunctionBody * functionBody = functionProxy->GetFunctionBody();
             if (functionBody->GetIsFirstFunctionObject())
             {
-                // point the inline caches of the first function object to those on the function body.
-                pfuncScriptWithInlineCache->SetInlineCachesFromFunctionBody();
                 functionBody->SetIsNotFirstFunctionObject();
             }
             else
             {
+                ScriptFunctionWithInlineCache* pfuncScriptWithInlineCache = scriptContext->GetLibrary()->CreateScriptFunctionWithInlineCache(functionProxy);
                 // allocate inline cache for this function object
                 pfuncScriptWithInlineCache->CreateInlineCache();
-            }
 
-            pfuncScriptWithInlineCache->SetHasSuperReference(hasSuperReference);
+                Assert(functionBody->GetInlineCacheCount() + functionBody->GetIsInstInlineCacheCount());
 
+                if (PHASE_TRACE1(Js::ScriptFunctionWithInlineCachePhase))
+                {
+                    char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
 
-            if (PHASE_TRACE1(Js::ScriptFunctionWithInlineCachePhase))
-            {
-                char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
+                    Output::Print(_u("Function object with inline cache: function number: (%s)\tfunction name: %s\n"),
+                        functionBody->GetDebugNumberSet(debugStringBuffer), functionBody->GetDisplayName());
+                    Output::Flush();
+                }
 
-                Output::Print(_u("Function object with inline cache: function number: (%s)\tfunction name: %s\n"),
-                    functionBody->GetDebugNumberSet(debugStringBuffer), functionBody->GetDisplayName());
-                Output::Flush();
+                pfuncScript = pfuncScriptWithInlineCache;
             }
-            return pfuncScriptWithInlineCache;
         }
-        else
+
+        if (pfuncScript == nullptr)
         {
-            ScriptFunction* pfuncScript = scriptContext->GetLibrary()->CreateScriptFunction(functionProxy);
-            pfuncScript->SetEnvironment(environment);
+            pfuncScript = scriptContext->GetLibrary()->CreateScriptFunction(functionProxy);
+        }
 
-            pfuncScript->SetHasSuperReference(hasSuperReference);
+        pfuncScript->SetEnvironment(environment);
 
-            JS_ETW(EventWriteJSCRIPT_RECYCLER_ALLOCATE_FUNCTION(pfuncScript, EtwTrace::GetFunctionId(functionProxy)));
+        pfuncScript->SetHasSuperReference(hasSuperReference);
 
-            return pfuncScript;
-        }
+        JS_ETW(EventWriteJSCRIPT_RECYCLER_ALLOCATE_FUNCTION(pfuncScript, EtwTrace::GetFunctionId(functionProxy)));
+
+        return pfuncScript;
     }
 
     void ScriptFunction::SetEnvironment(FrameDisplay * environment)
@@ -749,11 +746,11 @@ namespace Js
 #endif
 
     ScriptFunctionWithInlineCache::ScriptFunctionWithInlineCache(FunctionProxy * proxy, ScriptFunctionType* deferredPrototypeType) :
-        ScriptFunction(proxy, deferredPrototypeType), hasOwnInlineCaches(false)
+        ScriptFunction(proxy, deferredPrototypeType)
     {}
 
     ScriptFunctionWithInlineCache::ScriptFunctionWithInlineCache(DynamicType * type) :
-        ScriptFunction(type), hasOwnInlineCaches(false)
+        ScriptFunction(type)
     {}
 
     bool ScriptFunctionWithInlineCache::Is(Var func)
@@ -786,45 +783,6 @@ namespace Js
         return reinterpret_cast<InlineCache *>(PointerValue(inlineCaches[index]));
     }
 
-    Field(void**) ScriptFunctionWithInlineCache::GetInlineCaches()
-    {
-        // If script function have inline caches pointing to function body and function body got reparsed we need to reset cache
-        if (this->GetHasInlineCaches() && !this->GetHasOwnInlineCaches())
-        {
-            // Script function have inline caches pointing to function body
-            if (!this->HasFunctionBody())
-            {
-                // Function body got re-deferred and have not been re-parsed yet. Reset cache to null
-                this->m_inlineCaches = nullptr;
-                this->inlineCacheCount = 0;
-                this->SetHasInlineCaches(false);
-            }
-            else if (this->m_inlineCaches != this->GetFunctionBody()->GetInlineCaches())
-            {
-                // Function body got reparsed we need to reset cache
-                Assert(this->GetFunctionBody()->GetCompileCount() > 1);
-                this->SetInlineCachesFromFunctionBody();
-            }
-        }
-
-        return this->m_inlineCaches;
-    }
-
-    void ScriptFunctionWithInlineCache::SetInlineCachesFromFunctionBody()
-    {
-        SetHasInlineCaches(true);
-        Js::FunctionBody* functionBody = this->GetFunctionBody();
-        this->m_inlineCaches = functionBody->GetInlineCaches();
-#if DBG
-        this->m_inlineCacheTypes = functionBody->GetInlineCacheTypes();
-#endif
-        this->rootObjectLoadInlineCacheStart = functionBody->GetRootObjectLoadInlineCacheStart();
-        this->rootObjectLoadMethodInlineCacheStart = functionBody->GetRootObjectLoadMethodInlineCacheStart();
-        this->rootObjectStoreInlineCacheStart = functionBody->GetRootObjectStoreInlineCacheStart();
-        this->inlineCacheCount = functionBody->GetInlineCacheCount();
-        this->isInstInlineCacheCount = functionBody->GetIsInstInlineCacheCount();
-    }
-
     void ScriptFunctionWithInlineCache::CreateInlineCache()
     {
         Js::FunctionBody *functionBody = this->GetFunctionBody();
@@ -835,7 +793,6 @@ namespace Js
 
         SetHasInlineCaches(true);
         AllocateInlineCache();
-        hasOwnInlineCaches = true;
     }
 
     void ScriptFunctionWithInlineCache::Finalize(bool isShutdown)
@@ -854,7 +811,7 @@ namespace Js
     {
         uint isInstInlineCacheStart = this->GetInlineCacheCount();
         uint totalCacheCount = isInstInlineCacheStart + isInstInlineCacheCount;
-        if (this->GetHasInlineCaches() && this->m_inlineCaches && this->hasOwnInlineCaches)
+        if (this->GetHasInlineCaches() && this->m_inlineCaches)
        {
             Js::ScriptContext* scriptContext = this->GetParseableFunctionInfo()->GetScriptContext();
             uint i = 0;
@@ -1087,13 +1044,4 @@ namespace Js
         }
         SetHasInlineCaches(false);
     }
-
-    void ScriptFunctionWithInlineCache::ClearBorrowedInlineCacheOnFunctionObject()
-    {
-        if (this->hasOwnInlineCaches)
-        {
-            return;
-        }
-        ClearInlineCacheOnFunctionObject();
-    }
 }
diff --git a/lib/Runtime/Library/ScriptFunction.h b/lib/Runtime/Library/ScriptFunction.h
index d5423d8f149..8c911c8fad8 100644
--- a/lib/Runtime/Library/ScriptFunction.h
+++ b/lib/Runtime/Library/ScriptFunction.h
@@ -184,7 +184,6 @@ namespace Js
     {
     private:
         Field(void**) m_inlineCaches;
-        Field(bool) hasOwnInlineCaches;
 
 #if DBG
 #define InlineCacheTypeNone 0x00
@@ -212,12 +211,9 @@ namespace Js
         void CreateInlineCache();
         void AllocateInlineCache();
         void ClearInlineCacheOnFunctionObject();
-        void ClearBorrowedInlineCacheOnFunctionObject();
         InlineCache * GetInlineCache(uint index);
         uint GetInlineCacheCount() { return inlineCacheCount; }
-        Field(void**) GetInlineCaches();
-        bool GetHasOwnInlineCaches() { return hasOwnInlineCaches; }
-        void SetInlineCachesFromFunctionBody();
+        Field(void**) GetInlineCaches() const { return m_inlineCaches; }
         static uint32 GetOffsetOfInlineCaches() { return offsetof(ScriptFunctionWithInlineCache, m_inlineCaches); };
         template <bool isShutdown> void FreeOwnInlineCaches();
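Taken together, the JavascriptFunction.cpp and ScriptFunction.cpp/.h hunks retire the "borrowed" inline-cache state. Previously a ScriptFunctionWithInlineCache could point m_inlineCaches at its FunctionBody's caches (hasOwnInlineCaches == false), and redeferral or reparse of the body left stale or dangling pointers that the removed GetInlineCaches() fix-up and ClearBorrowedInlineCacheOnFunctionObject() had to repair. Now the first function object stays a plain ScriptFunction and every ScriptFunctionWithInlineCache allocates caches it owns, so Finalize can free them unconditionally. A sketch of the resulting invariant, as an assumed summary rather than code from the patch (CheckOwnership is an illustrative helper, not an engine function):

    // After this change a ScriptFunctionWithInlineCache either has no caches
    // yet or owns the ones it points at; there is no borrowed state to patch
    // up when the FunctionBody is redeferred.
    void CheckOwnership(Js::ScriptFunctionWithInlineCache * func)
    {
        if (func->GetHasInlineCaches())
        {
            Assert(func->GetInlineCaches() != nullptr);  // always self-allocated
        }
    }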
diff --git a/lib/Runtime/Library/WebAssemblyInstance.cpp b/lib/Runtime/Library/WebAssemblyInstance.cpp
index c25b6b816c9..5645cb5c88f 100644
--- a/lib/Runtime/Library/WebAssemblyInstance.cpp
+++ b/lib/Runtime/Library/WebAssemblyInstance.cpp
@@ -121,7 +121,7 @@ WebAssemblyInstance::GetterExports(RecyclableObject* function, CallInfo callInfo
         Assert(UNREACHED);
         exports = scriptContext->GetLibrary()->GetUndefined();
     }
-    return exports;
+    return CrossSite::MarshalVar(scriptContext, exports);
 }
 
 WebAssemblyInstance *
diff --git a/lib/Runtime/Library/WebAssemblyMemory.cpp b/lib/Runtime/Library/WebAssemblyMemory.cpp
index 99590a5a81b..9ef973e2cb5 100644
--- a/lib/Runtime/Library/WebAssemblyMemory.cpp
+++ b/lib/Runtime/Library/WebAssemblyMemory.cpp
@@ -239,7 +239,7 @@ WebAssemblyMemory::EntryGetterBuffer(RecyclableObject* function, CallInfo callIn
     WebAssemblyMemory* memory = WebAssemblyMemory::FromVar(args[0]);
     Assert(ArrayBuffer::Is(memory->m_buffer));
-    return memory->m_buffer;
+    return CrossSite::MarshalVar(scriptContext, memory->m_buffer);
 }
 
 WebAssemblyMemory *
diff --git a/test/typedarray/Uint8ClampedArray2.js b/test/typedarray/Uint8ClampedArray2.js
new file mode 100644
index 00000000000..426e9f93033
--- /dev/null
+++ b/test/typedarray/Uint8ClampedArray2.js
@@ -0,0 +1,43 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+var log = Array(1000000);
+var i = 0;
+
+function test() {
+    var cqjmyu;
+    for (var wetavm = 0; wetavm < 1000; ++wetavm) {
+        cqjmyu = new Uint16Array([1, 1, 1, 1, 1, 1, 1, 1, 1]);
+        cqjmyu_0 = new Uint8ClampedArray(cqjmyu);
+        cqjmyu_0[8] = "5";
+        log[i++] = cqjmyu_0[0];
+    }
+    return cqjmyu[0];
+}
+for(var j =0;j<100;j++) test();
+test();
+test();
+test();
+test();
+test();
+test();
+test();
+test();
+test();
+test();
+test();
+
+var failed = false;
+for(var k = 0; k < i; k++) {
+    if(log[k] != 1) {
+        WScript.Echo("failed at " + k);
+        failed = true;
+        break;
+    }
+}
+if(!failed)
+{
+    WScript.Echo("PASSED");
+}
diff --git a/test/typedarray/rlexe.xml b/test/typedarray/rlexe.xml
index 611ea3d786b..b829b2b407a 100644
--- a/test/typedarray/rlexe.xml
+++ b/test/typedarray/rlexe.xml
@@ -299,6 +299,13 @@ Below test fails with difference in space. Investigate the cause and re-enable t
       <tags>typedarray</tags>
     </default>
   </test>
+  <test>
+    <default>
+      <files>Uint8ClampedArray2.js</files>
+      <tags>typedarray</tags>
+      <compile-flags>-minInterpretCount:1 -maxInterpretCount:1 -off:simpleJit</compile-flags>
+    </default>
+  </test>
   <test>
     <default>
       <files>setDifferentTypes.js</files>
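Two closing observations. First, the DataView, WebAssemblyInstance, and WebAssemblyMemory getters all gain the same cross-context guard: a Var cached on a runtime object may have been created in a different ScriptContext than the caller's, so it must be marshaled before being returned. The shared shape, with GetCachedVar as a hypothetical stand-in for the cached field each getter returns (m_buffer, exports, ...):

    // Any Var cached on a runtime object may belong to another ScriptContext;
    // marshal it into the requesting context before handing it back.
    Var cached = instanceObject->GetCachedVar();
    return CrossSite::MarshalVar(scriptContext, cached);

Second, Uint8ClampedArray2.js is the regression test for the array-conversion fixes above; the -minInterpretCount:1 -maxInterpretCount:1 -off:simpleJit flags in rlexe.xml push the loop body into the full JIT almost immediately, which is what exercises the type-specialized element-store paths.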