diff --git a/docs/ABI/Mangling.rst b/docs/ABI/Mangling.rst index fb68ed8e602f6..14b056b2470c8 100644 --- a/docs/ABI/Mangling.rst +++ b/docs/ABI/Mangling.rst @@ -56,6 +56,7 @@ Globals global ::= nominal-type 'Mr' // generic type completion function global ::= nominal-type 'Mi' // generic type instantiation function global ::= nominal-type 'MI' // generic type instantiation cache + global ::= nominal-type 'Ml' // in-place type initialization cache global ::= nominal-type 'Mm' // class metaclass global ::= nominal-type 'Mn' // nominal type descriptor global ::= module 'MXM' // module descriptor diff --git a/include/swift/ABI/Metadata.h b/include/swift/ABI/Metadata.h index a8d55ac80f446..586a8a80aa61e 100644 --- a/include/swift/ABI/Metadata.h +++ b/include/swift/ABI/Metadata.h @@ -3364,6 +3364,14 @@ class TargetTypeContextDescriptor return TypeContextDescriptorFlags(this->Flags.getKindSpecificFlags()); } + /// Does this type have non-trivial "in place" metadata initialization? + /// + /// The type of the initialization-control structure differs by subclass, + /// so it doesn't appear here. + bool hasInPlaceMetadataInitialization() const { + return getTypeContextDescriptorFlags().hasInPlaceMetadataInitialization(); + } + const TargetTypeGenericContextDescriptorHeader & getFullGenericContextHeader() const; @@ -3709,10 +3717,49 @@ class TargetClassDescriptor final using ClassDescriptor = TargetClassDescriptor; +/// The cache structure for non-trivial initialization of singleton value +/// metadata. +template +struct TargetInPlaceValueMetadataCache { + /// The metadata pointer. Clients can do dependency-ordered loads + /// from this, and if they see a non-zero value, it's a Complete + /// metadata. + std::atomic> Metadata; + + /// The private cache data. 
+ std::atomic> Private; +}; +using InPlaceValueMetadataCache = + TargetInPlaceValueMetadataCache; + +/// The control structure for performing non-trivial initialization of +/// singleton value metadata, which is required when e.g. a non-generic +/// value type has a resilient component type. +template +struct TargetInPlaceValueMetadataInitialization { + /// The initialization cache. Out-of-line because mutable. + TargetRelativeDirectPointer> + InitializationCache; + + /// The incomplete metadata. + TargetRelativeDirectPointer> + IncompleteMetadata; + + /// The completion function. The pattern will always be null. + TargetRelativeDirectPointer + CompletionFunction; +}; + template class TargetValueTypeDescriptor : public TargetTypeContextDescriptor { public: + using InPlaceMetadataInitialization = + TargetInPlaceValueMetadataInitialization; + + const InPlaceMetadataInitialization &getInPlaceMetadataInitialization() const; + static bool classof(const TargetContextDescriptor *cd) { return cd->getKind() == ContextDescriptorKind::Struct || cd->getKind() == ContextDescriptorKind::Enum; @@ -3724,16 +3771,30 @@ template class TargetStructDescriptor final : public TargetValueTypeDescriptor, public TrailingGenericContextObjects, - TargetTypeGenericContextDescriptorHeader> { + TargetTypeGenericContextDescriptorHeader, + TargetInPlaceValueMetadataInitialization> { +public: + using InPlaceMetadataInitialization = + TargetInPlaceValueMetadataInitialization; + private: using TrailingGenericContextObjects = TrailingGenericContextObjects, - TargetTypeGenericContextDescriptorHeader>; + TargetTypeGenericContextDescriptorHeader, + InPlaceMetadataInitialization>; using TrailingObjects = typename TrailingGenericContextObjects::TrailingObjects; friend TrailingObjects; + template + using OverloadToken = typename TrailingObjects::template OverloadToken; + + using TrailingGenericContextObjects::numTrailingObjects; + size_t numTrailingObjects(OverloadToken) const{ + return 
this->hasInPlaceMetadataInitialization() ? 1 : 0; + } + public: using TrailingGenericContextObjects::getGenericContext; using TrailingGenericContextObjects::getGenericContextHeader; @@ -3752,6 +3813,11 @@ class TargetStructDescriptor final /// its stored properties. bool hasFieldOffsetVector() const { return FieldOffsetVectorOffset != 0; } + const InPlaceMetadataInitialization &getInPlaceMetadataInitialization() const{ + assert(this->hasInPlaceMetadataInitialization()); + return *this->template getTrailingObjects(); + } + static constexpr int32_t getGenericArgumentOffset() { return TargetStructMetadata::getGenericArgumentOffset(); } @@ -3767,16 +3833,30 @@ template class TargetEnumDescriptor final : public TargetValueTypeDescriptor, public TrailingGenericContextObjects, - TargetTypeGenericContextDescriptorHeader> { + TargetTypeGenericContextDescriptorHeader, + TargetInPlaceValueMetadataInitialization> { +public: + using InPlaceMetadataInitialization = + TargetInPlaceValueMetadataInitialization; + private: using TrailingGenericContextObjects = TrailingGenericContextObjects, - TargetTypeGenericContextDescriptorHeader>; + TargetTypeGenericContextDescriptorHeader, + InPlaceMetadataInitialization>; using TrailingObjects = typename TrailingGenericContextObjects::TrailingObjects; friend TrailingObjects; + template + using OverloadToken = typename TrailingObjects::template OverloadToken; + + using TrailingGenericContextObjects::numTrailingObjects; + size_t numTrailingObjects(OverloadToken) const{ + return this->hasInPlaceMetadataInitialization() ? 
1 : 0; + } + public: using TrailingGenericContextObjects::getGenericContext; using TrailingGenericContextObjects::getGenericContextHeader; @@ -3813,6 +3893,11 @@ class TargetEnumDescriptor final return TargetEnumMetadata::getGenericArgumentOffset(); } + const InPlaceMetadataInitialization &getInPlaceMetadataInitialization() const{ + assert(this->hasInPlaceMetadataInitialization()); + return *this->template getTrailingObjects(); + } + static bool classof(const TargetContextDescriptor *cd) { return cd->getKind() == ContextDescriptorKind::Enum; } @@ -3901,6 +3986,21 @@ TargetTypeContextDescriptor::getGenericParams() const { } } +template +inline const TargetInPlaceValueMetadataInitialization & +TargetValueTypeDescriptor::getInPlaceMetadataInitialization() const { + switch (this->getKind()) { + case ContextDescriptorKind::Enum: + return llvm::cast>(this) + ->getInPlaceMetadataInitialization(); + case ContextDescriptorKind::Struct: + return llvm::cast>(this) + ->getInPlaceMetadataInitialization(); + default: + swift_runtime_unreachable("Not a value type descriptor."); + } +} + } // end namespace swift #pragma clang diagnostic pop diff --git a/include/swift/ABI/MetadataValues.h b/include/swift/ABI/MetadataValues.h index 4345f9e6328e6..a1e657030c091 100644 --- a/include/swift/ABI/MetadataValues.h +++ b/include/swift/ABI/MetadataValues.h @@ -1202,6 +1202,14 @@ class TypeContextDescriptorFlags : public FlagSet { /// declarations associated with the same declaration. IsSynthesizedRelatedEntity = 3, + /// Set if the type requires non-trivial but non-generic metadata + /// initialization. It may or may not be truly "in place" depending + /// on the kind of metadata. + /// + /// Currently only meaningful for value descriptors, but will be + /// extended to class descriptors. + HasInPlaceMetadataInitialization = 4, + /// Set if the context descriptor is includes metadata for dynamically /// constructing a class's vtables at metadata instantiation time. 
/// @@ -1237,6 +1245,10 @@ class TypeContextDescriptorFlags : public FlagSet { isSynthesizedRelatedEntity, setIsSynthesizedRelatedEntity) + FLAGSET_DEFINE_FLAG_ACCESSORS(HasInPlaceMetadataInitialization, + hasInPlaceMetadataInitialization, + setHasInPlaceMetadataInitialization) + FLAGSET_DEFINE_FLAG_ACCESSORS(Class_HasVTable, class_hasVTable, class_setHasVTable) diff --git a/include/swift/Demangling/DemangleNodes.def b/include/swift/Demangling/DemangleNodes.def index 1997a9119b2fa..927dbbb8ed0cf 100644 --- a/include/swift/Demangling/DemangleNodes.def +++ b/include/swift/Demangling/DemangleNodes.def @@ -183,6 +183,7 @@ NODE(TypeMetadataAccessFunction) NODE(TypeMetadataCompletionFunction) NODE(TypeMetadataInstantiationCache) NODE(TypeMetadataInstantiationFunction) +NODE(TypeMetadataInPlaceInitializationCache) NODE(TypeMetadataLazyCache) NODE(UncurriedFunctionType) #define REF_STORAGE(Name, ...) NODE(Name) diff --git a/include/swift/IRGen/Linking.h b/include/swift/IRGen/Linking.h index 3e9541391754a..053d168f7a7dc 100644 --- a/include/swift/IRGen/Linking.h +++ b/include/swift/IRGen/Linking.h @@ -163,6 +163,10 @@ class LinkEntity { /// The pointer is a NominalTypeDecl*. TypeMetadataInstantiationFunction, + /// The in-place initialization cache for a generic nominal type. + /// The pointer is a NominalTypeDecl*. + TypeMetadataInPlaceInitializationCache, + /// The completion function for a generic or resilient nominal type. /// The pointer is a NominalTypeDecl*. 
TypeMetadataCompletionFunction, @@ -503,12 +507,19 @@ class LinkEntity { return entity; } - static LinkEntity forTypeMetadataInstantiationFunction(NominalTypeDecl *decl){ + static LinkEntity forTypeMetadataInstantiationFunction(NominalTypeDecl *decl) { LinkEntity entity; entity.setForDecl(Kind::TypeMetadataInstantiationFunction, decl); return entity; } + static LinkEntity forTypeMetadataInPlaceInitializationCache( + NominalTypeDecl *decl) { + LinkEntity entity; + entity.setForDecl(Kind::TypeMetadataInPlaceInitializationCache, decl); + return entity; + } + static LinkEntity forTypeMetadataCompletionFunction(NominalTypeDecl *decl) { LinkEntity entity; entity.setForDecl(Kind::TypeMetadataCompletionFunction, decl); diff --git a/include/swift/Remote/MetadataReader.h b/include/swift/Remote/MetadataReader.h index 73d43b3525f1f..ea8de7b987123 100644 --- a/include/swift/Remote/MetadataReader.h +++ b/include/swift/Remote/MetadataReader.h @@ -1050,8 +1050,10 @@ class MetadataReader { sizeof(flags))) return nullptr; + TypeContextDescriptorFlags typeFlags(flags.getKindSpecificFlags()); unsigned baseSize = 0; unsigned genericHeaderSize = sizeof(GenericContextDescriptorHeader); + unsigned inPlaceInitSize = 0; bool hasVTable = false; switch (auto kind = flags.getKind()) { case ContextDescriptorKind::Module: @@ -1067,16 +1069,23 @@ class MetadataReader { case ContextDescriptorKind::Class: baseSize = sizeof(TargetClassDescriptor); genericHeaderSize = sizeof(TypeGenericContextDescriptorHeader); - hasVTable = TypeContextDescriptorFlags(flags.getKindSpecificFlags()) - .class_hasVTable(); + hasVTable = typeFlags.class_hasVTable(); break; case ContextDescriptorKind::Enum: baseSize = sizeof(TargetEnumDescriptor); genericHeaderSize = sizeof(TypeGenericContextDescriptorHeader); + if (typeFlags.hasInPlaceMetadataInitialization()) { + inPlaceInitSize = + sizeof(TargetInPlaceValueMetadataInitialization); + } break; case ContextDescriptorKind::Struct: baseSize = sizeof(TargetStructDescriptor); 
genericHeaderSize = sizeof(TypeGenericContextDescriptorHeader); + if (typeFlags.hasInPlaceMetadataInitialization()) { + inPlaceInitSize = + sizeof(TargetInPlaceValueMetadataInitialization); + } break; case ContextDescriptorKind::Protocol: baseSize = sizeof(TargetProtocolDescriptorRef); @@ -1122,7 +1131,7 @@ class MetadataReader { + header.VTableSize * sizeof(TargetMethodDescriptor); } - unsigned size = baseSize + genericsSize + vtableSize; + unsigned size = baseSize + genericsSize + vtableSize + inPlaceInitSize; auto buffer = (uint8_t *)malloc(size); if (!Reader->readBytes(RemoteAddress(address), buffer, size)) { free(buffer); diff --git a/include/swift/Runtime/Metadata.h b/include/swift/Runtime/Metadata.h index e66c9ab0fefce..2cf401fc4a3ee 100644 --- a/include/swift/Runtime/Metadata.h +++ b/include/swift/Runtime/Metadata.h @@ -335,6 +335,13 @@ ClassMetadataBounds getResilientMetadataBounds( const ClassDescriptor *descriptor); int32_t getResilientImmediateMembersOffset(const ClassDescriptor *descriptor); +/// \brief Fetch a uniqued metadata object for a nominal type which requires +/// in-place metadata initialization. +SWIFT_RUNTIME_EXPORT SWIFT_CC(swift) +MetadataResponse +swift_getInPlaceMetadata(MetadataRequest request, + const TypeContextDescriptor *description); + /// \brief Fetch a uniqued metadata object for a generic nominal type. 
SWIFT_RUNTIME_EXPORT SWIFT_CC(swift) MetadataResponse diff --git a/include/swift/Runtime/RuntimeFunctions.def b/include/swift/Runtime/RuntimeFunctions.def index 5e09b80478d86..1a7ba9ec8fc13 100644 --- a/include/swift/Runtime/RuntimeFunctions.def +++ b/include/swift/Runtime/RuntimeFunctions.def @@ -653,6 +653,13 @@ FUNCTION(GetForeignWitnessTable, swift_getForeignWitnessTable, C_CC, ProtocolDescriptorPtrTy), ATTRS(NoUnwind, ReadNone)) +// MetadataResponse swift_getInPlaceMetadata(MetadataRequest request, +// TypeContextDescriptor *type); +FUNCTION(GetInPlaceMetadata, swift_getInPlaceMetadata, SwiftCC, + RETURNS(TypeMetadataResponseTy), + ARGS(SizeTy, TypeContextDescriptorPtrTy), + ATTRS(NoUnwind, ReadNone)) + // MetadataResponse swift_getGenericMetadata(MetadataRequest request, // const void * const *arguments, // TypeContextDescriptor *type); diff --git a/lib/Demangling/Demangler.cpp b/lib/Demangling/Demangler.cpp index 8cdd019c0adf1..42f4cba1611fe 100644 --- a/lib/Demangling/Demangler.cpp +++ b/lib/Demangling/Demangler.cpp @@ -1469,6 +1469,9 @@ NodePointer Demangler::demangleMetatype() { return createWithPoppedType(Node::Kind::TypeMetadataInstantiationFunction); case 'r': return createWithPoppedType(Node::Kind::TypeMetadataCompletionFunction); + case 'l': + return createWithPoppedType( + Node::Kind::TypeMetadataInPlaceInitializationCache); case 'L': return createWithPoppedType(Node::Kind::TypeMetadataLazyCache); case 'm': diff --git a/lib/Demangling/NodePrinter.cpp b/lib/Demangling/NodePrinter.cpp index 296e5f4c81af8..52a34ab9a7692 100644 --- a/lib/Demangling/NodePrinter.cpp +++ b/lib/Demangling/NodePrinter.cpp @@ -429,6 +429,7 @@ class NodePrinter { case Node::Kind::TypeMetadataCompletionFunction: case Node::Kind::TypeMetadataInstantiationCache: case Node::Kind::TypeMetadataInstantiationFunction: + case Node::Kind::TypeMetadataInPlaceInitializationCache: case Node::Kind::TypeMetadataLazyCache: case Node::Kind::UncurriedFunctionType: #define REF_STORAGE(Name, ...) 
\ @@ -1509,6 +1510,10 @@ NodePointer NodePrinter::print(NodePointer Node, bool asPrefixContext) { Printer << "type metadata instantiation function for "; print(Node->getChild(0)); return nullptr; + case Node::Kind::TypeMetadataInPlaceInitializationCache: + Printer << "type metadata in-place initialization cache for "; + print(Node->getChild(0)); + return nullptr; case Node::Kind::TypeMetadataCompletionFunction: Printer << "type metadata completion function for "; print(Node->getChild(0)); diff --git a/lib/Demangling/OldRemangler.cpp b/lib/Demangling/OldRemangler.cpp index 3f4acc9a20532..c3d2cb0b49b6c 100644 --- a/lib/Demangling/OldRemangler.cpp +++ b/lib/Demangling/OldRemangler.cpp @@ -716,6 +716,11 @@ void Remangler::mangleTypeMetadataInstantiationFunction(Node *node) { mangleSingleChildNode(node); // type } +void Remangler::mangleTypeMetadataInPlaceInitializationCache(Node *node) { + Out << "Ml"; + mangleSingleChildNode(node); // type +} + void Remangler::mangleTypeMetadataCompletionFunction(Node *node) { Out << "Mr"; mangleSingleChildNode(node); // type diff --git a/lib/Demangling/Remangler.cpp b/lib/Demangling/Remangler.cpp index 8a56b2942332c..bcbeec43a249e 100644 --- a/lib/Demangling/Remangler.cpp +++ b/lib/Demangling/Remangler.cpp @@ -1766,6 +1766,11 @@ void Remangler::mangleTypeMetadataInstantiationFunction(Node *node) { Buffer << "Mi"; } +void Remangler::mangleTypeMetadataInPlaceInitializationCache(Node *node) { + mangleSingleChildNode(node); + Buffer << "Ml"; +} + void Remangler::mangleTypeMetadataCompletionFunction(Node *node) { mangleSingleChildNode(node); Buffer << "Mr"; diff --git a/lib/IRGen/GenDecl.cpp b/lib/IRGen/GenDecl.cpp index 1ee45516489b1..354e240faa538 100644 --- a/lib/IRGen/GenDecl.cpp +++ b/lib/IRGen/GenDecl.cpp @@ -1498,6 +1498,7 @@ SILLinkage LinkEntity::getLinkage(ForDefinition_t forDefinition) const { case Kind::TypeMetadataInstantiationCache: case Kind::TypeMetadataInstantiationFunction: + case 
Kind::TypeMetadataInPlaceInitializationCache: case Kind::TypeMetadataCompletionFunction: case Kind::TypeMetadataPattern: return SILLinkage::Private; @@ -1715,6 +1716,7 @@ bool LinkEntity::isAvailableExternally(IRGenModule &IGM) const { case Kind::AnonymousDescriptor: case Kind::TypeMetadataInstantiationCache: case Kind::TypeMetadataInstantiationFunction: + case Kind::TypeMetadataInPlaceInitializationCache: case Kind::TypeMetadataCompletionFunction: case Kind::TypeMetadataPattern: return false; @@ -2500,8 +2502,6 @@ IRGenModule::getAddrOfLLVMVariable(LinkEntity entity, Alignment alignment, // forward declaration. if (definitionType) { assert(existing->isDeclaration() && "already defined"); - assert(entry->getType()->getPointerElementType() == defaultType - || entry->getType()->getPointerElementType() == definition.getType()); updateLinkageForDefinition(*this, existing, entity); // If the existing entry is a variable of the right type, @@ -2548,7 +2548,7 @@ IRGenModule::getAddrOfLLVMVariable(LinkEntity entity, Alignment alignment, // new variable. if (entry) { auto existing = cast(entry); - auto castVar = getElementBitCast(var, defaultType); + auto castVar = llvm::ConstantExpr::getBitCast(var, entry->getType()); existing->replaceAllUsesWith(castVar); existing->eraseFromParent(); } @@ -3271,8 +3271,45 @@ IRGenModule::getAddrOfTypeMetadataLazyCacheVariable(CanType type, ForDefinition_t forDefinition) { assert(!type->hasArchetype() && !type->hasTypeParameter()); LinkEntity entity = LinkEntity::forTypeMetadataLazyCacheVariable(type); - return getAddrOfLLVMVariable(entity, getPointerAlignment(), forDefinition, - TypeMetadataPtrTy, DebugTypeInfo()); + auto variable = + getAddrOfLLVMVariable(entity, getPointerAlignment(), forDefinition, + TypeMetadataPtrTy, DebugTypeInfo()); + + // Zero-initialize if we're asking for a definition. 
+ if (forDefinition) { + cast(variable)->setInitializer( + llvm::ConstantPointerNull::get(TypeMetadataPtrTy)); + } + + return variable; +} + +llvm::Constant * +IRGenModule::getAddrOfTypeMetadataInPlaceInitializationCache( + NominalTypeDecl *D, + ForDefinition_t forDefinition) { + // Build the cache type. + llvm::Type *cacheTy; + if (isa(D) || isa(D)) { + // This is struct InPlaceValueMetadataCache. + cacheTy = llvm::StructType::get(getLLVMContext(), + {TypeMetadataPtrTy, Int8PtrTy}); + } else { + llvm_unreachable("in-place initialization for classes not yet supported"); + } + + LinkEntity entity = LinkEntity::forTypeMetadataInPlaceInitializationCache(D); + auto variable = + getAddrOfLLVMVariable(entity, getPointerAlignment(), forDefinition, + cacheTy, DebugTypeInfo()); + + // Zero-initialize if we're asking for a definition. + if (forDefinition) { + cast(variable)->setInitializer( + llvm::Constant::getNullValue(cacheTy)); + } + + return variable; } /// Define the metadata for a type. @@ -4274,9 +4311,17 @@ IRGenModule::getAddrOfWitnessTableLazyCacheVariable( assert(!conformingType->hasArchetype()); LinkEntity entity = LinkEntity::forProtocolWitnessTableLazyCacheVariable(conf, conformingType); - return getAddrOfLLVMVariable(entity, getPointerAlignment(), - forDefinition, WitnessTablePtrTy, - DebugTypeInfo()); + auto variable = getAddrOfLLVMVariable(entity, getPointerAlignment(), + forDefinition, WitnessTablePtrTy, + DebugTypeInfo()); + + // Zero-initialize if we're asking for a definition. + if (forDefinition) { + cast(variable)->setInitializer( + llvm::ConstantPointerNull::get(WitnessTablePtrTy)); + } + + return variable; } /// Look up the address of a witness table. 
diff --git a/lib/IRGen/GenMeta.cpp b/lib/IRGen/GenMeta.cpp index d4575c29e5be1..25edcd9183775 100644 --- a/lib/IRGen/GenMeta.cpp +++ b/lib/IRGen/GenMeta.cpp @@ -121,6 +121,73 @@ void IRGenModule::setTrueConstGlobal(llvm::GlobalVariable *var) { } } +/*****************************************************************************/ +/** Metadata completion ******************************************************/ +/*****************************************************************************/ + +/// Does the metadata for the given type, which we are currently emitting, +/// require in-place metadata initialiation structures and functions? +static bool needsInPlaceMetadataInitialization(IRGenModule &IGM, + NominalTypeDecl *typeDecl) { + // Generic types never have in-place metadata initialization. + if (typeDecl->isGenericContext()) + return false; + + assert(isa(typeDecl) || isa(typeDecl)); + + // If the type is known to be fixed-layout, we can emit its metadata such + // that it doesn't need dynamic initialization. + auto &ti = IGM.getTypeInfoForUnlowered(typeDecl->getDeclaredTypeInContext()); + if (ti.isFixedSize(ResilienceExpansion::Maximal)) + return false; + + return true; +} + +using MetadataCompletionBodyEmitter = + void (IRGenFunction &IGF, + llvm::Value *metadata, + MetadataDependencyCollector *collector); + +static void emitMetadataCompletionFunction(IRGenModule &IGM, + NominalTypeDecl *typeDecl, + llvm::function_ref body) { + llvm::Function *f = + IGM.getAddrOfTypeMetadataCompletionFunction(typeDecl, ForDefinition); + f->setAttributes(IGM.constructInitialAttributes()); + + IRGenFunction IGF(IGM, f); + + // Skip instrumentation when building for TSan to avoid false positives. + // The synchronization for this happens in the Runtime and we do not see it. 
+ if (IGM.IRGen.Opts.Sanitizers & SanitizerKind::Thread) + f->removeFnAttr(llvm::Attribute::SanitizeThread); + + if (IGM.DebugInfo) + IGM.DebugInfo->emitArtificialFunction(IGF, f); + + Explosion params = IGF.collectParameters(); + llvm::Value *metadata = params.claimNext(); + llvm::Value *context = params.claimNext(); + llvm::Value *templatePointer = params.claimNext(); + + // TODO: use these? + (void) context; + (void) templatePointer; + + MetadataDependencyCollector collector; + + body(IGF, metadata, &collector); + + // At the current insertion point, the metadata is now complete. + + // Merge with any metadata dependencies we may have collected. + auto dependency = collector.finish(IGF); + auto returnValue = dependency.combine(IGF); + + IGF.Builder.CreateRet(returnValue); +} + /*****************************************************************************/ /** Nominal Type Descriptor Emission *****************************************/ /*****************************************************************************/ @@ -637,6 +704,7 @@ namespace { protected: NominalTypeDecl *Type; RequireMetadata_t HasMetadata; + bool HasInPlaceMetadataInitialization; using super::IGM; using super::B; @@ -648,8 +716,10 @@ namespace { TypeContextDescriptorBuilderBase(IRGenModule &IGM, NominalTypeDecl *Type, RequireMetadata_t requireMetadata) : super(IGM), Type(Type), - HasMetadata(requireMetadata) - {} + HasMetadata(requireMetadata), + HasInPlaceMetadataInitialization( + computeHasInPlaceMetadataInitialization()) { + } void layout() { super::layout(); @@ -658,6 +728,7 @@ namespace { // ABI TODO: layout info should be superseded by remote mirror metadata asImpl().addLayoutInfo(); asImpl().addGenericSignature(); + asImpl().maybeAddInPlaceMetadataInitialization(); } void addName() { @@ -769,6 +840,61 @@ namespace { } } + bool computeHasInPlaceMetadataInitialization() { + // Not if we don't have metadata. + if (!HasMetadata) + return false; + + // Only struct and enums for now. 
Classes currently use an eager + // mechanism that doesn't properly support recursive dependencies, so + // their equivalent of in-place initialization does not yet use this + // infrastructure. + if (!isa(Type) && !isa(Type)) + return false; + + return needsInPlaceMetadataInitialization(IGM, Type); + } + + bool hasInPlaceMetadataInitialization() { + return HasInPlaceMetadataInitialization; + } + + void setHasInPlaceMetadataInitialization(TypeContextDescriptorFlags &flags){ + flags.setHasInPlaceMetadataInitialization( + HasInPlaceMetadataInitialization); + } + + void maybeAddInPlaceMetadataInitialization() { + if (!HasInPlaceMetadataInitialization) + return; + + if (isa(Type) || isa(Type)) { + asImpl().addInPlaceValueMetadataInitialization(); + } else { + llvm_unreachable("unexpected type allowing in-place initialization"); + } + } + + /// Add an InPlaceValueMetadataInitialization structure to the descriptor. + void addInPlaceValueMetadataInitialization() { + // Relative pointer to the initialization cache. + // Note that we trigger the definition of it when emitting the + // completion function. + auto cache = IGM.getAddrOfTypeMetadataInPlaceInitializationCache(Type, + NotForDefinition); + B.addRelativeAddress(cache); + + // Relative pointer to the metadata. + auto type = Type->getDeclaredTypeInContext()->getCanonicalType(); + auto metadata = IGM.getAddrOfTypeMetadata(type); + B.addRelativeAddress(metadata); + + // Completion function. 
+ auto completionFunction = + IGM.getAddrOfTypeMetadataCompletionFunction(Type, NotForDefinition); + B.addRelativeAddress(completionFunction); + } + // Subclasses should provide: // ContextDescriptorKind getContextKind(); // void addLayoutInfo(); // ABI TODO: should be superseded @@ -885,6 +1011,8 @@ namespace { flags.setIsReflectable( !IGM.shouldEmitOpaqueTypeMetadataRecord(getType())); + setHasInPlaceMetadataInitialization(flags); + getClangImportedFlags(flags); return flags.getOpaqueValue(); } @@ -943,6 +1071,8 @@ namespace { flags.setIsReflectable(Strategy.isReflectable()); + setHasInPlaceMetadataInitialization(flags); + getClangImportedFlags(flags); return flags.getOpaqueValue(); } @@ -1347,53 +1477,26 @@ namespace { // MetadataDependency(Metadata *type, // MetadataCompletionContext *context, // const GenericMetadataPattern *pattern); - llvm::Function *f = - IGM.getAddrOfTypeMetadataCompletionFunction(Target, ForDefinition); - f->setAttributes(IGM.constructInitialAttributes()); - - IRGenFunction IGF(IGM, f); - - // Skip instrumentation when building for TSan to avoid false positives. - // The synchronization for this happens in the Runtime and we do not see it. - if (IGM.IRGen.Opts.Sanitizers & SanitizerKind::Thread) - f->removeFnAttr(llvm::Attribute::SanitizeThread); - - if (IGM.DebugInfo) - IGM.DebugInfo->emitArtificialFunction(IGF, f); - - Explosion params = IGF.collectParameters(); - llvm::Value *metadata = params.claimNext(); - llvm::Value *context = params.claimNext(); - llvm::Value *templatePointer = params.claimNext(); - - (void) context; - (void) templatePointer; - - // Bind the generic arguments. - // FIXME: this will be problematic if we ever try to bind superclass - // types from type metadata! 
- if (Target->isGenericContext()) { - auto type = Target->getDeclaredTypeInContext()->getCanonicalType(); - IGF.bindLocalTypeDataFromTypeMetadata(type, IsExact, metadata, - MetadataState::Abstract); - } - - // A dependent VWT means that we have dependent metadata. - if (HasDependentVWT) - HasDependentMetadata = true; - - MetadataDependencyCollector collector; + emitMetadataCompletionFunction(IGM, Target, + [&](IRGenFunction &IGF, llvm::Value *metadata, + MetadataDependencyCollector *collector) { + // Bind the generic arguments. + // FIXME: this will be problematic if we ever try to bind superclass + // types from type metadata! + if (Target->isGenericContext()) { + auto type = Target->getDeclaredTypeInContext()->getCanonicalType(); + IGF.bindLocalTypeDataFromTypeMetadata(type, IsExact, metadata, + MetadataState::Abstract); + } - if (HasDependentMetadata) { - asImpl().emitInitializeMetadata(IGF, metadata, false, &collector); - } - - // The metadata is now complete. Finalize any metadata dependencies - // we may have collected. - auto dependency = collector.finish(IGF); - auto returnValue = dependency.combine(IGF); + // A dependent VWT means that we have dependent metadata. + if (HasDependentVWT) + HasDependentMetadata = true; - IGF.Builder.CreateRet(returnValue); + if (HasDependentMetadata) { + asImpl().emitInitializeMetadata(IGF, metadata, false, collector); + } + }); } /// The information necessary to fill in a GenericMetadataPartialPattern @@ -2260,6 +2363,7 @@ namespace { auto type =cast(Target->getDeclaredType()->getCanonicalType()); (void) getTypeMetadataAccessFunction(IGM, type, ForDefinition, + CacheStrategy::Lazy, [&](IRGenFunction &IGF, DynamicMetadataRequest request, llvm::Constant *cacheVar) -> MetadataResponse { // There's an interesting special case where we can do the @@ -2275,17 +2379,17 @@ namespace { } // Otherwise, use the generic path. 
- return emitInPlaceTypeMetadataAccessFunctionBody(IGF, type, cacheVar, + return emitOnceTypeMetadataAccessFunctionBody(IGF, type, cacheVar, [&](IRGenFunction &IGF, llvm::Value *metadata) { - return emitInPlaceMetadataInitialization(IGF, type, metadata); + return emitOnceMetadataInitialization(IGF, type, metadata); }); }); } private: - llvm::Value *emitInPlaceMetadataInitialization(IRGenFunction &IGF, - CanClassType type, - llvm::Value *metadata) { + llvm::Value *emitOnceMetadataInitialization(IRGenFunction &IGF, + CanClassType type, + llvm::Value *metadata) { // Many of the things done by generic instantiation are unnecessary here: // initializing the metaclass pointer // initializing the ro-data pointer @@ -2707,10 +2811,38 @@ IRGenFunction::emitValueWitnessTableRef(SILType type, // Value types (structs and enums) //===----------------------------------------------------------------------===// +namespace { + /// A helper class for laying out value metadata. + template + class ValueMetadataBuilderBase : public Base { + protected: + using Base::IGM; + using Base::Target; + using Base::asImpl; + + using Base::Base; + + public: + /// Create the runtime data structures and functions necessary to + /// support in-place metadata initialization on this type. + void maybeCreateInPlaceMetadataInitialization() { + if (!needsInPlaceMetadataInitialization(IGM, Target)) + return; + + emitMetadataCompletionFunction(IGM, Target, + [&](IRGenFunction &IGF, llvm::Value *metadata, + MetadataDependencyCollector *collector) { + asImpl().emitInitializeMetadata(IGF, metadata, /*vwt mutable*/true, + collector); + }); + } + }; +} + static llvm::Value * -emitInPlaceValueTypeMetadataInitialization(IRGenFunction &IGF, - CanNominalType type, - llvm::Value *metadata, +emitOnceValueTypeMetadataInitialization(IRGenFunction &IGF, + CanNominalType type, + llvm::Value *metadata, MetadataDependencyCollector *collector) { // All the value types are basically similar, as are foreign types. 
assert(isa(type) || isa(type) || @@ -2733,27 +2865,46 @@ emitInPlaceValueTypeMetadataInitialization(IRGenFunction &IGF, return metadata; } -/// Create an access function for the type metadata of the given -/// non-generic nominal type. -static void createInPlaceValueTypeMetadataAccessFunction(IRGenModule &IGM, - NominalTypeDecl *typeDecl) { +/// Create an access function for the given type which triggers the +/// in-place initialization path. +static void +createInPlaceInitializationMetadataAccessFunction(IRGenModule &IGM, + NominalTypeDecl *typeDecl, + CanType type) { assert(!typeDecl->isGenericContext()); - auto type = - cast(typeDecl->getDeclaredType()->getCanonicalType()); (void) getTypeMetadataAccessFunction(IGM, type, ForDefinition, + CacheStrategy::InPlaceInitialization, [&](IRGenFunction &IGF, DynamicMetadataRequest request, llvm::Constant *cacheVariable) { - return emitInPlaceTypeMetadataAccessFunctionBody(IGF, type, cacheVariable, - [&](IRGenFunction &IGF, llvm::Value *metadata) { - MetadataDependencyCollector *collector = nullptr; // FIXME - return emitInPlaceValueTypeMetadataInitialization(IGF, type, metadata, - collector); - }); + llvm::Value *descriptor = + IGF.IGM.getAddrOfTypeContextDescriptor(typeDecl, RequireMetadata); + auto responsePair = + IGF.Builder.CreateCall(IGF.IGM.getGetInPlaceMetadataFn(), + {request.get(IGF), descriptor}); + return MetadataResponse::handle(IGF, request, responsePair); }); } +/// Create an access function for the given non-generic type. +static void createNonGenericMetadataAccessFunction(IRGenModule &IGM, + NominalTypeDecl *typeDecl) { + assert(!typeDecl->isGenericContext()); + auto type = typeDecl->getDeclaredType()->getCanonicalType(); + + // If the type requires the in-place initialization pattern, use it. 
+ if (needsInPlaceMetadataInitialization(IGM, typeDecl)) { + createInPlaceInitializationMetadataAccessFunction(IGM, typeDecl, type); + return; + } + + // Otherwise, use the lazy pattern, which should be emitted using a + // direct reference to the metadata. + (void) getTypeMetadataAccessFunction(IGM, type, ForDefinition); +} + + //===----------------------------------------------------------------------===// // Structs //===----------------------------------------------------------------------===// @@ -2761,8 +2912,9 @@ static void createInPlaceValueTypeMetadataAccessFunction(IRGenModule &IGM, namespace { /// An adapter for laying out struct metadata. template - class StructMetadataBuilderBase : public StructMetadataVisitor { - using super = StructMetadataVisitor; + class StructMetadataBuilderBase + : public ValueMetadataBuilderBase> { + using super = ValueMetadataBuilderBase>; protected: ConstantStructBuilder &B; @@ -2837,6 +2989,18 @@ namespace { void addGenericWitnessTable(CanType type, ProtocolConformanceRef conf) { B.addNullPointer(IGM.WitnessTablePtrTy); } + + void emitInitializeMetadata(IRGenFunction &IGF, + llvm::Value *metadata, + bool isVWTMutable, + MetadataDependencyCollector *collector) { + auto loweredTy = getLoweredType(); + auto &fixedTI = IGM.getTypeInfo(loweredTy); + if (isa(fixedTI)) return; + + emitInitializeFieldOffsetVector(IGF, loweredTy, metadata, isVWTMutable, + collector); + } }; class StructMetadataBuilder : @@ -2857,7 +3021,8 @@ namespace { } void createMetadataAccessFunction() { - createInPlaceValueTypeMetadataAccessFunction(IGM, Target); + createNonGenericMetadataAccessFunction(IGM, Target); + maybeCreateInPlaceMetadataInitialization(); } }; @@ -2964,18 +3129,6 @@ namespace { bool hasCompletionFunction() { return !isa(IGM.getTypeInfo(getLoweredType())); } - - void emitInitializeMetadata(IRGenFunction &IGF, - llvm::Value *metadata, - bool isVWTMutable, - MetadataDependencyCollector *collector) { - auto loweredTy = getLoweredType(); - 
auto &fixedTI = IGM.getTypeInfo(loweredTy); - if (isa(fixedTI)) return; - - emitInitializeFieldOffsetVector(IGF, loweredTy, metadata, isVWTMutable, - collector); - } }; } // end anonymous namespace @@ -3031,8 +3184,9 @@ void IRGenerator::noteUseOfAnyParentTypeMetadata(NominalTypeDecl *type) { namespace { template - class EnumMetadataBuilderBase : public EnumMetadataVisitor { - using super = EnumMetadataVisitor; + class EnumMetadataBuilderBase + : public ValueMetadataBuilderBase> { + using super = ValueMetadataBuilderBase>; protected: ConstantStructBuilder &B; @@ -3100,6 +3254,18 @@ namespace { auto &strategy = getEnumImplStrategy(IGM, enumTy); return Size(strategy.getPayloadSizeForMetadata()); } + + void emitInitializeMetadata(IRGenFunction &IGF, + llvm::Value *metadata, + bool isVWTMutable, + MetadataDependencyCollector *collector) { + // Nominal types are always preserved through SIL lowering. + auto enumTy = getLoweredType(); + + auto &strategy = getEnumImplStrategy(IGF.IGM, enumTy); + strategy.initializeMetadata(IGF, metadata, isVWTMutable, enumTy, + collector); + } }; class EnumMetadataBuilder @@ -3111,8 +3277,6 @@ namespace { ConstantStructBuilder &B) : EnumMetadataBuilderBase(IGM, theEnum, B) {} - - void addPayloadSize() { auto payloadSize = getConstantPayloadSize(); if (!payloadSize) { @@ -3129,7 +3293,8 @@ namespace { } void createMetadataAccessFunction() { - createInPlaceValueTypeMetadataAccessFunction(IGM, Target); + createNonGenericMetadataAccessFunction(IGM, Target); + maybeCreateInPlaceMetadataInitialization(); } }; @@ -3181,18 +3346,6 @@ namespace { bool hasCompletionFunction() { return !isa(IGM.getTypeInfo(getLoweredType())); } - - void emitInitializeMetadata(IRGenFunction &IGF, - llvm::Value *metadata, - bool isVWTMutable, - MetadataDependencyCollector *collector) { - // Nominal types are always preserved through SIL lowering. 
- auto enumTy = getLoweredType(); - - auto &strategy = getEnumImplStrategy(IGF.IGM, enumTy); - strategy.initializeMetadata(IGF, metadata, isVWTMutable, enumTy, - collector); - } }; } // end anonymous namespace @@ -3346,15 +3499,15 @@ namespace { auto type = cast(asImpl().getTargetType()); (void) getTypeMetadataAccessFunction(IGM, type, ForDefinition, + CacheStrategy::Lazy, [&](IRGenFunction &IGF, DynamicMetadataRequest request, llvm::Constant *cacheVariable) { - return emitInPlaceTypeMetadataAccessFunctionBody(IGF, type, - cacheVariable, + return emitOnceTypeMetadataAccessFunctionBody(IGF, type, cacheVariable, [&](IRGenFunction &IGF, llvm::Value *candidate) { MetadataDependencyCollector *collector = nullptr; auto metadata = uniqueForeignTypeMetadataRef(IGF, candidate); - return emitInPlaceValueTypeMetadataInitialization(IGF, type, + return emitOnceValueTypeMetadataInitialization(IGF, type, metadata, collector); }); diff --git a/lib/IRGen/GenProto.cpp b/lib/IRGen/GenProto.cpp index 5e001f24f1ce2..de5a049d6f4e3 100644 --- a/lib/IRGen/GenProto.cpp +++ b/lib/IRGen/GenProto.cpp @@ -1092,8 +1092,8 @@ getWitnessTableLazyAccessFunction(IRGenModule &IGM, auto cacheVariable = cast(IGM.getAddrOfWitnessTableLazyCacheVariable( rootConformance, conformingType, ForDefinition)); - emitLazyCacheAccessFunction(IGM, accessor, cacheVariable, - [&](IRGenFunction &IGF, Explosion ¶ms) { + emitCacheAccessFunction(IGM, accessor, cacheVariable, CacheStrategy::Lazy, + [&](IRGenFunction &IGF, Explosion ¶ms) { llvm::Value *conformingMetadataCache = nullptr; return MetadataResponse::forComplete( emitWitnessTableAccessorCall(IGF, conformance, diff --git a/lib/IRGen/IRGenMangler.h b/lib/IRGen/IRGenMangler.h index f49bdd6736760..880679a650ed5 100644 --- a/lib/IRGen/IRGenMangler.h +++ b/lib/IRGen/IRGenMangler.h @@ -82,6 +82,11 @@ class IRGenMangler : public Mangle::ASTMangler { return mangleNominalTypeSymbol(Decl, "Mi"); } + std::string mangleTypeMetadataInPlaceInitializationCache( + const 
NominalTypeDecl *Decl) { + return mangleNominalTypeSymbol(Decl, "Ml"); + } + std::string mangleTypeMetadataCompletionFunction(const NominalTypeDecl *Decl){ return mangleNominalTypeSymbol(Decl, "Mr"); } diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h index 33bb1368002e2..5030cc7b4bbab 100644 --- a/lib/IRGen/IRGenModule.h +++ b/lib/IRGen/IRGenModule.h @@ -1157,6 +1157,9 @@ private: \ ForDefinition_t forDefinition); llvm::Constant *getAddrOfTypeMetadataInstantiationCache(NominalTypeDecl *D, ForDefinition_t forDefinition); + llvm::Constant *getAddrOfTypeMetadataInPlaceInitializationCache( + NominalTypeDecl *D, + ForDefinition_t forDefinition); llvm::Function *getAddrOfTypeMetadataAccessFunction(CanType type, ForDefinition_t forDefinition); llvm::Function *getAddrOfGenericTypeMetadataAccessFunction( diff --git a/lib/IRGen/Linking.cpp b/lib/IRGen/Linking.cpp index f3b6bcdd2ec8a..33da35f91dd3e 100644 --- a/lib/IRGen/Linking.cpp +++ b/lib/IRGen/Linking.cpp @@ -96,6 +96,10 @@ std::string LinkEntity::mangleAsString() const { return mangler.mangleTypeMetadataInstantiationFunction( cast(getDecl())); + case Kind::TypeMetadataInPlaceInitializationCache: + return mangler.mangleTypeMetadataInPlaceInitializationCache( + cast(getDecl())); + case Kind::TypeMetadataCompletionFunction: return mangler.mangleTypeMetadataCompletionFunction( cast(getDecl())); diff --git a/lib/IRGen/MetadataRequest.cpp b/lib/IRGen/MetadataRequest.cpp index 7ddf999781260..a266a35fdc1dc 100644 --- a/lib/IRGen/MetadataRequest.cpp +++ b/lib/IRGen/MetadataRequest.cpp @@ -1284,16 +1284,18 @@ static bool isLoadFrom(llvm::Value *value, Address address) { return false; } -/// Emit the body of a lazy cache accessor. +/// Emit the body of a cache accessor. /// /// If cacheVariable is null, we perform the direct access every time. /// This is used for metadata accessors that come about due to resilience, /// where the direct access is completely trivial. 
-void irgen::emitLazyCacheAccessFunction(IRGenModule &IGM, - llvm::Function *accessor, - llvm::GlobalVariable *cacheVariable, - LazyCacheEmitter getValue, - bool isReadNone) { +void irgen::emitCacheAccessFunction(IRGenModule &IGM, + llvm::Function *accessor, + llvm::Constant *cacheVariable, + CacheStrategy cacheStrategy, + CacheEmitter getValue, + bool isReadNone) { + assert((cacheStrategy == CacheStrategy::None) == (cacheVariable == nullptr)); accessor->setDoesNotThrow(); // This function is logically 'readnone': the caller does not need @@ -1310,8 +1312,10 @@ void irgen::emitLazyCacheAccessFunction(IRGenModule &IGM, bool returnsResponse = (accessor->getReturnType() == IGM.TypeMetadataResponseTy); + switch (cacheStrategy) { + // If there's no cache variable, just perform the direct access. - if (cacheVariable == nullptr) { + case CacheStrategy::None: { auto response = getValue(IGF, parameters); llvm::Value *ret; if (returnsResponse) { @@ -1325,13 +1329,22 @@ void irgen::emitLazyCacheAccessFunction(IRGenModule &IGM, return; } - // Set up the cache variable. + // For in-place initialization, drill to the first element of the cache. + case CacheStrategy::InPlaceInitialization: + cacheVariable = + llvm::ConstantExpr::getBitCast(cacheVariable, + IGM.TypeMetadataPtrTy->getPointerTo()); + break; + + case CacheStrategy::Lazy: + break; + } + llvm::Constant *null = llvm::ConstantPointerNull::get( - cast(cacheVariable->getValueType())); + cast( + cacheVariable->getType()->getPointerElementType())); - cacheVariable->setInitializer(null); - cacheVariable->setAlignment(IGM.getPointerAlignment().getValue()); Address cache(cacheVariable, IGM.getPointerAlignment()); // Okay, first thing, check the cache variable. @@ -1381,33 +1394,41 @@ void irgen::emitLazyCacheAccessFunction(IRGenModule &IGM, // Emit a branch around the caching code if we're working with responses // and the fetched result is not complete. 
We can avoid doing this if - // the response is statically known to be complete. + // the response is statically known to be complete, and we don't need to + // do it if this is an in-place initialization cache because the store + // is done within the runtime. llvm::BasicBlock *completionCheckBB = nullptr; llvm::Value *directState = nullptr; - if (returnsResponse && !response.isStaticallyKnownComplete()) { - completionCheckBB = IGF.Builder.GetInsertBlock(); + if (cacheStrategy == CacheStrategy::InPlaceInitialization) { directState = response.getDynamicState(); + completionCheckBB = IGF.Builder.GetInsertBlock(); + } else { + if (returnsResponse && + !response.isStaticallyKnownComplete()) { + completionCheckBB = IGF.Builder.GetInsertBlock(); + directState = response.getDynamicState(); - auto isCompleteBB = IGF.createBasicBlock("is_complete"); - auto isComplete = - IGF.Builder.CreateICmpEQ(directState, completedState); + auto isCompleteBB = IGF.createBasicBlock("is_complete"); + auto isComplete = + IGF.Builder.CreateICmpEQ(directState, completedState); - IGF.Builder.CreateCondBr(isComplete, isCompleteBB, contBB); - IGF.Builder.emitBlock(isCompleteBB); - } + IGF.Builder.CreateCondBr(isComplete, isCompleteBB, contBB); + IGF.Builder.emitBlock(isCompleteBB); + } - // Store it back to the cache variable. This needs to be a store-release - // because it needs to propagate memory visibility to the other threads - // that can access the cache: the initializing stores might be visible - // to this thread, but they aren't transitively guaranteed to be visible - // to other threads unless this is a store-release. - // - // However, we can skip this if the value was actually loaded from the - // cache. This is a simple, if hacky, peephole that's useful for the - // code in emitInPlaceTypeMetadataAccessFunctionBody. 
- if (!isLoadFrom(directResult, cache)) { - IGF.Builder.CreateStore(directResult, cache) - ->setAtomic(llvm::AtomicOrdering::Release); + // Store it back to the cache variable. This needs to be a store-release + // because it needs to propagate memory visibility to the other threads + // that can access the cache: the initializing stores might be visible + // to this thread, but they aren't transitively guaranteed to be visible + // to other threads unless this is a store-release. + // + // However, we can skip this if the value was actually loaded from the + // cache. This is a simple, if hacky, peephole that's useful for the + // code in emitOnceTypeMetadataAccessFunctionBody. + if (!isLoadFrom(directResult, cache)) { + IGF.Builder.CreateStore(directResult, cache) + ->setAtomic(llvm::AtomicOrdering::Release); + } } IGF.Builder.CreateBr(contBB); @@ -1424,12 +1445,14 @@ void irgen::emitLazyCacheAccessFunction(IRGenModule &IGM, // Add a phi for the metadata state if we're returning a response. llvm::Value *stateToReturn = nullptr; if (directState) { - phi->addIncoming(directResult, completionCheckBB); + if (storeBB != completionCheckBB) + phi->addIncoming(directResult, completionCheckBB); auto completionStatePHI = IGF.Builder.CreatePHI(IGM.SizeTy, 3); completionStatePHI->addIncoming(completedState, loadBB); completionStatePHI->addIncoming(directState, completionCheckBB); - completionStatePHI->addIncoming(completedState, storeBB); + if (storeBB != completionCheckBB) + completionStatePHI->addIncoming(completedState, storeBB); stateToReturn = completionStatePHI; } else if (returnsResponse) { stateToReturn = completedState; @@ -1565,11 +1588,11 @@ emitGenericTypeMetadataAccessFunction(IRGenFunction &IGF, /// Emit a helper function for swift_once that performs in-place /// initialization of the given nominal type. 
static llvm::Constant * -createInPlaceMetadataInitializationFunction(IRGenModule &IGM, - CanNominalType type, - llvm::Constant *metadata, - llvm::Constant *cacheVariable, - InPlaceMetadataInitializer &&initialize) { +createOnceMetadataInitializationFunction(IRGenModule &IGM, + CanNominalType type, + llvm::Constant *metadata, + llvm::Constant *cacheVariable, + OnceMetadataInitializer initialize) { // There's an ignored i8* parameter. auto fnTy = llvm::FunctionType::get(IGM.VoidTy, {IGM.Int8PtrTy}, /*variadic*/ false); @@ -1603,12 +1626,12 @@ createInPlaceMetadataInitializationFunction(IRGenModule &IGM, } /// Emit the function body for the type metadata accessor of a nominal type -/// that might require in-place initialization. +/// that uses swift_once to control in-place initialization. MetadataResponse -irgen::emitInPlaceTypeMetadataAccessFunctionBody(IRGenFunction &IGF, - CanNominalType type, - llvm::Constant *cacheVariable, - InPlaceMetadataInitializer &&initializer) { +irgen::emitOnceTypeMetadataAccessFunctionBody(IRGenFunction &IGF, + CanNominalType type, + llvm::Constant *cacheVariable, + OnceMetadataInitializer initializer) { llvm::Constant *metadata = IGF.IGM.requiresForeignTypeMetadata(type) ? IGF.IGM.getAddrOfForeignTypeMetadataCandidate(type) @@ -1635,9 +1658,9 @@ irgen::emitInPlaceTypeMetadataAccessFunctionBody(IRGenFunction &IGF, // Create the protected function. swift_once wants this as an i8*. 
llvm::Value *onceFn = - createInPlaceMetadataInitializationFunction(IGF.IGM, type, metadata, - cacheVariable, - std::move(initializer)); + createOnceMetadataInitializationFunction(IGF.IGM, type, metadata, + cacheVariable, + std::move(initializer)); onceFn = IGF.Builder.CreateBitCast(onceFn, IGF.IGM.Int8PtrTy); auto context = llvm::UndefValue::get(IGF.IGM.Int8PtrTy); @@ -1653,7 +1676,7 @@ irgen::emitInPlaceTypeMetadataAccessFunctionBody(IRGenFunction &IGF, if (IGF.IGM.IRGen.Opts.Sanitizers & SanitizerKind::Thread) relocatedMetadata->setOrdering(llvm::AtomicOrdering::Acquire); - // emitLazyCacheAccessFunction will see that the value was loaded from + // emitCacheAccessFunction will see that the value was loaded from // the guard variable and skip the redundant store back. return MetadataResponse::forComplete(relocatedMetadata); } @@ -1728,6 +1751,7 @@ llvm::Function * irgen::getTypeMetadataAccessFunction(IRGenModule &IGM, CanType type, ForDefinition_t shouldDefine, + CacheStrategy cacheStrategy, MetadataAccessGenerator generator) { assert(!type->hasArchetype()); // Type should be bound unless it's type erased. @@ -1744,20 +1768,37 @@ irgen::getTypeMetadataAccessFunction(IRGenModule &IGM, return accessor; // Okay, define the accessor. - llvm::GlobalVariable *cacheVariable = nullptr; + llvm::Constant *cacheVariable = nullptr; // If our preferred access method is to go via an accessor, it means // there is some non-trivial computation that needs to be cached. - if (!isTypeMetadataAccessTrivial(IGM, type)) { - cacheVariable = cast( - IGM.getAddrOfTypeMetadataLazyCacheVariable(type, ForDefinition)); + if (isTypeMetadataAccessTrivial(IGM, type)) { + cacheStrategy = CacheStrategy::None; + } else { + switch (cacheStrategy) { + // Nothing to do. + case CacheStrategy::None: + break; + + // For lazy initialization, the cache variable is just a pointer. 
+ case CacheStrategy::Lazy: + cacheVariable = + IGM.getAddrOfTypeMetadataLazyCacheVariable(type, ForDefinition); + break; + + // For in-place initialization, drill down to the first element. + case CacheStrategy::InPlaceInitialization: + cacheVariable = IGM.getAddrOfTypeMetadataInPlaceInitializationCache( + type->getAnyNominal(), ForDefinition); + break; + } if (IGM.getOptions().optimizeForSize()) accessor->addFnAttr(llvm::Attribute::NoInline); } - emitLazyCacheAccessFunction(IGM, accessor, cacheVariable, - [&](IRGenFunction &IGF, Explosion ¶ms) { + emitCacheAccessFunction(IGM, accessor, cacheVariable, cacheStrategy, + [&](IRGenFunction &IGF, Explosion ¶ms) { auto request = DynamicMetadataRequest(params.claimNext()); return generator(IGF, request, cacheVariable); }); @@ -1770,6 +1811,7 @@ llvm::Function *irgen::getTypeMetadataAccessFunction(IRGenModule &IGM, CanType type, ForDefinition_t shouldDefine) { return getTypeMetadataAccessFunction(IGM, type, shouldDefine, + CacheStrategy::Lazy, [&](IRGenFunction &IGF, DynamicMetadataRequest request, llvm::Constant *cacheVariable) { @@ -1805,12 +1847,12 @@ irgen::getGenericTypeMetadataAccessFunction(IRGenModule &IGM, bool isReadNone = (genericArgs.Types.size() <= NumDirectGenericTypeMetadataAccessFunctionArgs); - emitLazyCacheAccessFunction(IGM, accessor, /*cacheVariable=*/nullptr, - [&](IRGenFunction &IGF, Explosion ¶ms) { - return emitGenericTypeMetadataAccessFunction( + emitCacheAccessFunction(IGM, accessor, /*cache*/nullptr, CacheStrategy::None, + [&](IRGenFunction &IGF, Explosion ¶ms) { + return emitGenericTypeMetadataAccessFunction( IGF, params, nominal, genericArgs); - }, - isReadNone); + }, + isReadNone); return accessor; } diff --git a/lib/IRGen/MetadataRequest.h b/lib/IRGen/MetadataRequest.h index ec1beba276d01..ec5d1a83fed4c 100644 --- a/lib/IRGen/MetadataRequest.h +++ b/lib/IRGen/MetadataRequest.h @@ -476,9 +476,21 @@ using MetadataAccessGenerator = DynamicMetadataRequest request, llvm::Constant *cache)>; +enum 
class CacheStrategy { + /// No cache. + None, + + /// A simple lazy cache. + Lazy, + + /// An InPlaceValueMetadataCache initialization cache. + InPlaceInitialization, +}; + llvm::Function *getTypeMetadataAccessFunction(IRGenModule &IGM, CanType type, ForDefinition_t shouldDefine, + CacheStrategy cacheStrategy, MetadataAccessGenerator generator); llvm::Function * @@ -491,27 +503,28 @@ getRequiredTypeMetadataAccessFunction(IRGenModule &IGM, NominalTypeDecl *theDecl, ForDefinition_t shouldDefine); -using InPlaceMetadataInitializer = +using OnceMetadataInitializer = llvm::function_ref; MetadataResponse -emitInPlaceTypeMetadataAccessFunctionBody(IRGenFunction &IGF, - CanNominalType type, - llvm::Constant *cacheVariable, - InPlaceMetadataInitializer &&initializer); +emitOnceTypeMetadataAccessFunctionBody(IRGenFunction &IGF, + CanNominalType type, + llvm::Constant *cacheVariable, + OnceMetadataInitializer initializer); llvm::Value *uniqueForeignTypeMetadataRef(IRGenFunction &IGF, llvm::Value *candidate); -using LazyCacheEmitter = +using CacheEmitter = llvm::function_ref; /// Emit the body of a lazy cache access function. -void emitLazyCacheAccessFunction(IRGenModule &IGM, - llvm::Function *accessor, - llvm::GlobalVariable *cache, - LazyCacheEmitter getValue, - bool isReadNone = true); +void emitCacheAccessFunction(IRGenModule &IGM, + llvm::Function *accessor, + llvm::Constant *cache, + CacheStrategy cacheStrategy, + CacheEmitter getValue, + bool isReadNone = true); /// Emit a declaration reference to a metatype object. 
void emitMetatypeRef(IRGenFunction &IGF, CanMetatypeType type, diff --git a/stdlib/public/runtime/Metadata.cpp b/stdlib/public/runtime/Metadata.cpp index e4aa11f4ff9f5..4789db9791d67 100644 --- a/stdlib/public/runtime/Metadata.cpp +++ b/stdlib/public/runtime/Metadata.cpp @@ -175,6 +175,14 @@ areAllTransitiveMetadataComplete_cheap(const Metadata *metadata); static MetadataDependency checkTransitiveCompleteness(const Metadata *metadata); +static PrivateMetadataState inferStateForMetadata(Metadata *metadata) { + if (metadata->getValueWitnesses()->isIncomplete()) + return PrivateMetadataState::Abstract; + + // TODO: internal vs. external layout-complete? + return PrivateMetadataState::LayoutComplete; +} + namespace { struct GenericCacheEntry final : VariadicMetadataCacheEntryBase { @@ -212,14 +220,6 @@ namespace { return { metadata, state }; } - PrivateMetadataState inferStateForMetadata(Metadata *metadata) { - if (metadata->getValueWitnesses()->isIncomplete()) - return PrivateMetadataState::Abstract; - - // TODO: internal vs. external layout-complete? - return PrivateMetadataState::LayoutComplete; - } - static const TypeContextDescriptor *getDescription(Metadata *type) { if (auto classType = dyn_cast(type)) return classType->getDescription(); @@ -533,6 +533,169 @@ swift::swift_getGenericMetadata(MetadataRequest request, return result.second; } +/***************************************************************************/ +/*** In-place metadata initialization **************************************/ +/***************************************************************************/ + +namespace { + /// A cache entry for "in-place" metadata initializations. 
+ class InPlaceMetadataCacheEntry final + : public MetadataCacheEntryBase { + ValueType Value = nullptr; + + friend MetadataCacheEntryBase; + ValueType getValue() { + return Value; + } + void setValue(ValueType value) { + Value = value; + } + + public: + // We have to give MetadataCacheEntryBase a non-empty list of trailing + // objects or else it gets annoyed. + static size_t numTrailingObjects(OverloadToken) { return 0; } + + static const char *getName() { return "InPlaceMetadataCache"; } + + InPlaceMetadataCacheEntry() {} + + AllocationResult allocate(const TypeContextDescriptor *description) { + auto valueTypeDescriptor = cast(description); + auto &initialization = + valueTypeDescriptor->getInPlaceMetadataInitialization(); + + auto metadata = initialization.IncompleteMetadata.get(); + + auto state = inferStateForMetadata(metadata); + return { metadata, state }; + } + + static const TypeContextDescriptor *getDescription(Metadata *type) { + return cast(type)->getDescription(); + } + + TryInitializeResult tryInitialize(Metadata *metadata, + PrivateMetadataState state, + PrivateMetadataCompletionContext *context) { + assert(state != PrivateMetadataState::Complete); + + // Finish the completion function. + if (state < PrivateMetadataState::NonTransitiveComplete) { + // Find a pattern. Currently we always use the default pattern. + auto &initialization = + cast(metadata)->getDescription() + ->getInPlaceMetadataInitialization(); + + // Complete the metadata's instantiation. + auto dependency = + initialization.CompletionFunction(metadata, &context->Public, + /*pattern*/ nullptr); + + // If this failed with a dependency, infer the current metadata state + // and return. + if (dependency) { + return { inferStateForMetadata(metadata), dependency }; + } + } + + // Check for transitive completeness. + if (auto dependency = checkTransitiveCompleteness(metadata)) { + return { PrivateMetadataState::NonTransitiveComplete, dependency }; + } + + // We're done. 
+ publishCompleteMetadata(metadata); + return { PrivateMetadataState::Complete, MetadataDependency() }; + } + + void publishCompleteMetadata(Metadata *metadata) { + auto &init = cast(metadata)->getDescription() + ->getInPlaceMetadataInitialization(); + auto &cache = *init.InitializationCache.get(); + cache.Metadata.store(metadata, std::memory_order_release); + } + }; + + /// An implementation of LockingConcurrentMapStorage that's more + /// appropriate for the in-place metadata cache. + /// + /// TODO: delete the cache entry when initialization is complete. + class InPlaceMetadataCacheStorage { + ConcurrencyControl Concurrency; + + public: + using KeyType = const TypeContextDescriptor *; + using EntryType = InPlaceMetadataCacheEntry; + + ConcurrencyControl &getConcurrency() { return Concurrency; } + + template + std::pair + getOrInsert(KeyType key, ArgTys &&...args) { + auto &init = + cast(key)->getInPlaceMetadataInitialization(); + auto &cache = *init.InitializationCache.get(); + + // Check for an existing entry. + auto existingEntry = cache.Private.load(std::memory_order_acquire); + + // If there isn't one there, optimistically create an entry and + // try to swap it in. + if (!existingEntry) { + auto allocatedEntry = new InPlaceMetadataCacheEntry(); + if (cache.Private.compare_exchange_strong(existingEntry, + allocatedEntry, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + // If that succeeded, return the entry we allocated and tell the + // caller we allocated it. + return { allocatedEntry, true }; + } + + // Otherwise, use the new entry and destroy the one we allocated. 
+ assert(existingEntry && "spurious failure of strong compare-exchange?"); + delete allocatedEntry; + } + + return { static_cast(existingEntry), false }; + } + + EntryType *find(KeyType key) { + auto &init = + cast(key)->getInPlaceMetadataInitialization(); + + return static_cast( + init.InitializationCache->Private.load(std::memory_order_acquire)); + } + + /// A default implementation for resolveEntry that assumes that the + /// key type is a lookup key for the map. + EntryType *resolveExistingEntry(KeyType key) { + auto entry = find(key); + assert(entry && "entry doesn't already exist!"); + return entry; + } + }; + + class InPlaceMetadataCache + : public LockingConcurrentMap { + }; +} // end anonymous namespace + +/// The cache of all in-place metadata initializations. +static Lazy InPlaceMetadata; + +MetadataResponse +swift::swift_getInPlaceMetadata(MetadataRequest request, + const TypeContextDescriptor *description) { + auto result = InPlaceMetadata.get().getOrInsert(description, request, + description); + + return result.second; +} + /***************************************************************************/ /*** Objective-C class wrappers ********************************************/ /***************************************************************************/ @@ -875,7 +1038,8 @@ class TupleCacheEntry } }; -class TupleCache : public MetadataCache { +class TupleCacheStorage : + public LockingConcurrentMapStorage { public: // FIXME: https://bugs.swift.org/browse/SR-1155 #pragma clang diagnostic push @@ -892,6 +1056,10 @@ class TupleCache : public MetadataCache { #pragma clang diagnostic pop }; +class TupleCache : + public LockingConcurrentMap { +}; + } // end anonymous namespace /// The uniquing structure for tuple type metadata. 
@@ -1269,7 +1437,7 @@ TupleCacheEntry::TupleCacheEntry(const Key &key, MetadataRequest request, for (size_t i = 0, e = key.NumElements; i != e; ++i) Data.getElement(i).Type = key.Elements[i]; - assert(TupleCache::resolveExistingEntry(&Data) == this); + assert(TupleCacheStorage::resolveExistingEntry(&Data) == this); } TupleCacheEntry::AllocationResult @@ -3293,7 +3461,7 @@ class WitnessTableCacheEntry : } // end anonymous namespace using GenericWitnessTableCache = - LockingConcurrentMap; + MetadataCache; using LazyGenericWitnessTableCache = Lazy; /// Fetch the cache for a generic witness-table structure. @@ -3465,6 +3633,10 @@ static Result performOnMetadataCache(const Metadata *metadata, } if (!description->isGeneric()) { + if (description->hasInPlaceMetadataInitialization()) { + return std::move(callbacks).forInPlaceMetadata(description); + } + return std::move(callbacks).forOtherMetadata(metadata); } @@ -3493,6 +3665,10 @@ bool swift::addToMetadataQueue(MetadataCompletionQueueEntry *queueEntry, return cache.enqueue(key, QueueEntry, Dependency); } + bool forInPlaceMetadata(const TypeContextDescriptor *description) && { + return InPlaceMetadata.get().enqueue(description, QueueEntry, Dependency); + } + bool forTupleMetadata(const TupleTypeMetadata *metadata) { return TupleTypes.get().enqueue(metadata, QueueEntry, Dependency); } @@ -3516,6 +3692,10 @@ void swift::resumeMetadataCompletion(MetadataCompletionQueueEntry *queueEntry) { cache.resumeInitialization(key, QueueEntry); } + void forInPlaceMetadata(const TypeContextDescriptor *description) && { + InPlaceMetadata.get().resumeInitialization(description, QueueEntry); + } + void forTupleMetadata(const TupleTypeMetadata *metadata) { TupleTypes.get().resumeInitialization(metadata, QueueEntry); } @@ -3542,6 +3722,11 @@ MetadataResponse swift::swift_checkMetadataState(MetadataRequest request, return cache.await(key, Request); } + MetadataResponse forInPlaceMetadata( + const TypeContextDescriptor *description) && { + 
return InPlaceMetadata.get().await(description, Request); + } + MetadataResponse forTupleMetadata(const TupleTypeMetadata *metadata) { return TupleTypes.get().await(metadata, Request); } @@ -3625,6 +3810,11 @@ areAllTransitiveMetadataComplete_cheap(const Metadata *type) { return true; } + bool forInPlaceMetadata(const TypeContextDescriptor *description) && { + // TODO: this could be cheap enough. + return true; + } + bool forTupleMetadata(const TupleTypeMetadata *metadata) { // TODO: this could be cheap enough. return true; @@ -3782,6 +3972,11 @@ checkMetadataDependency(MetadataDependency dependency) { return cache.checkDependency(key, Requirement); } + MetadataDependency forInPlaceMetadata( + const TypeContextDescriptor *description) && { + return InPlaceMetadata.get().checkDependency(description, Requirement); + } + MetadataDependency forTupleMetadata(const TupleTypeMetadata *metadata) { return TupleTypes.get().checkDependency(metadata, Requirement); } diff --git a/stdlib/public/runtime/MetadataCache.h b/stdlib/public/runtime/MetadataCache.h index 1f764ab7fbfa6..fad75ec4ffc3c 100644 --- a/stdlib/public/runtime/MetadataCache.h +++ b/stdlib/public/runtime/MetadataCache.h @@ -83,6 +83,41 @@ enum class ConcurrencyRequest { struct ConcurrencyControl { Mutex Lock; ConditionVariable Queue; + + ConcurrencyControl() = default; +}; + +template +class LockingConcurrentMapStorage { + ConcurrentMap Map; + StaticOwningPointer Concurrency; + +public: + LockingConcurrentMapStorage() : Concurrency(new ConcurrencyControl()) {} + + MetadataAllocator &getAllocator() { return Map.getAllocator(); } + + ConcurrencyControl &getConcurrency() { return *Concurrency; } + + template + std::pair + getOrInsert(KeyType key, ArgTys &&...args) { + return Map.getOrInsert(key, args...); + } + + template + EntryType *find(KeyType key) { + return Map.find(key); + } + + /// A default implementation for resolveEntry that assumes that the + /// key type is a lookup key for the map. 
+ template + EntryType *resolveExistingEntry(KeyType key) { + auto entry = Map.find(key); + assert(entry && "entry doesn't already exist!"); + return entry; + } }; /// A map for which there is a phase of initialization that is guaranteed @@ -122,38 +157,28 @@ struct ConcurrencyControl { /// /// implemented if checkDependency is called on the map. /// MetadataDependency checkDependency(ConcurrencyControl &concurrency, /// ArgTys...); -template +template > class LockingConcurrentMap { - ConcurrentMap Map; - - StaticOwningPointer Concurrency; - -protected: - using Impl = - typename std::conditional::value, - LockingConcurrentMap, - OptImpl>::type; - Impl &asImpl() { return static_cast(*this); } - + StorageType Storage; using Status = typename EntryType::Status; public: - LockingConcurrentMap() : Concurrency(new ConcurrencyControl()) {} + LockingConcurrentMap() = default; - MetadataAllocator &getAllocator() { return Map.getAllocator(); } + MetadataAllocator &getAllocator() { return Storage.getAllocator(); } template std::pair getOrInsert(KeyType key, ArgTys &&...args) { - auto result = Map.getOrInsert(key, args...); + auto result = Storage.getOrInsert(key, args...); auto entry = result.first; // If we are not inserting the entry, we need to potentially block on // currently satisfies our conditions. if (!result.second) { auto status = - entry->await(*Concurrency, std::forward(args)...); + entry->await(Storage.getConcurrency(), std::forward(args)...); return { entry, status }; } @@ -162,66 +187,63 @@ class LockingConcurrentMap { // Allocation. This can fast-path and bypass initialization by returning // a status. - if (auto status = entry->beginAllocation(*Concurrency, args...)) { + if (auto status = + entry->beginAllocation(Storage.getConcurrency(), args...)) { return { entry, *status }; } // Initialization. 
- auto status = entry->beginInitialization(*Concurrency, + auto status = entry->beginInitialization(Storage.getConcurrency(), std::forward(args)...); return { entry, status }; } template EntryType *find(KeyType key) { - return Map.find(key); + return Storage.find(key); } template std::pair resumeInitialization(KeyType key, ArgTys &&...args) { - EntryType *entry = asImpl().resolveExistingEntry(key); + EntryType *entry = Storage.resolveExistingEntry(key); auto status = - entry->resumeInitialization(*Concurrency, std::forward(args)...); + entry->resumeInitialization(Storage.getConcurrency(), + std::forward(args)...); return { entry, status }; } template bool enqueue(KeyType key, ArgTys &&...args) { - EntryType *entry = asImpl().resolveExistingEntry(key); - return entry->enqueue(*Concurrency, std::forward(args)...); + EntryType *entry = Storage.resolveExistingEntry(key); + return entry->enqueue(Storage.getConcurrency(), + std::forward(args)...); } /// Given that an entry already exists, await it. template Status await(KeyType key, ArgTys &&...args) { - EntryType *entry = asImpl().resolveExistingEntry(key); - return entry->await(*Concurrency, std::forward(args)...); + EntryType *entry = Storage.resolveExistingEntry(key); + return entry->await(Storage.getConcurrency(), + std::forward(args)...); } /// If an entry already exists, await it; otherwise report failure. template Optional tryAwaitExisting(KeyType key, ArgTys &&...args) { - EntryType *entry = Map.find(key); + EntryType *entry = Storage.find(key); if (!entry) return None; - return entry->await(*Concurrency, std::forward(args)...); + return entry->await(Storage.getConcurrency(), + std::forward(args)...); } /// Given that an entry already exists, check whether it has an active /// dependency. 
template MetadataDependency checkDependency(KeyType key, ArgTys &&...args) { - EntryType *entry = asImpl().resolveExistingEntry(key); - return entry->checkDependency(*Concurrency, std::forward(args)...); - } - - /// A default implementation for resolveEntry that assumes that the - /// key type is a lookup key for the map. - template - EntryType *resolveExistingEntry(KeyType key) { - auto entry = Map.find(key); - assert(entry && "entry doesn't already exist!"); - return entry; + EntryType *entry = Storage.resolveExistingEntry(key); + return entry->checkDependency(Storage.getConcurrency(), + std::forward(args)...); } }; @@ -1255,9 +1277,10 @@ class VariadicMetadataCacheEntryBase : } }; -template +template class MetadataCache : - public LockingConcurrentMap { + public LockingConcurrentMap> { }; } // namespace swift diff --git a/test/IRGen/enum_resilience.swift b/test/IRGen/enum_resilience.swift index 692f7c67214f9..60e4964ea7afa 100644 --- a/test/IRGen/enum_resilience.swift +++ b/test/IRGen/enum_resilience.swift @@ -44,6 +44,20 @@ import resilient_struct // CHECK: %T15enum_resilience10EitherFastO = type <{ [[REFERENCE_TYPE]] }> +// CHECK: @"$S15enum_resilience24EnumWithResilientPayloadOMl" = +// CHECK-SAME: internal global { %swift.type*, i8* } zeroinitializer, align + +// CHECK: @"$S15enum_resilience24EnumWithResilientPayloadOMn" = {{.*}}constant +// 1310802 == 0x00140052 +// 0x0010 - HasInPlaceMetadataInitialization +// 0x0014 - IsReflectable +// 0x 0040 - IsUnique +// 0x 0012 - Enum +// CHECK-SAME: i32 1310802, +// CHECK-SAME: @"$S15enum_resilience24EnumWithResilientPayloadOMl" +// CHECK-SAME: @"$S15enum_resilience24EnumWithResilientPayloadOMf", i32 0, i32 1) +// CHECK-SAME: @"$S15enum_resilience24EnumWithResilientPayloadOMr" + public class Class {} public struct Reference { @@ -267,17 +281,22 @@ public func getResilientEnumType() -> Any.Type { // Public metadata accessor for our resilient enum // CHECK-LABEL: define{{( dllexport)?}}{{( protected)?}} swiftcc 
%swift.metadata_response @"$S15enum_resilience24EnumWithResilientPayloadOMa"( -// CHECK: [[METADATA:%.*]] = load %swift.type*, %swift.type** @"$S15enum_resilience24EnumWithResilientPayloadOML" -// CHECK-NEXT: [[COND:%.*]] = icmp eq %swift.type* [[METADATA]], null +// CHECK: [[LOAD_METADATA:%.*]] = load %swift.type*, %swift.type** getelementptr inbounds ({ %swift.type*, i8* }, { %swift.type*, i8* }* @"$S15enum_resilience24EnumWithResilientPayloadOMl", i32 0, i32 0), align +// CHECK-NEXT: [[COND:%.*]] = icmp eq %swift.type* [[LOAD_METADATA]], null // CHECK-NEXT: br i1 [[COND]], label %cacheIsNull, label %cont // CHECK: cacheIsNull: -// CHECK-NEXT: call void @swift_once([[INT]]* @"$S15enum_resilience24EnumWithResilientPayloadOMa.once_token", i8* bitcast (void (i8*)* @initialize_metadata_EnumWithResilientPayload to i8*), i8* undef) -// CHECK-NEXT: [[METADATA2:%.*]] = load %swift.type*, %swift.type** @"$S15enum_resilience24EnumWithResilientPayloadOML" +// CHECK-NEXT: [[RESPONSE:%.*]] = call swiftcc %swift.metadata_response @swift_getInPlaceMetadata([[INT]] %0, %swift.type_descriptor* bitcast ({{.*}} @"$S15enum_resilience24EnumWithResilientPayloadOMn" to %swift.type_descriptor*)) +// CHECK-NEXT: [[RESPONSE_METADATA:%.*]] = extractvalue %swift.metadata_response [[RESPONSE]], 0 +// CHECK-NEXT: [[RESPONSE_STATE:%.*]] = extractvalue %swift.metadata_response [[RESPONSE]], 1 // CHECK-NEXT: br label %cont // CHECK: cont: -// CHECK-NEXT: [[RESULT:%.*]] = phi %swift.type* [ [[METADATA]], %entry ], [ [[METADATA2]], %cacheIsNull ] +// CHECK-NEXT: [[RESULT_METADATA:%.*]] = phi %swift.type* [ [[LOAD_METADATA]], %entry ], [ [[RESPONSE_METADATA]], %cacheIsNull ] +// CHECK-NEXT: [[RESULT_STATE:%.*]] = phi [[INT]] [ 0, %entry ], [ [[RESPONSE_STATE]], %cacheIsNull ] +// CHECK-NEXT: [[T0:%.*]] = insertvalue %swift.metadata_response undef, %swift.type* [[RESULT_METADATA]], 0 +// CHECK-NEXT: [[T1:%.*]] = insertvalue %swift.metadata_response [[T0]], [[INT]] [[RESULT_STATE]], 1 +// CHECK-NEXT: 
ret %swift.metadata_response [[T1]] // Methods inside extensions of resilient enums fish out type parameters // from metadata -- make sure we can do that @@ -311,9 +330,22 @@ public func constructFullyFixed() -> FullyFixedLayout { return .noPayload } -// CHECK-LABEL: define private void @initialize_metadata_EnumWithResilientPayload(i8*) -// CHECK: call void @swift_initEnumMetadataMultiPayload(%swift.type* {{.*}}, [[INT]] 256, [[INT]] 2, i8*** {{.*}}) - +// CHECK-LABEL: define internal swiftcc %swift.metadata_response @"$S15enum_resilience24EnumWithResilientPayloadOMr"(%swift.type*, i8*, i8**) +// CHECK: [[SIZE_RESPONSE:%.*]] = call swiftcc %swift.metadata_response @"$S16resilient_struct4SizeVMa"([[INT]] 319) +// CHECK-NEXT: [[SIZE_METADATA:%.*]] = extractvalue %swift.metadata_response [[SIZE_RESPONSE]], 0 +// CHECK-NEXT: [[SIZE_STATE:%.*]] = extractvalue %swift.metadata_response [[SIZE_RESPONSE]], 1 +// CHECK-NEXT: [[T0:%.*]] = icmp ule [[INT]] [[SIZE_STATE]], 63 +// CHECK-NEXT: br i1 [[T0]], label %[[SATISFIED1:.*]], label +// CHECK: [[SATISFIED1]]: +// CHECK: [[TUPLE_RESPONSE:%.*]] = call swiftcc %swift.metadata_response @swift_getTupleTypeMetadata2([[INT]] 319, %swift.type* [[SIZE_METADATA]], %swift.type* [[SIZE_METADATA]], i8* null, i8** null) +// CHECK-NEXT: [[TUPLE_METADATA:%.*]] = extractvalue %swift.metadata_response [[TUPLE_RESPONSE]], 0 +// CHECK-NEXT: [[TUPLE_STATE:%.*]] = extractvalue %swift.metadata_response [[TUPLE_RESPONSE]], 1 +// CHECK-NEXT: [[T0:%.*]] = icmp ule [[INT]] [[TUPLE_STATE]], 63 +// CHECK-NEXT: br i1 [[T0]], label %[[SATISFIED2:.*]], label +// CHECK: [[SATISFIED2]]: +// CHECK: call void @swift_initEnumMetadataMultiPayload +// CHECK: phi %swift.type* [ [[SIZE_METADATA]], %entry ], [ [[TUPLE_METADATA]], %[[SATISFIED1]] ], [ null, %[[SATISFIED2]] ] +// CHECK: phi [[INT]] [ 63, %entry ], [ 63, %[[SATISFIED1]] ], [ 0, %[[SATISFIED2]] ] public protocol Prot { diff --git a/test/IRGen/struct_resilience.swift b/test/IRGen/struct_resilience.swift 
index 9fec84ccbec34..61f5841fc2606 100644 --- a/test/IRGen/struct_resilience.swift +++ b/test/IRGen/struct_resilience.swift @@ -196,18 +196,20 @@ public func resilientAny(s : ResilientWeakRef) { // CHECK: ret %swift.metadata_response { %swift.type* bitcast ([[INT]]* getelementptr inbounds {{.*}} @"$S17struct_resilience6MySizeVMf", i32 0, i32 1) to %swift.type*), [[INT]] 0 } -// CHECK-LABEL: define{{( dllexport)?}}{{( protected)?}} private void @initialize_metadata_StructWithResilientStorage(i8*) +// CHECK-LABEL: define internal swiftcc %swift.metadata_response @"$S17struct_resilience26StructWithResilientStorageVMr"(%swift.type*, i8*, i8**) // CHECK: [[FIELDS:%.*]] = alloca [4 x i8**] // CHECK: [[FIELDS_ADDR:%.*]] = getelementptr inbounds [4 x i8**], [4 x i8**]* [[FIELDS]], i32 0, i32 0 // public let s: Size +// CHECK: call swiftcc %swift.metadata_response @"$S16resilient_struct4SizeVMa"([[INT]] 319) // CHECK: [[FIELD_1:%.*]] = getelementptr inbounds i8**, i8*** [[FIELDS_ADDR]], i32 0 // CHECK: store i8** [[SIZE_AND_ALIGNMENT:%.*]], i8*** [[FIELD_1]] // public let ss: (Size, Size) +// CHECK: call swiftcc %swift.metadata_response @swift_getTupleTypeMetadata2([[INT]] 319, // CHECK: [[FIELD_2:%.*]] = getelementptr inbounds i8**, i8*** [[FIELDS_ADDR]], i32 1 // CHECK: store i8** [[SIZE_AND_ALIGNMENT:%.*]], i8*** [[FIELD_2]] @@ -219,9 +221,8 @@ public func resilientAny(s : ResilientWeakRef) { // Resilient aggregate with one field -- make sure we don't look inside it // public let i: ResilientInt +// CHECK: call swiftcc %swift.metadata_response @"$S16resilient_struct12ResilientIntVMa"([[INT]] 319) // CHECK: [[FIELD_4:%.*]] = getelementptr inbounds i8**, i8*** [[FIELDS_ADDR]], i32 3 // CHECK: store i8** [[SIZE_AND_ALIGNMENT:%.*]], i8*** [[FIELD_4]] // CHECK: call void @swift_initStructMetadata(%swift.type* {{.*}}, [[INT]] 256, [[INT]] 4, i8*** [[FIELDS_ADDR]], i32* {{.*}}) -// CHECK: store atomic %swift.type* {{.*}} 
@"$S17struct_resilience26StructWithResilientStorageVMf{{.*}}, %swift.type** @"$S17struct_resilience26StructWithResilientStorageVML" release, -// CHECK: ret void diff --git a/test/Interpreter/resilient_metadata_cycles.swift b/test/Interpreter/resilient_metadata_cycles.swift new file mode 100644 index 0000000000000..c0fc4c4571e80 --- /dev/null +++ b/test/Interpreter/resilient_metadata_cycles.swift @@ -0,0 +1,28 @@ +// RUN: %empty-directory(%t) + +// RUN: %target-build-swift-dylib(%t/libresilient_struct.%target-dylib-extension) -Xfrontend -enable-resilience %S/../Inputs/resilient_struct.swift -emit-module -emit-module-path %t/resilient_struct.swiftmodule -module-name resilient_struct +// RUN: %target-codesign %t/libresilient_struct.%target-dylib-extension + +// RUN: %target-build-swift %s -L %t -I %t -lresilient_struct -o %t/main -Xlinker -rpath -Xlinker %t + +// RUN: %target-run %t/main %t/libresilient_struct.%target-dylib-extension + +import StdlibUnittest + +import resilient_struct + +var ResilientMetadataCycleTests = TestSuite("Resilient metadata cycle tests") + +// SR-7876 +enum test0_Node { + case link(size: Size, children: [test0_Node]) + + static func test() -> [test0_Node] { + return [] + } +} +ResilientMetadataCycleTests.test("SR-7876") { + _ = test0_Node.test() +} + +runAllTests()