diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst index 51e6a4460336fd..22c735c91125ac 100644 --- a/clang/docs/LanguageExtensions.rst +++ b/clang/docs/LanguageExtensions.rst @@ -13,6 +13,7 @@ Clang Language Extensions BlockLanguageSpec Block-ABI-Apple AutomaticReferenceCounting + PointerAuthentication Introduction ============ @@ -2828,6 +2829,10 @@ reordering of memory accesses and side effect instructions. Other instructions like simple arithmetic may be reordered around the intrinsic. If you expect to have no reordering at all, use inline assembly instead. +Pointer Authentication +^^^^^^^^^^^^^^^^^^^^^^ +See :doc:`PointerAuthentication`. + X86/X86-64 Language Extensions ------------------------------ diff --git a/clang/docs/PointerAuthentication.rst b/clang/docs/PointerAuthentication.rst new file mode 100644 index 00000000000000..04adc52c900a07 --- /dev/null +++ b/clang/docs/PointerAuthentication.rst @@ -0,0 +1,877 @@ +Pointer Authentication +====================== + +.. contents:: + :local: + +Introduction +------------ + +Pointer authentication is a technology which offers strong probabilistic protection against exploiting a broad class of memory bugs to take control of program execution. When adopted consistently in a language ABI, it provides a form of relatively fine-grained control flow integrity (CFI) check that resists both return-oriented programming (ROP) and jump-oriented programming (JOP) attacks. + +While pointer authentication can be implemented purely in software, direct hardware support (e.g. as provided by ARMv8.3) can dramatically lower the execution speed and code size costs. Similarly, while pointer authentication can be implemented on any architecture, taking advantage of the (typically) excess addressing range of a target with 64-bit pointers minimizes the impact on memory performance and can allow interoperation with existing code (by disabling pointer authentication dynamically). 
This document will generally attempt to present the pointer authentication feature independent of any hardware implementation or ABI. Considerations that are implementation-specific are clearly identified throughout. + +Note that there are several different terms in use: + +- **Pointer authentication** is a target-independent language technology. + +- **ARMv8.3** is an AArch64 architecture revision that provides hardware support for pointer authentication. It is implemented on several shipping processors, including the Apple A12 and later. + +- **arm64e** is a specific ABI (not yet fully stable) for implementing pointer authentication on ARMv8.3 on certain Apple operating systems. + +This document serves four purposes: + +- It describes the basic ideas of pointer authentication. + +- It documents several language extensions that are useful on targets using pointer authentication. + +- It presents a theory of operation for the security mitigation, describing the basic requirements for correctness, various weaknesses in the mechanism, and ways in which programmers can strengthen its protections (including recommendations for language implementors). + +- It documents the language ABIs currently used for C, C++, Objective-C, and Swift on arm64e, although these are not yet stable on any target. + +Basic Concepts +-------------- + +The simple address of an object or function is a **raw pointer**. A raw pointer can be **signed** to produce a **signed pointer**. A signed pointer can then be **authenticated** in order to verify that it was **validly signed** and extract the original raw pointer. These terms reflect the most likely implementation technique: computing and storing a cryptographic signature along with the pointer. The security of pointer authentication does not rely on attackers not being able to separately overwrite the signature. + +An **abstract signing key** is a name which refers to a secret key which can be used to sign and authenticate pointers. 
The key value for a particular name is consistent throughout a process. + +A **discriminator** is an arbitrary value used to **diversify** signed pointers so that one validly-signed pointer cannot simply be copied over another. A discriminator is simply opaque data of some implementation-defined size that is included in the signature as a salt. + +Nearly all aspects of pointer authentication use just these two primary operations: + +- ``sign(raw_pointer, key, discriminator)`` produces a signed pointer given a raw pointer, an abstract signing key, and a discriminator. + +- ``auth(signed_pointer, key, discriminator)`` produces a raw pointer given a signed pointer, an abstract signing key, and a discriminator. + +``auth(sign(raw_pointer, key, discriminator), key, discriminator)`` must succeed and produce ``raw_pointer``. ``auth`` applied to a value that was ultimately produced in any other way is expected to immediately halt the program. However, it is permitted for ``auth`` to fail to detect that a signed pointer was not produced in this way, in which case it may return anything; this is what makes pointer authentication a probabilistic mitigation rather than a perfect one. + +There are two secondary operations which are required only to implement certain intrinsics in ``<ptrauth.h>``: + +- ``strip(signed_pointer, key)`` produces a raw pointer given a signed pointer and a key it was presumptively signed with. This is useful for certain kinds of tooling, such as crash backtraces; it should generally not be used in the basic language ABI except in very careful ways. + +- ``sign_generic(value)`` produces a cryptographic signature for arbitrary data, not necessarily a pointer. This is useful for efficiently verifying that non-pointer data has not been tampered with. + +Whenever any of these operations is called for, the key value must be known statically. This is because the layout of a signed pointer may vary according to the signing key. 
(For example, in ARMv8.3, the layout of a signed pointer depends on whether TBI is enabled, which can be set independently for code and data pointers.) + +.. admonition:: Note for API designers and language implementors + + These are the *primitive* operations of pointer authentication, provided for clarity of description. They are not suitable either as high-level interfaces or as primitives in a compiler IR because they expose raw pointers. Raw pointers require special attention in the language implementation to avoid the accidental creation of exploitable code sequences; see the section on `Attackable code sequences`_. + +The following details are all implementation-defined: + +- the nature of a signed pointer +- the size of a discriminator +- the number and nature of the signing keys +- the implementation of the ``sign``, ``auth``, ``strip``, and ``sign_generic`` operations + +While the use of the terms "sign" and "signed pointer" suggest the use of a cryptographic signature, other implementations may be possible. See `Alternative implementations`_ for an exploration of implementation options. + +.. admonition:: Implementation example: ARMv8.3 + + Readers may find it helpful to know how these terms map to ARMv8.3: + + - A signed pointer is a pointer with a signature stored in the otherwise-unused high bits. The kernel configures the signature width based on the system's addressing needs, accounting for whether the AArch64 TBI feature is enabled for the kind of pointer (code or data). + + - A discriminator is a 64-bit integer. Constant discriminators are 16-bit integers. Blending a constant discriminator into an address consists of replacing the top 16 bits of the address with the constant. + + - There are five 128-bit signing-key registers, each of which can only be directly read or set by privileged code. Of these, four are used for signing pointers, and the fifth is used only for ``sign_generic``. 
The key data is simply a pepper added to the hash, not an encryption key, and so can be initialized using random data. + + - ``sign`` computes a cryptographic hash of the pointer, discriminator, and signing key, and stores it in the high bits as the signature. ``auth`` removes the signature, computes the same hash, and compares the result with the stored signature. ``strip`` removes the signature without authenticating it. While ARMv8.3's ``aut*`` instructions do not themselves trap on failure, the compiler only ever emits them in sequences that will trap. + + - ``sign_generic`` corresponds to the ``pacga`` instruction, which takes two 64-bit values and produces a 64-bit cryptographic hash. Implementations of this instruction may not produce meaningful data in all bits of the result. + +Discriminators +~~~~~~~~~~~~~~ + +A discriminator is arbitrary extra data which alters the signature on a pointer. When two pointers are signed differently --- either with different keys or with different discriminators --- an attacker cannot simply replace one pointer with the other. For more information on why discriminators are important and how to use them effectively, see the section on `Substitution attacks`_. + +To use standard cryptographic terminology, a discriminator acts as a salt in the signing of a pointer, and the key data acts as a pepper. That is, both the discriminator and key data are ultimately just added as inputs to the signing algorithm along with the pointer, but they serve significantly different roles. The key data is a common secret added to every signature, whereas the discriminator is a signing-specific value that can be derived from the circumstances of how a pointer is signed. However, unlike a password salt, it's important that discriminators be *independently* derived from the circumstances of the signing; they should never simply be stored alongside a pointer. 
+ +The intrinsic interface in ``<ptrauth.h>`` allows an arbitrary discriminator value to be provided, but can only be used when running normal code. The discriminators used by language ABIs must be restricted to make it feasible for the loader to sign pointers stored in global memory without needing excessive amounts of metadata. Under these restrictions, a discriminator may consist of either or both of the following: + +- The address at which the pointer is stored in memory. A pointer signed with a discriminator which incorporates its storage address is said to have **address diversity**. In general, using address diversity means that a pointer cannot be reliably replaced by an attacker or used to reliably replace a different pointer. However, an attacker may still be able to attack a larger call sequence if they can alter the address through which the pointer is accessed. Furthermore, some situations cannot use address diversity because of language or other restrictions. + +- A constant integer, called a **constant discriminator**. A pointer signed with a non-zero constant discriminator is said to have **constant diversity**. If the discriminator is specific to a single declaration, it is said to have **declaration diversity**; if the discriminator is specific to a type of value, it is said to have **type diversity**. For example, C++ v-tables on arm64e sign their component functions using a hash of their method names and signatures, which provides declaration diversity; similarly, C++ member function pointers sign their invocation functions using a hash of the member pointer type, which provides type diversity. + +The implementation may need to restrict constant discriminators to be significantly smaller than the full size of a discriminator. For example, on arm64e, constant discriminators are only 16-bit values. This is believed to not significantly weaken the mitigation, since collisions remain uncommon. 
+ +The algorithm for blending a constant discriminator with a storage address is implementation-defined. + +.. _Signing schemas: + +Signing schemas +~~~~~~~~~~~~~~~ + +Correct use of pointer authentication requires the signing code and the authenticating code to agree about the **signing schema** for the pointer: + +- the abstract signing key with which the pointer should be signed and +- an algorithm for computing the discriminator. + +As described in the section above on `Discriminators`_, in most situations, the discriminator is produced by taking a constant discriminator and optionally blending it with the storage address of the pointer. In these situations, the signing schema breaks down even more simply: + +- the abstract signing key, +- a constant discriminator, and +- whether to use address diversity. + +It is important that the signing schema be independently derived at all signing and authentication sites. Preferably, the schema should be hard-coded everywhere it is needed, but at the very least, it must not be derived by inspecting information stored along with the pointer. See the section on `Attacks on pointer authentication`_ for more information. + + + + + +Language Features +----------------- + +There are three levels of the pointer authentication language feature: + +- The language implementation automatically signs and authenticates function pointers (and certain data pointers) across a variety of standard situations, including return addresses, function pointers, and C++ virtual functions. The intent is for all pointers to code in program memory to be signed in some way and for all branches to code in program text to authenticate those signatures. + +- The language also provides extensions to override the default rules used by the language implementation. 
For example, the ``__ptrauth`` type qualifier can be used to change how pointers are signed when they are stored in a particular variable or field; this provides much stronger protection than is guaranteed by the default rules for C function and data pointers. + +- Finally, the language provides the ``<ptrauth.h>`` intrinsic interface for manually signing and authenticating pointers in code. These can be used in circumstances where very specific behavior is required. + +Language implementation +~~~~~~~~~~~~~~~~~~~~~~~ + +For the most part, pointer authentication is an unobserved detail of the implementation of the programming language. Any element of the language implementation that would perform an indirect branch to a pointer is implicitly altered so that the pointer is signed when first constructed and authenticated when the branch is performed. This includes: + +- indirect-call features in the programming language, such as C function pointers, C++ virtual functions, C++ member function pointers, the "blocks" C extension, and so on; + +- returning from a function, no matter how it is called; and + +- indirect calls introduced by the implementation, such as branches through the global offset table (GOT) used to implement direct calls to functions defined outside of the current shared object. + +For more information about this, see the `Language ABI`_ section. + +However, some aspects of the implementation are observable by the programmer or otherwise require special notice. + +C data pointers +^^^^^^^^^^^^^^^ + +The current implementation in Clang does not sign pointers to ordinary data by default. For a partial explanation of the reasoning behind this, see the `Theory of Operation`_ section. + +A specific data pointer which is more security-sensitive than most can be signed using the `__ptrauth qualifier`_ or using the ``<ptrauth.h>`` intrinsics. 
+ +C function pointers +^^^^^^^^^^^^^^^^^^^ + +The C standard imposes restrictions on the representation and semantics of function pointer types which make it difficult to achieve satisfactory signature diversity in the default language rules. See `Attacks on pointer authentication`_ for more information about signature diversity. Programmers should strongly consider using the ``__ptrauth`` qualifier to improve the protections for important function pointers, such as the components of a hand-rolled "v-table"; see the section on the `__ptrauth qualifier`_ for details. + +The value of a pointer to a C function includes a signature, even when the value is cast to a non-function-pointer type like ``void*`` or ``intptr_t``. On implementations that use high bits to store the signature, this means that relational comparisons and hashes will vary according to the exact signature value, which is likely to change between executions of a program. In some implementations, it may also vary based on the exact function pointer type. + +Null pointers +^^^^^^^^^^^^^ + +In principle, an implementation could derive the signed null pointer value simply by applying the standard signing algorithm to the raw null pointer value. However, for likely signing algorithms, this would mean that the signed null pointer value would no longer be statically known, which would have many negative consequences. For one, it would become substantially more expensive to emit null pointer values or to perform null-pointer checks. For another, the pervasive (even if technically unportable) assumption that null pointers are bitwise zero would be invalidated, making it substantially more difficult to adopt pointer authentication, as well as weakening common optimizations for zero-initialized memory such as the use of ``.bss`` sections. Therefore it is beneficial to treat null pointers specially by giving them their usual representation. 
On AArch64, this requires additional code when working with possibly-null pointers, such as when copying a pointer field that has been signed with address diversity. + +Return addresses and frame pointers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The current implementation in Clang implicitly signs both return addresses and frame pointers. While these values are technically implementation details of a function, there are some important libraries and development tools which rely on manually walking the chain of stack frames. These tools must be updated to correctly account for pointer authentication, either by stripping signatures (if security is not important for the tool, e.g. if it is capturing a stack trace during a crash) or properly authenticating them. More information about how these values are signed is available in the `Language ABI`_ section. + +C++ virtual functions +^^^^^^^^^^^^^^^^^^^^^ + +The current implementation in Clang signs virtual function pointers with a discriminator derived from the full signature of the overridden method, including the method name and parameter types. It is possible to write C++ code that relies on v-table layout remaining constant despite changes to a method signature; for example, a parameter might be a ``typedef`` that resolves to a different type based on a build setting. Such code violates C++'s One Definition Rule (ODR), but that violation is not normally detected; however, pointer authentication will detect it. + + +Language extensions +~~~~~~~~~~~~~~~~~~~ + +Feature testing +^^^^^^^^^^^^^^^ + +Whether the current target uses pointer authentication can be tested for with a number of different tests. + +- ``__has_feature(ptrauth_intrinsics)`` is true if ```` provides its normal interface. This may be true even on targets where pointer authentication is not enabled by default. + +- ``__has_feature(ptrauth_returns)`` is true if the target uses pointer authentication to protect return addresses. 
+ +- ``__has_feature(ptrauth_calls)`` is true if the target uses pointer authentication to protect indirect branches. This implies ``__has_feature(ptrauth_returns)`` and ``__has_feature(ptrauth_intrinsics)``. + +Clang provides several other tests only for historical purposes; for current purposes they are all equivalent to ``ptrauth_calls``. + +__ptrauth qualifier +^^^^^^^^^^^^^^^^^^^ + +``__ptrauth(key, address, discriminator)`` is an extended type qualifier which causes so-qualified objects to hold pointers signed using the specified schema rather than the default schema for such types. + +In the current implementation in Clang, the qualified type must be a C pointer type, either to a function or to an object. It currently cannot be an Objective-C pointer type, a C++ reference type, or a block pointer type; these restrictions may be lifted in the future. + +The current implementation in Clang is known to not provide adequate safety guarantees against the creation of `signing oracles`_ when assigning data pointers to ``__ptrauth``-qualified gl-values. See the section on `safe derivation`_ for more information. + +The qualifier's operands are as follows: + +- ``key`` - an expression evaluating to a key value from ````; must be a constant expression + +- ``address`` - whether to use address diversity (1) or not (0); must be a constant expression with one of these two values + +- ``discriminator`` - a constant discriminator; must be a constant expression + +See `Discriminators`_ for more information about discriminators. + +Currently the operands must be constant-evaluable even within templates. In the future this restriction may be lifted to allow value-dependent expressions as long as they instantiate to a constant expression. + +Consistent with the ordinary C/C++ rule for parameters, top-level ``__ptrauth`` qualifiers on a parameter (after parameter type adjustment) are ignored when deriving the type of the function. 
The parameter will be passed using the default ABI for the unqualified pointer type. + +If ``x`` is an object of type ``__ptrauth(key, address, discriminator) T``, then the signing schema of the value stored in ``x`` is a key of ``key`` and a discriminator determined as follows: + +- if ``address`` is 0, then the discriminator is ``discriminator``; + +- if ``address`` is 1 and ``discriminator`` is 0, then the discriminator is ``&x``; otherwise + +- if ``address`` is 1 and ``discriminator`` is non-zero, then the discriminator is ``ptrauth_blend_discriminator(&x, discriminator)``; see `ptrauth_blend_discriminator`_. + +Non-triviality from address diversity ++++++++++++++++++++++++++++++++++++++ + +Address diversity must impose additional restrictions in order to allow the implementation to correctly copy values. In C++, a type qualified with address diversity is treated like a class type with non-trivial copy/move constructors and assignment operators, with the usual effect on containing classes and unions. C does not have a standard concept of non-triviality, and so we must describe the basic rules here, with the intention of imitating the emergent rules of C++: + +- A type may be **non-trivial to copy**. + +- A type may also be **illegal to copy**. Types that are illegal to copy are always non-trivial to copy. + +- A type may also be **address-sensitive**. + +- A type qualified with a ``ptrauth`` qualifier that requires address diversity is non-trivial to copy and address-sensitive. + +- An array type is illegal to copy, non-trivial to copy, or address-sensitive if its element type is illegal to copy, non-trivial to copy, or address-sensitive, respectively. + +- A struct type is illegal to copy, non-trivial to copy, or address-sensitive if it has a field whose type is illegal to copy, non-trivial to copy, or address-sensitive, respectively. + +- A union type is both illegal and non-trivial to copy if it has a field whose type is non-trivial or illegal to copy. 
+ +- A union type is address-sensitive if it has a field whose type is address-sensitive. + +- A program is ill-formed if it uses a type that is illegal to copy as a function parameter, argument, or return type. + +- A program is ill-formed if an expression requires a type to be copied that is illegal to copy. + +- Otherwise, copying a type that is non-trivial to copy correctly copies its subobjects. + +- Types that are address-sensitive must always be passed and returned indirectly. Thus, changing the address-sensitivity of a type may be ABI-breaking even if its size and alignment do not change. + +``<ptrauth.h>`` +~~~~~~~~~~~~~~~ + +This header defines the following types and operations: + +``ptrauth_key`` +^^^^^^^^^^^^^^^ + +This ``enum`` is the type of abstract signing keys. In addition to defining the set of implementation-specific signing keys (for example, ARMv8.3 defines ``ptrauth_key_asia``), it also defines some portable aliases for those keys. For example, ``ptrauth_key_function_pointer`` is the key generally used for C function pointers, which will generally be suitable for other function-signing schemas. + +In all the operation descriptions below, key values must be constant values corresponding to one of the implementation-specific abstract signing keys from this ``enum``. + +``ptrauth_extra_data_t`` +^^^^^^^^^^^^^^^^^^^^^^^^ + +This is a ``typedef`` of a standard integer type of the correct size to hold a discriminator value. + +In the signing and authentication operation descriptions below, discriminator values must have either pointer type or integer type. If the discriminator is an integer, it will be coerced to ``ptrauth_extra_data_t``. + +``ptrauth_blend_discriminator`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_blend_discriminator(pointer, integer) + +Produce a discriminator value which blends information from the given pointer and the given integer. 
+ +Implementations may ignore some bits from each value, which is to say, the blending algorithm may be chosen for speed and convenience over theoretical strength as a hash-combining algorithm. For example, arm64e simply overwrites the high 16 bits of the pointer with the low 16 bits of the integer, which can be done in a single instruction with an immediate integer. + +``pointer`` must have pointer type, and ``integer`` must have integer type. The result has type ``ptrauth_extra_data_t``. + +``ptrauth_string_discriminator`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_string_discriminator(string) + +Produce a discriminator value for the given string. ``string`` must be a string literal of ``char`` character type. The result has type ``ptrauth_extra_data_t``. + +The result is always a constant expression. The result value is never zero and always within range for both the ``__ptrauth`` qualifier and ``ptrauth_blend_discriminator``. + +``ptrauth_strip`` +^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_strip(signedPointer, key) + +Given that ``signedPointer`` matches the layout for signed pointers signed with the given key, extract the raw pointer from it. This operation does not trap and cannot fail, even if the pointer is not validly signed. + +``ptrauth_sign_constant`` +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_sign_constant(pointer, key, discriminator) + +Return a signed pointer for a constant address in a manner which guarantees a non-attackable sequence. + +``pointer`` must be a constant expression of pointer type which evaluates to a non-null pointer. The result will have the same type as ``pointer``. + +Calls to this are constant expressions if the discriminator is a null-pointer constant expression or an integer constant expression. Implementations may allow other pointer expressions as well. + +``ptrauth_sign_unauthenticated`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: c + + ptrauth_sign_unauthenticated(pointer, key, discriminator) + +Produce a signed pointer for the given raw pointer without applying any authentication or extra treatment. This operation is not required to have the same behavior on a null pointer that the language implementation would. + +This is a treacherous operation that can easily result in `signing oracles`_. Programs should use it seldom and carefully. + +``ptrauth_auth_and_resign`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_auth_and_resign(pointer, oldKey, oldDiscriminator, newKey, newDiscriminator) + +Authenticate that ``pointer`` is signed with ``oldKey`` and ``oldDiscriminator`` and then resign the raw-pointer result of that authentication with ``newKey`` and ``newDiscriminator``. + +``pointer`` must have pointer type. The result will have the same type as ``pointer``. This operation is not required to have the same behavior on a null pointer that the language implementation would. + +The code sequence produced for this operation must not be directly attackable. However, if the discriminator values are not constant integers, their computations may still be attackable. In the future, Clang should be enhanced to guarantee non-attackability if these expressions are :ref:`safely-derived`. + +``ptrauth_auth_function`` +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_auth_function(pointer, key, discriminator) + +Authenticate that ``pointer`` is signed with ``key`` and ``discriminator`` and re-sign it to the standard schema for a function pointer of its type. + +``pointer`` must have function pointer type. The result will have the same type as ``pointer``. This operation is not required to have the same behavior on a null pointer that the language implementation would. + +This operation makes the same attackability guarantees as ``ptrauth_auth_and_resign``. 
+ +If this operation appears syntactically as the function operand of a call, Clang guarantees that the call will directly authenticate the function value using the given schema rather than re-signing to the standard schema. + +``ptrauth_auth_data`` +^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_auth_data(pointer, key, discriminator) + +Authenticate that ``pointer`` is signed with ``key`` and ``discriminator`` and remove the signature. + +``pointer`` must have object pointer type. The result will have the same type as ``pointer``. This operation is not required to have the same behavior on a null pointer that the language implementation would. + +In the future when Clang makes `safe derivation`_ guarantees, the result of this operation should be considered safely-derived. + +``ptrauth_sign_generic_data`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: c + + ptrauth_sign_generic_data(value1, value2) + +Computes a signature for the given pair of values, incorporating a secret signing key. + +This operation can be used to verify that arbitrary data has not been tampered with by computing a signature for the data, storing that signature, and then repeating this process and verifying that it yields the same result. This can be reasonably done in any number of ways; for example, a library could compute an ordinary checksum of the data and just sign the result in order to get the tamper-resistance advantages of the secret signing key (since otherwise an attacker could reliably overwrite both the data and the checksum). + +``value1`` and ``value2`` must be either pointers or integers. If the integers are larger than ``uintptr_t`` then data not representable in ``uintptr_t`` may be discarded. + +The result will have type ``ptrauth_generic_signature_t``, which is an integer type. Implementations are not required to make all bits of the result equally significant; in particular, some implementations are known to not leave meaningful data in the low bits. 
+ +Standard ``__ptrauth`` qualifiers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``<ptrauth.h>`` additionally provides several macros which expand to ``__ptrauth`` qualifiers for common ABI situations. + +For convenience, these macros expand to nothing when pointer authentication is disabled. + +These macros can be found in the header; some details of these macros may be unstable or implementation-specific. + + + + + +Theory of Operation +------------------- + +The threat model of pointer authentication is as follows: + +- The attacker has the ability to read and write to a certain range of addresses, possibly the entire address space. However, they are constrained by the normal rules of the process: for example, they cannot write to memory that is mapped read-only, and if they access unmapped memory it will trigger a trap. + +- The attacker has no ability to add arbitrary executable code to the program. For example, the program does not include malicious code to begin with, and the attacker cannot alter existing instructions, load a malicious shared library, or remap writable pages as executable. If the attacker wants to get the process to perform a specific sequence of actions, they must somehow subvert the normal control flow of the process. + +In both of the above paragraphs, it is merely assumed that the attacker's *current* capabilities are restricted; that is, their current exploit does not directly give them the power to do these things. The attacker's immediate goal may well be to leverage their exploit to gain these capabilities, e.g. to load a malicious dynamic library into the process, even though the process does not directly contain code to do so. + +Note that any bug that fits the above threat model can be immediately exploited as a denial-of-service attack by simply performing an illegal access and crashing the program. Pointer authentication cannot protect against this. 
While denial-of-service attacks are unfortunate, they are also unquestionably the best possible result of a bug this severe. Therefore, pointer authentication enthusiastically embraces the idea of halting the program on a pointer authentication failure rather than continuing in a possibly-compromised state.
+
+Pointer authentication is a form of control-flow integrity (CFI) enforcement. The basic security hypothesis behind CFI enforcement is that many bugs can only be usefully exploited (other than as a denial-of-service) by leveraging them to subvert the control flow of the program. If this is true, then by inhibiting or limiting that subversion, it may be possible to largely mitigate the security consequences of those bugs by rendering them impractical (or, ideally, impossible) to exploit.
+
+Every indirect branch in a program has a purpose. Using human intelligence, a programmer can describe where a particular branch *should* go according to this purpose: a ``return`` in ``printf`` should return to the call site, a particular call in ``qsort`` should call the comparator that was passed in as an argument, and so on. But for CFI to enforce that every branch in a program goes where it *should* in this sense would require CFI to perfectly enforce every semantic rule of the program's abstract machine; that is, it would require making the programming environment perfectly sound. That is out of scope. Instead, the goal of CFI is merely to catch attempts to make a branch go somewhere that it obviously *shouldn't* for its purpose: for example, to stop a call from branching into the middle of a function rather than its beginning. As the information available to CFI gets better about the purpose of the branch, CFI can enforce tighter and tighter restrictions on where the branch is permitted to go. Still, ultimately CFI cannot make the program sound. 
This may help explain why pointer authentication makes some of the choices it does: for example, to sign and authenticate mostly code pointers rather than every pointer in the program. Preventing attackers from redirecting branches is both particularly important and particularly approachable as a goal. Detecting corruption more broadly is infeasible with these techniques, and the attempt would have far higher cost. + +Attacks on pointer authentication +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pointer authentication works as follows. Every indirect branch in a program has a purpose. For every purpose, the implementation chooses a :ref:`signing schema`. At some place where a pointer is known to be correct for its purpose, it is signed according to the purpose's schema. At every place where the pointer is needed for its purpose, it is authenticated according to the purpose's schema. If that authentication fails, the program is halted. + +There are a variety of ways to attack this. + +Attacks of interest to programmers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These attacks arise from weaknesses in the default protections offered by pointer authentication. They can be addressed by using attributes or intrinsics to opt in to stronger protection. + +Substitution attacks +++++++++++++++++++++ + +An attacker can simply overwrite a pointer intended for one purpose with a pointer intended for another purpose if both purposes use the same signing schema and that schema does not use address diversity. + +The most common source of this weakness is when code relies on using the default language rules for C function pointers. The current implementation uses the exact same signing schema for all C function pointers, even for functions of substantially different type. While efforts are ongoing to improve constant diversity for C function pointers of different type, there are necessary limits to this. 
The C standard requires function pointers to be copyable with ``memcpy``, which means that function pointers can never use address diversity. Furthermore, even if a function pointer can only be replaced with another function of the exact same type, that can still be useful to an attacker, as in the following example of a hand-rolled "v-table": + +.. code-block:: c + + struct ObjectOperations { + void (*retain)(Object *); + void (*release)(Object *); + void (*deallocate)(Object *); + void (*logStatus)(Object *); + }; + +This weakness can be mitigated by using a more specific signing schema for each purpose. For example, in this example, the ``__ptrauth`` qualifier can be used with a different constant discriminator for each field. Since there's no particular reason it's important for this v-table to be copyable with ``memcpy``, the functions can also be signed with address diversity: + +.. code-block:: c + + #if __has_feature(ptrauth_calls) + #define objectOperation(discriminator) \ + __ptrauth(ptrauth_key_function_pointer, 1, discriminator) + #else + #define objectOperation(discriminator) + #endif + + struct ObjectOperations { + void (*objectOperation(0xf017) retain)(Object *); + void (*objectOperation(0x2639) release)(Object *); + void (*objectOperation(0x8bb0) deallocate)(Object *); + void (*objectOperation(0xc5d4) logStatus)(Object *); + }; + +This weakness can also sometimes be mitigated by simply keeping the signed pointer in constant memory, but this is less effective than using better signing diversity. + +.. _Access path attacks: + +Access path attacks ++++++++++++++++++++ + +If a signed pointer is often accessed indirectly (that is, by first loading the address of the object where the signed pointer is stored), an attacker can affect uses of it by overwriting the intermediate pointer in the access path. + +The most common scenario exhibiting this weakness is an object with a pointer to a "v-table" (a structure holding many function pointers). 
An attacker does not need to replace a signed function pointer in the v-table if they can instead simply replace the v-table pointer in the object with their own pointer --- perhaps to memory where they've constructed their own v-table, or to existing memory that coincidentally happens to contain a signed pointer at the right offset that's been signed with the right signing schema.
+
+This attack arises because data pointers are not signed by default. It works even if the signed pointer uses address diversity: address diversity merely means that each pointer is signed with its own storage address, which (by design) is invariant to changes in the accessing pointer.
+
+Using sufficiently diverse signing schemas within the v-table can provide reasonably strong mitigation against this weakness. Always use address diversity in v-tables to prevent attackers from assembling their own v-table. Avoid re-using constant discriminators to prevent attackers from replacing a v-table pointer with a pointer to totally unrelated memory that just happens to contain a similarly-signed pointer.
+
+Further mitigation can be attained by signing pointers to v-tables. Any signature at all should prevent attackers from forging v-table pointers; they will need to somehow harvest an existing signed pointer from elsewhere in memory. Using a meaningful constant discriminator will force this to be harvested from an object with similar structure (e.g. a different implementation of the same interface). Using address diversity will prevent such harvesting entirely. However, care must be taken when sourcing the v-table pointer originally; do not blindly sign a pointer that is not :ref:`safely derived`.
+
+.. _Signing oracles:
+
+Signing oracles
++++++++++++++++
+
+A signing oracle is a bit of code which can be exploited by an attacker to sign an arbitrary pointer in a way that can later be recovered. 
Such oracles can be used by attackers to forge signatures matching the oracle's signing schema, which is likely to cause a total compromise of pointer authentication's effectiveness.
+
+This attack only affects ordinary programmers if they are using certain treacherous patterns of code. Currently this includes:
+
+- all uses of the ``ptrauth_sign_unauthenticated`` intrinsic and
+- assigning data pointers to ``__ptrauth``-qualified l-values.
+
+Care must be taken in these situations to ensure that the pointer being signed has been :ref:`safely derived` or is otherwise not possible to attack. (In some cases, this may be challenging without compiler support.)
+
+A diagnostic will be added in the future for implicitly dangerous patterns of code, such as assigning a non-safely-derived data pointer to a ``__ptrauth``-qualified l-value.
+
+.. _Authentication oracles:
+
+Authentication oracles
+++++++++++++++++++++++
+
+An authentication oracle is a bit of code which can be exploited by an attacker to leak whether a signed pointer is validly signed without halting the program if it isn't. Such oracles can be used to forge signatures matching the oracle's signing schema if the attacker can repeatedly invoke the oracle for different candidate signed pointers. This is likely to cause a total compromise of pointer authentication's effectiveness.
+
+There should be no way for an ordinary programmer to create an authentication oracle using the current set of operations. However, implementation flaws in the past have occasionally given rise to authentication oracles due to a failure to immediately trap on authentication failure.
+
+The likelihood of creating an authentication oracle is why there is currently no intrinsic which queries whether a signed pointer is validly signed. 
+ + +Attacks of interest to implementors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These attacks are not inherent to the model; they arise from mistakes in either implementing or using the `sign` and `auth` operations. Avoiding these mistakes requires careful work throughout the system. + +Failure to trap on authentication failure ++++++++++++++++++++++++++++++++++++++++++ + +Any failure to halt the program on an authentication failure is likely to be exploitable by attackers to create an :ref:`authentication oracle`. + +There are several different ways to introduce this problem: + +- The implementation might try to halt the program in some way that can be intercepted. + + For example, the ``auth`` instruction in ARMv8.3 does not directly trap; instead it corrupts its result so that it is always an invalid pointer. If the program subsequently attempts to use that pointer, that will be a bad memory access, and it will trap into the kernel. However, kernels do not usually immediately halt programs that trigger traps due to bad memory accesses; instead they notify the process to give it an opportunity to recover. If this happens with an ``auth`` failure, the attacker may be able to exploit the recovery path in a way that creates an oracle. Kernels should ensure that these sorts of traps are not recoverable. + +- A compiler might use an intermediate representation (IR) for ``sign`` and ``auth`` operations that cannot make adequate correctness guarantees. + + For example, suppose that an IR uses ARMv8.3-like semantics for ``auth``: the operation merely corrupts its result on failure instead of promising the trap. A frontend might emit patterns of IR that always follow an ``auth`` with a memory access, thinking that this ensures correctness. But if the IR can be transformed to insert code between the ``auth`` and the access, or if the ``auth`` can be speculated, then this potentially creates an oracle. 
It is better for ``auth`` to semantically guarantee to trap, potentially requiring an explicit check in the generated code. An ARMv8.3-like target can avoid this explicit check in the common case by recognizing the pattern of an ``auth`` followed immediately by an access. + +Attackable code sequences ++++++++++++++++++++++++++ + +If code that is part of a pointer authentication operation is interleaved with code that may itself be vulnerable to attacks, an attacker may be able to use this to create a :ref:`signing` or :ref:`authentication` oracle. + +For example, suppose that the compiler is generating a call to a function and passing two arguments: a signed constant pointer and a value derived from a call. In ARMv8.3, this code might look like so: + +.. code-block:: asm + + adr x19, _callback. ; compute &_callback + paciza x19 ; sign it with a constant discriminator of 0 + blr _argGenerator ; call _argGenerator() (returns in x0) + mov x1, x0 ; move call result to second arg register + mov x0, x19 ; move signed &_callback to first arg register + blr _function ; call _function + +This code is correct, as would be a sequencing that does *both* the ``adr`` and the ``paciza`` after the call to ``_argGenerator``. But a sequence that computes the address of ``_callback`` but leaves it as a raw pointer in a register during the call to ``_argGenerator`` would be vulnerable: + +.. code-block:: asm + + adr x19, _callback. ; compute &_callback + blr _argGenerator ; call _argGenerator() (returns in x0) + mov x1, x0 ; move call result to second arg register + paciza x19 ; sign &_callback + mov x0, x19 ; move signed &_callback to first arg register + blr _function ; call _function + +If ``_argGenerator`` spills ``x19`` (a callee-save register), and if the attacker can perform a write during this call, then the attacker can overwrite the spill slot with an arbitrary pointer that will eventually be unconditionally signed after the function returns. This would be a signing oracle. 
+ +The implementation can avoid this by obeying two basic rules: + +- The compiler's intermediate representations (IR) should not provide operations that expose intermediate raw pointers. This may require providing extra operations that perform useful combinations of operations. + + For example, there should be an "atomic" auth-and-resign operation that should be used instead of emitting an ``auth`` operation whose result is fed into a ``sign``. + + Similarly, if a pointer should be authenticated as part of doing a memory access or a call, then the access or call should be decorated with enough information to perform the authentication; there should not be a separate ``auth`` whose result is used as the pointer operand for the access or call. (In LLVM IR, we do this for calls, but not yet for loads or stores.) + + "Operations" includes things like materializing a signed pointer to a known function or global variable. The compiler must be able to recognize and emit this as a unified operation, rather than potentially splitting it up as in the example above. + +- The compiler backend should not be too aggressive about scheduling instructions that are part of a pointer authentication operation. This may require custom code-generation of these operations in some cases. + +Register clobbering ++++++++++++++++++++ + +As a refinement of the section on `Attackable code sequences`_, if the attacker has the ability to modify arbitrary *register* state at arbitrary points in the program, then special care must be taken. + +For example, ARMv8.3 might materialize a signed function pointer like so: + +.. code-block:: asm + + adr x0, _callback. ; compute &_callback + paciza x0 ; sign it with a constant discriminator of 0 + +If an attacker has the ability to overwrite ``x0`` between these two instructions, this code sequence is vulnerable to becoming a signing oracle. 
+ +For the most part, this sort of attack is not possible: it is a basic element of the design of modern computation that register state is private and inviolable. However, in systems that support asynchronous interrupts, this property requires the cooperation of the interrupt-handling code. If that code saves register state to memory, and that memory can be overwritten by an attacker, then essentially the attack can overwrite arbitrary register state at an arbitrary point. This could be a concern if the threat model includes attacks on the kernel or if the program uses user-space preemptive multitasking. + +(Readers might object that an attacker cannot rely on asynchronous interrupts triggering at an exact instruction boundary. In fact, researchers have had some success in doing exactly that. Even ignoring that, though, we should aim to protect against lucky attackers just as much as good ones.) + +To protect against this, saved register state must be at least partially signed (using something like `ptrauth_sign_generic_data`_). This is required for correctness anyway because saved thread states include security-critical registers such as SP, FP, PC, and LR (where applicable). Ideally, this signature would cover all the registers, but since saving and restoring registers can be very performance-sensitive, that may not be acceptable. It is sufficient to set aside a small number of scratch registers that will be guaranteed to be preserved correctly; the compiler can then be careful to only store critical values like intermediate raw pointers in those registers. + +``setjmp`` and ``longjmp`` should sign and authenticate the core registers (SP, FP, PC, and LR), but they do not need to worry about intermediate values because ``setjmp`` can only be called synchronously, and the compiler should never schedule pointer-authentication operations interleaved with arbitrary calls. + +.. 
_Relative addresses: + +Attacks on relative addressing +++++++++++++++++++++++++++++++ + +Relative addressing is a technique used to compress and reduce the load-time cost of infrequently-used global data. The pointer authentication system is unlikely to support signing or authenticating a relative address, and in most cases it would defeat the point to do so: it would take additional storage space, and applying the signature would take extra work at load time. + +Relative addressing is not precluded by the use of pointer authentication, but it does take extra considerations to make it secure: + +- Relative addresses must only be stored in read-only memory. A writable relative address can be overwritten to point nearly anywhere, making it inherently insecure; this danger can only be compensated for with techniques for protecting arbitrary data like `ptrauth_sign_generic_data`_. + +- Relative addresses must only be accessed through signed pointers with adequate diversity. If an attacker can perform an `access path attack` to replace the pointer through which the relative address is accessed, they can easily cause the relative address to point wherever they want. + +Signature forging ++++++++++++++++++ + +If an attacker can exactly reproduce the behavior of the signing algorithm, and they know all the correct inputs to it, then they can perfectly forge a signature on an arbitrary pointer. + +There are three components to avoiding this mistake: + +- The abstract signing algorithm should be good: it should not have glaring flaws which would allow attackers to predict its result with better than random accuracy without knowing all the inputs (like the key values). + +- The key values should be kept secret. If at all possible, they should never be stored in accessible memory, or perhaps only stored encrypted. + +- Contexts that are meant to be independently protected should use different key values. For example, the kernel should not use the same keys as user processes. 
Different user processes should also use different keys from each other as much as possible, although this may pose its own technical challenges.
+
+Remapping
++++++++++
+
+If an attacker can change the memory protections on certain pages of the program's memory, that can substantially weaken the protections afforded by pointer authentication.
+
+- If an attacker can inject their own executable code, they can also certainly inject code that can be used as a :ref:`signing oracle`. The same is true if they can write to the instruction stream.
+
+- If an attacker can remap read-only program sections to be writable, then any use of :ref:`relative addresses` in global data becomes insecure.
+
+- If an attacker can remap read-only program sections to be writable, then it is unsafe to use unsigned pointers in `global offset tables`_.
+
+Remapping memory in this way often requires the attacker to have already substantively subverted the control flow of the process. Nonetheless, if the operating system has a mechanism for mapping pages in a way that cannot be remapped, this should be used wherever possible.
+
+
+
+.. _Safe Derivation:
+
+Safe derivation
+~~~~~~~~~~~~~~~
+
+Whether a data pointer is stored, even briefly, as a raw pointer can affect the security-correctness of a program. (Function pointers are never implicitly stored as raw pointers; raw pointers to functions can only be produced with the ``<ptrauth.h>`` intrinsics.) Repeated re-signing can also impact performance. Clang makes a modest set of guarantees in this area:
+
+- An expression of pointer type is said to be **safely derived** if:
+
+  - it takes the address of a global variable or function, or
+
+  - it is a load from a gl-value of ``__ptrauth``-qualified type.
+
+- If a value that is safely derived is assigned to a ``__ptrauth``-qualified object, including by initialization, then the value will be directly signed as appropriate for the target qualifier and will not be stored as a raw pointer. 
+ +- If the function expression of a call is a gl-value of ``__ptrauth``-qualified type, then the call will be authenticated directly according to the source qualifier and will not be resigned to the default rule for a function pointer of its type. + +These guarantees are known to be inadequate for data pointer security. In particular, Clang should be enhanced to make the following guarantees: + +- A pointer should additionally be considered safely derived if it is: + + - the address of a gl-value that is safely derived, + + - the result of pointer arithmetic on a pointer that is safely derived (with some restrictions on the integer operand), + + - the result of a comma operator where the second operand is safely derived, + + - the result of a conditional operator where the selected operand is safely derived, or + + - the result of loading from a safely derived gl-value. + +- A gl-value should be considered safely derived if it is: + + - a dereference of a safely derived pointer, + + - a member access into a safely derived gl-value, or + + - a reference to a variable. + +- An access to a safely derived gl-value should be guaranteed to not allow replacement of any of the safely-derived component values at any point in the access. "Access" should include loading a function pointer. + +- Assignments should include pointer-arithmetic operators like ``+=``. + +Making these guarantees will require further work, including significant new support in LLVM IR. + +Furthermore, Clang should implement a warning when assigning a data pointer that is not safely derived to a ``__ptrauth``-qualified gl-value. + + + +Language ABI +------------ + +This section describes the pointer-authentication ABI currently implemented in Clang for the Apple arm64e target. As other targets adopt pointer authentication, this section should be generalized to express their ABIs as well. 
+
+Key assignments
+~~~~~~~~~~~~~~~
+
+ARMv8.3 provides four abstract signing keys: ``IA``, ``IB``, ``DA``, and ``DB``. The architecture designates ``IA`` and ``IB`` for signing code pointers and ``DA`` and ``DB`` for signing data pointers; this is reinforced by two properties:
+
+- The ISA provides instructions that perform combined auth+call and auth+load operations; these instructions can only use the ``I`` keys and ``D`` keys, respectively.
+
+- AArch64's TBI feature can be separately enabled for code pointers (controlling whether indirect-branch instructions ignore those bits) and data pointers (controlling whether memory-access instructions ignore those bits). If TBI is enabled for a kind of pointer, the sign and auth operations preserve the TBI bits when signing with an associated key (at the cost of shrinking the number of available signing bits by 8).
+
+arm64e then further subdivides the keys as follows:
+
+- The ``A`` keys are used for primarily "global" purposes like signing v-tables and function pointers. These keys are sometimes called *process-independent* or *cross-process* because on existing OSes they are not changed when changing processes, although this is not a platform guarantee.
+
+- The ``B`` keys are used for primarily "local" purposes like signing return addresses and frame pointers. These keys are sometimes called *process-specific* because they are typically different between processes. However, they are in fact shared across processes in one situation: systems which provide ``fork`` cannot change these keys in the child process; they can only be changed during ``exec``.
+
+Implementation-defined algorithms and quantities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The cryptographic hash algorithm used to compute signatures in ARMv8.3 is a private detail of the hardware implementation.
+
+arm64e restricts constant discriminators (used in ``__ptrauth`` and ``ptrauth_blend_discriminator``) to the range from 0 to 65535, inclusive. 
A 0 discriminator generally signifies that no blending is required; see the documentation for ``ptrauth_blend_discriminator``. This range is somewhat narrow but has two advantages: + +- The AArch64 ISA allows an arbitrary 16-bit immediate to be written over the top 16 bits of a register in a single instruction: + + .. code-block:: asm + + movk xN, #0x4849, LSL 48 + + This is ideal for the discriminator blending operation because it adds minimal code-size overhead and avoids overwriting any interesting bits from the pointer. Blending in a wider constant discriminator would either clobber interesting bits (e.g. if it was loaded with ``movk xN, #0x4c4f, LSL 32``) or require significantly more code (e.g. if the discriminator was loaded with a ``mov+bfi`` sequence). + +- It is possible to pack a 16-bit discriminator into loader metadata with minimal compromises, whereas a wider discriminator would require extra metadata storage and therefore significantly impact load times. + +The string hash used by ``ptrauth_string_discriminator`` is a 64-bit SipHash-2-4 using the constant seed ``b5d4c9eb79104a796fec8b1b428781d4`` (big-endian), with the result reduced by modulo to the range of non-zero discriminators (i.e. ``(rawHash % 65535) + 1``). + +Return addresses +~~~~~~~~~~~~~~~~ + +The kernel must ensure that attackers cannot replace LR due to an asynchronous exception; see `Register clobbering`_. If this is done by generally protecting LR, then functions which don't spill LR to the stack can avoid signing it entirely. Otherwise, the return address must be signed; on arm64e it is signed with the ``IB`` key using the stack pointer on entry as the discriminator. + +Protecting return addresses is of such particular importance that the ``IB`` key is almost entirely reserved for this purpose. 
+ +Global offset tables +~~~~~~~~~~~~~~~~~~~~ + +The global offset table (GOT) is not ABI, but it is a common implementation technique for dynamic linking which deserves special discussion here. + +Whenever possible, signed pointers should be materialized directly in code rather than via the GOT, e.g. using an ``adrp+add+pac`` sequence on ARMv8.3. This decreases the amount of work necessary at load time to initialize the GOT, but more importantly, it defines away the potential for several attacks: + +- Attackers cannot change instructions, so there is no way to cause this code sequence to materialize a different pointer, whereas an access via the GOT always has *at minimum* a probabilistic chance to be the target of successful `substitution attacks`_. + +- The GOT is a dense pool of fixed pointers at a fixed offset relative to code; attackers can search this pool for useful pointers that can be used in `substitution attacks`_, whereas pointers that are only materialized directly are not so easily available. + +- Similarly, attackers can use `access path attacks`_ to replace a pointer to a signed pointer with a pointer to the GOT if the signing schema used within the GOT happens to be the same as the original pointer. This kind of collision becomes much less likely to be useful the fewer pointers are in the GOT in the first place. + +If this can be done for a symbol, then the compiler need only ensure that it materializes the signed pointer using registers that are safe against `register clobbering`_. + +However, many symbols can only be accessed via the GOT, e.g. because they resolve to definitions outside of the current image. In this case, care must be taken to ensure that using the GOT does not introduce weaknesses. + +- If the entire GOT can be mapped read-only after loading, then no signing is required within the GOT. 
In fact, not signing pointers in the GOT is preferable in this case because it makes the GOT useless for the harvesting and access-path attacks above. Storing raw pointers in this way is usually extremely unsafe, but for the special case of an immutable GOT entry it's fine because the GOT is always accessed via an address that is directly materialized in code and thus provably unattackable. (But see `Remapping`_.) + +- Otherwise, GOT entries which are used for producing a signed pointer constant must be signed. The signing schema used in the GOT need not match the target signing schema for the signed constant. To counteract the threats of substitution attacks, it's best if GOT entries can be signed with address diversity. Using a good constant discriminator as well (perhaps derived from the symbol name) can make it less useful to use a pointer to the GOT as the replacement in an :ref:`access path attack`. + +In either case, the compiler must ensure that materializing the address of a GOT entry as part of producing a signed pointer constant is not vulnerable to `register clobbering`_. If the linker also generates code for this, e.g. for call stubs, this generated code must take the same precautions. + +C function pointers +~~~~~~~~~~~~~~~~~~~ + +On arm64e, C function pointers are currently signed with the ``IA`` key without address diversity and with a constant discriminator of 0. + +The C and C++ standards do not permit C function pointers to be signed with address diversity by default: in C++ terms, function pointer types are required to be trivially copyable, which means they must be copyable with ``memcpy``. + +The use of a uniform constant discriminator is seen as a serious defect which should be remedied, and improving this is under investigation. + +C++ virtual tables +~~~~~~~~~~~~~~~~~~ + +The pointer to a C++ virtual table is currently signed with the ``DA`` key, no address diversity, and a constant discriminator of 0. 
The use of no address diversity, as well as the uniform constant discriminator, are seen as weaknesses. Not using address diversity allows attackers to simply copy valid v-table pointers from one object to another. However, using a uniform discriminator of 0 does have positive performance and code-size implications on ARMv8.3, and diversity for the most important v-table access pattern (virtual dispatch) is already better assured by the signing schemas used on the virtual functions. It is also known that some code in practice copies objects containing v-tables with ``memcpy``, and while this is not permitted formally, it is something that may be invasive to eliminate. + +Virtual functions in a C++ virtual table are signed with the ``IA`` key, address diversity, and a constant discriminator equal to the string hash (see `ptrauth_string_discriminator`_) of the mangled name of the function which originally gave rise to the v-table slot. + +C++ member function pointers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A member function pointer is signed with the ``IA`` key, no address diversity, and a constant discriminator equal to the string hash (see `ptrauth_string_discriminator`_) of the member pointer type. Address diversity is not permitted by C++ for member function pointers because they must be trivially-copyable types. + +The Itanium C++ ABI specifies that member function pointers to virtual functions simply store an offset to the correct v-table slot. This ABI cannot be used securely with pointer authentication because there is no safe place to store the constant discriminator for the target v-table slot: if it's stored with the offset, an attacker can simply overwrite it with the right discriminator for the offset. Even if the programmer never uses pointers to virtual functions, the existence of this code path makes all member function pointer dereferences insecure. 
+ +arm64e changes this ABI so that virtual function pointers are stored using dispatch thunks with vague linkage. Because arm64e supports interoperation with ``arm64`` code when pointer authentication is disabled, an arm64e member function pointer dereference still recognizes the virtual-function representation but uses a bogus discriminator on that path that should always trap if pointer authentication is enabled dynamically. + +The use of dispatch thunks means that ``==`` on member function pointers is no longer reliable for virtual functions, but this is acceptable because the standard makes no guarantees about it in the first place. + +The use of dispatch thunks also potentially enables v-tables to be signed using a declaration-specific constant discriminator in the future; otherwise this discriminator would also need to be stored in the member pointer. + +Blocks +~~~~~~ + +Block pointers are data pointers which must interoperate with the ObjC `id` type and therefore cannot be signed themselves. + +The invocation pointer in a block is signed with the ``IA`` key using address diversity and a constant discriminator of 0. Using a uniform discriminator is seen as a weakness to be potentially improved, but this is tricky due to the subtype polymorphism directly permitted for blocks. + +Block descriptors and ``__block`` variables can contain pointers to functions that can be used to copy or destroy the object. These functions are signed with the ``IA`` key, address diversity, and a constant discriminator of 0. The structure of block descriptors is under consideration for improvement. + +Objective-C methods +~~~~~~~~~~~~~~~~~~~ + +Objective-C method lists sign methods with the ``IA`` key using address diversity and a constant discriminator of 0. Using a uniform constant discriminator is believed to be acceptable because these tables are only accessed internally to the Objective-C runtime. 
+ +The Objective-C runtime provides additional protection to methods that have been loaded into the Objective-C method cache; this protection is private to the runtime. + +Pointer authentication cannot protect against access-path attacks against the Objective-C ``isa`` pointer, through which all dispatch occurs, because of compatibility requirements and existing and important usage of high bits in the pointer. + +Swift class methods +~~~~~~~~~~~~~~~~~~~ + +Class methods in Swift are signed in the class object with the ``IA`` key using address diversity and a constant discriminator equal to the string hash (see `ptrauth_string_discriminator`_) of the mangling of the original overridable method. + +Resilient class-method lookup relies on passing a method descriptor; this method descriptor should be signed but currently isn't. The lookup function returns a function pointer that is signed using ``IA`` without address diversity and with the correct constant discriminator for the looked-up method. + +Swift's equivalent of a C++ v-table pointer is the ``isa`` pointer of an object. On arm64e, this is constrained by Objective-C compatibility and cannot be a signed pointer. + +Swift heap destructors +~~~~~~~~~~~~~~~~~~~~~~ + +Objects that are retained and released with Swift's native reference-counting system, including both native classes and temporary "box" allocations, must provide a destructor function in their metadata. This destructor function is signed with the ``IA`` key using address diversity and a constant discriminator of ``0xbbbf``. + +Swift protocol requirements +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Protocol function requirements are signed in the protocol witness table with the ``IA`` key using address diversity and a constant discriminator equal to the string hash (see `ptrauth_string_discriminator`_) of the mangling of the protocol requirement. 
+ +Swift function types +~~~~~~~~~~~~~~~~~~~~ + +The invocation pointers of Swift function values are signed using the ``IA`` key without address diversity and with a constant discriminator derived loosely from the function type. + +Address diversity cannot be used by default for function values because function types are intended to be a "loadable" type which can be held and passed in registers. + +The constant discriminator currently accounts for potential abstraction in the function signature in ways that decrease the diversity of signatures; improving this is under investigation. + +Swift metadata +~~~~~~~~~~~~~~ + +Type metadata pointers in Swift are not signed. + +Type context descriptors must be signed because they frequently contain `relative addresses`_. Type context descriptors are signed with the ``DA`` key without address diversity (except when stored in type metadata) and with a constant discriminator of ``0xae86``. + +Swift value witnesses +~~~~~~~~~~~~~~~~~~~~~ + +Value witness functions in Swift are signed in the value witness table using the ``IA`` key with address diversity and an operation-specific constant discriminator which can be found in the Swift project headers. + +Swift coroutines +~~~~~~~~~~~~~~~~ + +Resumption functions for Swift coroutines are signed using the ``IA`` key without address diversity and with a constant discriminator derived from the yield type of the coroutine. Resumption functions cannot be signed with address diversity as they are returned directly in registers from the coroutine. + + + + + +Alternative implementations +--------------------------- + +Signature storage +~~~~~~~~~~~~~~~~~ + +It is not critical for the security of pointer authentication that the signature be stored "together" with the pointer, as it is in ARMv8.3. An implementation could just as well store the signature in a separate word, so that the ``sizeof`` a signed pointer would be larger than the ``sizeof`` a raw pointer. 
+ +Storing the signature in the high bits, as ARMv8.3 does, has several trade-offs: + +- Disadvantage: there are substantially fewer bits available for the signature, weakening the mitigation by making it much easier for an attacker to simply guess the correct signature. + +- Disadvantage: future growth of the address space will necessarily further weaken the mitigation. + +- Advantage: memory layouts don't change, so it's possible for pointer-authentication-enabled code (for example, in a system library) to efficiently interoperate with existing code, as long as pointer authentication can be disabled dynamically. + +- Advantage: the size of a signed pointer doesn't grow; a larger representation might significantly increase memory requirements, code size, and register pressure. + +- Advantage: the size of a signed pointer is the same as a raw pointer, so generic APIs which work in types like `void *` (such as `dlsym`) can still return signed pointers. This means that clients of these APIs will not require insecure code in order to correctly receive a function pointer. + +Hashing vs. encrypting pointers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +ARMv8.3 implements ``sign`` by computing a cryptographic hash and storing that in the spare bits of the pointer. This means that there are relatively few possible values for the valid signed pointer, since the bits corresponding to the raw pointer are known. Together with an ``auth`` oracle, this can make it computationally feasible to discover the correct signature with brute force. (The implementation should of course endeavor not to introduce ``auth`` oracles, but this can be difficult, and attackers can be devious.) + +If the implementation can instead *encrypt* the pointer during ``sign`` and *decrypt* it during ``auth``, this brute-force attack becomes far less feasible, even with an ``auth`` oracle. 
However, there are several problems with this idea: + +- It's unclear whether this kind of encryption is even possible without increasing the storage size of a signed pointer. If the storage size can be increased, brute-force attacks can be equally well mitigated by simply storing a larger signature. + +- It would likely be impossible to implement a ``strip`` operation, which might make debuggers and other out-of-process tools far more difficult to write, as well as generally making primitive debugging more challenging. + +- Implementations can benefit from being able to extract the raw pointer immediately from a signed pointer. An ARMv8.3 processor executing an ``auth``-and-load instruction can perform the load and ``auth`` in parallel; a processor which instead encrypted the pointer would be forced to perform these operations serially. diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h index 5e2f4031d96cce..7f69439ff24843 100644 --- a/clang/include/clang/AST/ASTContext.h +++ b/clang/include/clang/AST/ASTContext.h @@ -1147,6 +1147,9 @@ class ASTContext : public RefCountedBase { /// space. QualType removeAddrSpaceQualType(QualType T) const; + /// Return the "other" type-specific discriminator for the given type. + uint16_t getPointerAuthTypeDiscriminator(QualType T); + /// Apply Objective-C protocol qualifiers to the given type. /// \param allowOnPointerType specifies if we can apply protocol /// qualifiers on ObjCObjectPointerType. It can be set to true when @@ -1983,6 +1986,16 @@ class ASTContext : public RefCountedBase { return getQualifiedType(type.getUnqualifiedType(), Qs); } + /// \brief Return a type with the given __ptrauth qualifier. 
+ QualType getPointerAuthType(QualType type, PointerAuthQualifier pointerAuth) { + assert(!type.getPointerAuth()); + assert(pointerAuth); + + Qualifiers qs; + qs.setPointerAuth(pointerAuth); + return getQualifiedType(type, qs); + } + unsigned char getFixedPointScale(QualType Ty) const; unsigned char getFixedPointIBits(QualType Ty) const; FixedPointSemantics getFixedPointSemantics(QualType Ty) const; diff --git a/clang/include/clang/AST/GlobalDecl.h b/clang/include/clang/AST/GlobalDecl.h index 145e961a23a389..5ceb5e24f47a6d 100644 --- a/clang/include/clang/AST/GlobalDecl.h +++ b/clang/include/clang/AST/GlobalDecl.h @@ -106,6 +106,10 @@ class GlobalDecl { LHS.MultiVersionIndex == RHS.MultiVersionIndex; } + bool operator!=(const GlobalDecl &Other) const { + return !(*this == Other); + } + void *getAsOpaquePtr() const { return Value.getOpaqueValue(); } static GlobalDecl getFromOpaquePtr(void *P) { diff --git a/clang/include/clang/AST/NonTrivialTypeVisitor.h b/clang/include/clang/AST/NonTrivialTypeVisitor.h index aafcedb9d10b80..ce389178b7b221 100644 --- a/clang/include/clang/AST/NonTrivialTypeVisitor.h +++ b/clang/include/clang/AST/NonTrivialTypeVisitor.h @@ -93,6 +93,8 @@ struct CopiedTypeVisitor { return asDerived().visitARCStrong(FT, std::forward(Args)...); case QualType::PCK_ARCWeak: return asDerived().visitARCWeak(FT, std::forward(Args)...); + case QualType::PCK_PtrAuth: + return asDerived().visitPtrAuth(FT, std::forward(Args)...); case QualType::PCK_Struct: return asDerived().visitStruct(FT, std::forward(Args)...); case QualType::PCK_Trivial: diff --git a/clang/include/clang/AST/StableHash.h b/clang/include/clang/AST/StableHash.h new file mode 100644 index 00000000000000..5c6f900a37289e --- /dev/null +++ b/clang/include/clang/AST/StableHash.h @@ -0,0 +1,46 @@ +//===--- StableHash.h - An ABI-stable string hash ---------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// The interface to an ABI-stable string hash algorithm. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_AST_STABLEHASH_H +#define CLANG_AST_STABLEHASH_H + +#include + +namespace llvm { +class StringRef; +} + +namespace clang { +class ASTContext; + +/// Compute a stable 64-bit hash of the given string. +/// +/// The exact algorithm is the little-endian interpretation of the +/// non-doubled (i.e. 64-bit) result of applying a SipHash-2-4 using +/// a specific key value which can be found in the source. +/// +/// By "stable" we mean that the result of this hash algorithm will be +/// the same across different compiler versions and target platforms. +uint64_t getStableStringHash(llvm::StringRef string); + +/// Compute a pointer-auth extra discriminator for the given string, +/// suitable for both the blend operation and the __ptrauth qualifier. +/// +/// The result of this hash will be the same across different compiler +/// versions but may vary between targets due to differences in the +/// range of discriminators desired by the target. +uint64_t getPointerAuthStringDiscriminator(const ASTContext &ctx, + llvm::StringRef string); + +} // end namespace clang + +#endif diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h index ecbbd73e19fb45..93a0746cd117ee 100644 --- a/clang/include/clang/AST/Type.h +++ b/clang/include/clang/AST/Type.h @@ -128,6 +128,99 @@ using CanQualType = CanQual; #define TYPE(Class, Base) class Class##Type; #include "clang/AST/TypeNodes.inc" +/// Pointer-authentication qualifiers. 
+class PointerAuthQualifier { + enum { + EnabledShift = 0, + EnabledBits = 1, + EnabledMask = 1 << EnabledShift, + AddressDiscriminatedShift = EnabledShift + EnabledBits, + AddressDiscriminatedBits = 1, + AddressDiscriminatedMask = 1 << AddressDiscriminatedBits, + KeyShift = AddressDiscriminatedShift + AddressDiscriminatedBits, + KeyBits = 14, + KeyMask = ((1 << KeyBits) - 1) << KeyShift, + DiscriminatorShift = KeyShift + KeyBits, + DiscriminatorBits = 16 + }; + + // bits: |0 |1 |2..15|16 ... 31| + // |Enabled|Address|Key |Discriminator| + uint32_t Data; + +public: + enum { + /// The maximum supported pointer-authentication key. + MaxKey = (1u << KeyBits) - 1, + + /// The maximum supported pointer-authentication discriminator. + MaxDiscriminator = (1u << DiscriminatorBits) - 1 + }; + +public: + PointerAuthQualifier() : Data(0) {} + PointerAuthQualifier(unsigned key, bool isAddressDiscriminated, + unsigned extraDiscriminator) + : Data(EnabledMask + | (isAddressDiscriminated ? AddressDiscriminatedMask : 0) + | (key << KeyShift) + | (extraDiscriminator << DiscriminatorShift)) { + assert(key <= MaxKey); + assert(extraDiscriminator <= MaxDiscriminator); + } + + bool isPresent() const { + return Data != 0; + } + + explicit operator bool() const { + return isPresent(); + } + + unsigned getKey() const { + assert(isPresent()); + return (Data & KeyMask) >> KeyShift; + } + + bool isAddressDiscriminated() const { + assert(isPresent()); + return (Data & AddressDiscriminatedMask) >> AddressDiscriminatedShift; + } + + unsigned getExtraDiscriminator() const { + assert(isPresent()); + return (Data >> DiscriminatorShift); + } + + friend bool operator==(PointerAuthQualifier lhs, PointerAuthQualifier rhs) { + return lhs.Data == rhs.Data; + } + friend bool operator!=(PointerAuthQualifier lhs, PointerAuthQualifier rhs) { + return lhs.Data != rhs.Data; + } + + uint32_t getAsOpaqueValue() const { + return Data; + } + + // Deserialize pointer-auth qualifiers from an opaque representation. 
+ static PointerAuthQualifier fromOpaqueValue(uint32_t opaque) { + PointerAuthQualifier result; + result.Data = opaque; + return result; + } + + std::string getAsString() const; + std::string getAsString(const PrintingPolicy &Policy) const; + + bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const; + void print(raw_ostream &OS, const PrintingPolicy &Policy) const; + + void Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger(Data); + } +}; + /// The collection of all-type qualifiers we support. /// Clang supports five independent qualifiers: /// * C99: const, volatile, and restrict @@ -183,6 +276,8 @@ class Qualifiers { FastMask = (1 << FastWidth) - 1 }; + Qualifiers() : Mask(0), PtrAuth() {} + /// Returns the common set of qualifiers while removing them from /// the given sets. static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) { @@ -218,6 +313,13 @@ class Qualifiers { L.removeAddressSpace(); R.removeAddressSpace(); } + + if (L.PtrAuth == R.PtrAuth) { + Q.PtrAuth = L.PtrAuth; + L.PtrAuth = PointerAuthQualifier(); + R.PtrAuth = PointerAuthQualifier(); + } + return Q; } @@ -240,15 +342,16 @@ class Qualifiers { } // Deserialize qualifiers from an opaque representation. - static Qualifiers fromOpaqueValue(unsigned opaque) { + static Qualifiers fromOpaqueValue(uint64_t opaque) { Qualifiers Qs; - Qs.Mask = opaque; + Qs.Mask = uint32_t(opaque); + Qs.PtrAuth = PointerAuthQualifier::fromOpaqueValue(uint32_t(opaque >> 32)); return Qs; } // Serialize these qualifiers into an opaque representation. 
- unsigned getAsOpaqueValue() const { - return Mask; + uint64_t getAsOpaqueValue() const { + return uint64_t(Mask) | (uint64_t(PtrAuth.getAsOpaqueValue()) << 32); } bool hasConst() const { return Mask & Const; } @@ -381,6 +484,16 @@ class Qualifiers { setAddressSpace(space); } + PointerAuthQualifier getPointerAuth() const { + return PtrAuth; + } + void setPointerAuth(PointerAuthQualifier q) { + PtrAuth = q; + } + void removePtrAuth() { + PtrAuth = PointerAuthQualifier(); + } + // Fast qualifiers are those that can be allocated directly // on a QualType object. bool hasFastQualifiers() const { return getFastQualifiers(); } @@ -403,7 +516,9 @@ class Qualifiers { /// Return true if the set contains any qualifiers which require an ExtQuals /// node to be allocated. - bool hasNonFastQualifiers() const { return Mask & ~FastMask; } + bool hasNonFastQualifiers() const { + return (Mask & ~FastMask) || PtrAuth; + } Qualifiers getNonFastQualifiers() const { Qualifiers Quals = *this; Quals.setFastQualifiers(0); @@ -411,8 +526,8 @@ class Qualifiers { } /// Return true if the set contains any qualifiers. - bool hasQualifiers() const { return Mask; } - bool empty() const { return !Mask; } + bool hasQualifiers() const { return Mask || PtrAuth; } + bool empty() const { return !hasQualifiers(); } /// Add the qualifiers from the given set to this set. void addQualifiers(Qualifiers Q) { @@ -429,6 +544,9 @@ class Qualifiers { if (Q.hasObjCLifetime()) addObjCLifetime(Q.getObjCLifetime()); } + + if (Q.PtrAuth) + PtrAuth = Q.PtrAuth; } /// Remove the qualifiers from the given set from this set. 
@@ -446,6 +564,9 @@ class Qualifiers { if (getAddressSpace() == Q.getAddressSpace()) removeAddressSpace(); } + + if (PtrAuth == Q.PtrAuth) + PtrAuth = PointerAuthQualifier(); } /// Add the qualifiers from the given set to this set, given that @@ -457,7 +578,10 @@ class Qualifiers { !hasObjCGCAttr() || !qs.hasObjCGCAttr()); assert(getObjCLifetime() == qs.getObjCLifetime() || !hasObjCLifetime() || !qs.hasObjCLifetime()); + assert(!PtrAuth || !qs.PtrAuth || PtrAuth == qs.PtrAuth); Mask |= qs.Mask; + if (qs.PtrAuth) + PtrAuth = qs.PtrAuth; } /// Returns true if address space A is equal to or a superset of B. @@ -490,6 +614,8 @@ class Qualifiers { // be changed. (getObjCGCAttr() == other.getObjCGCAttr() || !hasObjCGCAttr() || !other.hasObjCGCAttr()) && + // Pointer-auth qualifiers must match exactly. + PtrAuth == other.PtrAuth && // ObjC lifetime qualifiers must match exactly. getObjCLifetime() == other.getObjCLifetime() && // CVR qualifiers may subset. @@ -522,8 +648,12 @@ class Qualifiers { /// another set of qualifiers, not considering qualifier compatibility. 
bool isStrictSupersetOf(Qualifiers Other) const; - bool operator==(Qualifiers Other) const { return Mask == Other.Mask; } - bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; } + bool operator==(Qualifiers Other) const { + return Mask == Other.Mask && PtrAuth == Other.PtrAuth; + } + bool operator!=(Qualifiers Other) const { + return Mask != Other.Mask || PtrAuth != Other.PtrAuth; + } explicit operator bool() const { return hasQualifiers(); } @@ -559,6 +689,7 @@ class Qualifiers { void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Mask); + PtrAuth.Profile(ID); } private: @@ -566,6 +697,8 @@ class Qualifiers { // |C R V|U|GCAttr|Lifetime|AddressSpace| uint32_t Mask = 0; + PointerAuthQualifier PtrAuth; + static const uint32_t UMask = 0x8; static const uint32_t UShift = 3; static const uint32_t GCAttrMask = 0x30; @@ -1078,6 +1211,14 @@ class QualType { // true when Type is objc's weak and weak is enabled but ARC isn't. bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const; + PointerAuthQualifier getPointerAuth() const; + + bool hasAddressDiscriminatedPointerAuth() const { + if (auto ptrauth = getPointerAuth()) + return ptrauth.isAddressDiscriminated(); + return false; + } + enum PrimitiveDefaultInitializeKind { /// The type does not fall into any of the following categories. Note that /// this case is zero-valued so that values of this enum can be used as a @@ -1123,6 +1264,9 @@ class QualType { /// with the ARC __weak qualifier. PCK_ARCWeak, + /// The type is an address-discriminated signed pointer type. + PCK_PtrAuth, + /// The type is a struct containing a field whose type is neither /// PCK_Trivial nor PCK_VolatileTrivial. /// Note that a C++ struct type does not necessarily match this; C++ copying @@ -6270,6 +6414,11 @@ inline Qualifiers::GC QualType::getObjCGCAttr() const { return getQualifiers().getObjCGCAttr(); } +/// Return the pointer-auth qualifier of this type. 
+inline PointerAuthQualifier QualType::getPointerAuth() const { + return getQualifiers().getPointerAuth(); +} + inline bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion() const { if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl()) return hasNonTrivialToPrimitiveDefaultInitializeCUnion(RD); diff --git a/clang/include/clang/AST/VTableBuilder.h b/clang/include/clang/AST/VTableBuilder.h index 43c84292c09157..3a98c7ca08798a 100644 --- a/clang/include/clang/AST/VTableBuilder.h +++ b/clang/include/clang/AST/VTableBuilder.h @@ -345,6 +345,10 @@ class VTableContextBase { }; class ItaniumVTableContext : public VTableContextBase { +public: + typedef llvm::DenseMap + OriginalMethodMapTy; + private: /// Contains the index (relative to the vtable address point) @@ -368,6 +372,10 @@ class ItaniumVTableContext : public VTableContextBase { VirtualBaseClassOffsetOffsetsMapTy; VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets; + /// Map from a virtual method to the nearest method in the primary base class + /// chain that it overrides. + OriginalMethodMapTy OriginalMethodMap; + void computeVTableRelatedInformation(const CXXRecordDecl *RD) override; public: @@ -399,6 +407,27 @@ class ItaniumVTableContext : public VTableContextBase { CharUnits getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, const CXXRecordDecl *VBase); + /// Return the method that added the v-table slot that will be used to call + /// the given method. + /// + /// In the Itanium ABI, where overrides always cause methods to be added to + /// the primary v-table if they're not already there, this will be the first + /// declaration in the primary base class chain for which the return type + /// adjustment is trivial. 
+ GlobalDecl findOriginalMethod(GlobalDecl GD); + + const CXXMethodDecl *findOriginalMethodInMap(const CXXMethodDecl *MD) const; + + void setOriginalMethod(const CXXMethodDecl *Key, const CXXMethodDecl *Val) { + OriginalMethodMap[Key] = Val; + } + + /// This method is reserved for the implementation and shouldn't be used + /// directly. + const OriginalMethodMapTy &getOriginalMethodMap() { + return OriginalMethodMap; + } + static bool classof(const VTableContextBase *VT) { return !VT->isMicrosoft(); } diff --git a/clang/include/clang/Basic/ABI.h b/clang/include/clang/Basic/ABI.h index 2401ffa20494e9..b367bae66de909 100644 --- a/clang/include/clang/Basic/ABI.h +++ b/clang/include/clang/Basic/ABI.h @@ -184,7 +184,10 @@ struct ThunkInfo { /// Holds a pointer to the overridden method this thunk is for, /// if needed by the ABI to distinguish different thunks with equal - /// adjustments. Otherwise, null. + /// adjustments. + /// In the Itanium ABI, this field can hold the method that created the + /// vtable entry for this thunk. + /// Otherwise, null. /// CAUTION: In the unlikely event you need to sort ThunkInfos, consider using /// an ABI-specific comparator. 
const CXXMethodDecl *Method; diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td index 653e069c28097a..d860885df840a0 100644 --- a/clang/include/clang/Basic/Attr.td +++ b/clang/include/clang/Basic/Attr.td @@ -2406,6 +2406,14 @@ def ObjCRequiresPropertyDefs : InheritableAttr { let Documentation = [Undocumented]; } +def PointerAuth : TypeAttr { + let Spellings = [Keyword<"__ptrauth">]; + let Args = [IntArgument<"Key">, + BoolArgument<"AddressDiscriminated", 1>, + IntArgument<"ExtraDiscriminator", 1>]; + let Documentation = [PtrAuthDocs]; +} + def Unused : InheritableAttr { let Spellings = [CXX11<"", "maybe_unused", 201603>, GCC<"unused">, C2x<"", "maybe_unused">]; diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td index 2ba1d908d809ce..9e6d96a6bb74bf 100644 --- a/clang/include/clang/Basic/AttrDocs.td +++ b/clang/include/clang/Basic/AttrDocs.td @@ -1350,6 +1350,165 @@ Also see the documentation for `@available }]; } +def PtrAuthDocs : Documentation { + let Category = DocCatVariable; + let Content = [{ +The ``__ptrauth`` qualifier allows the programmer to directly control +how pointers are signed when they are stored in a particular variable. +This can be used to strengthen the default protections of pointer +authentication and make it more difficult for an attacker to escalate +an ability to alter memory into full control of a process. + +.. code-block:: c + + #include + + typedef void (*my_callback)(const void*); + my_callback __ptrauth(ptrauth_key_process_dependent_code, 1, 0xe27a) callback; + +The first argument to ``__ptrauth`` is the name of the signing key. +Valid key names for the target are defined in ````. 
+ +On ARM64, there are four keys: + +- ``ptrauth_key_process_independent_data`` +- ``ptrauth_key_process_dependent_data`` +- ``ptrauth_key_process_independent_code`` +- ``ptrauth_key_process_dependent_code`` + +In general, prefer using a code key for function pointers and a data key +for object pointers. The ARM64 architecture allows loads and calls to +execute more efficiently when the pointer is signed with an appropriate +key. Using code keys only for function pointers also substantially lessens +the risk of creating a so-called "signing oracle" for function pointers; +see the general pointer authentication language documentation. + +Using a process-dependent key provides stronger protection against +cross-process attacks. However, it also inhibits certain memory +optimizations when a shared library is loaded into multiple processes. +Using a process-independent key also allows signed pointers to be passed +in shared memory. Note that even the process-independent keys may change +after a reboot, so signed values should never be serialized. + +The second argument to ``__ptrauth`` is a flag (0 or 1) specifying whether +the object should use address discrimination. If only one argument is +given, the flag defaults to 0. Address discrimination provides strong +protection against attacks which copy signed pointers around in memory. +An attacker cannot usefully copy an arbitrary signed pointer over an +address-discriminated object. Nor can a value taken from an +address-discriminated object be usefully copied over some other signed +pointer. However, it is more expensive to copy values from one +address-discriminated object to another, even if the other arguments to +``__ptrauth`` are the same, and it is not valid to copy them with +``memcpy``. It is also not valid to map memory containing an +address-discriminated object into different places in the address +space, e.g. with ``mmap``. 
+ +The third argument to ``__ptrauth`` is a small non-negative integer +which allows additional discrimination between objects. Using a +unique extra discriminator provides strong protection against attacks +which work by substituting one signed value for another. For example, +an attacker cannot usefully overwrite an object with a pointer from an +object using a different extra discriminator; this protection is similar +to the protection offered by address discrimination. A unique extra +discriminator also protects against "slide" attacks where an attacker +alters a pointer instead of altering the memory that the pointer points to. +The extra discriminator must be a constant expression. On ARM64, +its value must be between 0 and 65535. If the argument is not provided, +the default value is 0. It is generally preferable not to use the value 0, +especially with the process-independent keys, as this combination is used +in various places in the standard language ABI. + +The type qualified by ``__ptrauth`` must be a pointer type. Currently +only C pointer types are allowed and not block pointers, Objective-C +object pointers, or C++ references. ``__ptrauth`` is parsed and interpreted +using the same language rules as qualifiers like ``const`` and ``volatile``. +For example: + +.. code-block:: c + + __ptrauth(...) int *ex0; /* invalid: qualifies 'int', which is not a pointer type */ + int * __ptrauth(...) ex1; /* valid: ex1 has qualified type */ + int * __ptrauth(...) *ex2; /* valid: ex2 is a pointer to a qualified object */ + + typedef int *intp; + __ptrauth(...) intp ex3; /* valid: ex3 has qualified type */ + intp __ptrauth(...) ex4; /* valid: means the exact same thing as ex3 */ + +Assigning a non-null pointer to a ``__ptrauth``-qualified l-value, or +initializing a ``__ptrauth``-qualified object with a non-null pointer, +causes the pointer to be signed according to the described schema before +being stored into memory. 
If such an initialization is a constant +initialization, then the signing is also done as part of constant +initialization: that is, it is done when the program is loaded, before +any dynamic initialization occurs. Loading a non-null pointer from a +``__ptrauth``-qualified l-value causes the pointer to be authenticated +according to the described schema before being produced as the result +of the expression. A null pointer keeps its standard representation when +stored in a ``__ptrauth``-qualified object; on a typical target where this +is an all-zero pattern, this means that operations like ``bzero`` and +``calloc`` do still correctly initialize objects with null. + +If a ``__ptrauth``-qualified l-value of function pointer type is +used as the function operand of a call expression, the function pointer +will be authenticated "atomically" with the call, such that an attacker +will not be able to corrupt the destination of the call even in the +presence of undefined behavior. (That is, the compiler must not +leave an un-signed pointer that it will later unconditionally trust +in a place where it could be feasibly overwritten by an attacker, +such as the stack or a callee-save register during an intervening call. +The compiler is not required to protect against improbable attacks +such as corruption of the register file, as might occur with a +corrupted kernel. It also need not guard against jumps to an arbitrary +place in the instruction stream, since such jumps would require an +attacker to already fully control the PC.) + +If the ABI specifies that a pointer is always signed --- that is, +if the pointer is a function pointer and the target uses ABI function +pointer authentication --- then signing and authenticating it as part +of a load/store actually means resigning it to/from the standard ABI +signature schema. 
Similarly, if both operands of a simple assignment +operator are ``__ptrauth``-qualified, the pointer copied by the +assignment is resigned from the right-hand operand's schema to the +left-hand operand's schema. These resigning operations are also done +"atomically" in the same sense as above. + +As a final guarantee, if the right-hand operand of an assignment or +the expression used to initialize a ``__ptrauth``-qualified object is +a direct reference to an object or function (e.g. ``&my_var``), the +signing of that pointer is atomic with the evaluation of the reference +in this same sense. + +Otherwise, there are no guarantees of atomicity, and it is the +programmer's responsibility to avoid allowing a store into a +``__ptrauth``-qualified object to create a potential "signing oracle" +which an attacker could use to sign an arbitrary pointer of their choice. +Such oracles are particularly problematic when the signing uses a code +key because the oracle could potentially be used to allow an attacker +to construct a validly-signed function pointer, v-table entry, or +return address that points to an arbitrary instruction, allowing them +to completely take over the PC. Programmers attempting to use +``__ptrauth`` to protect a data pointer, or to protect function pointers +on targets that do not use ABI function pointer authentication, should +aim to maintain a "chain of authentication" from initialization all +the way to the point at which the pointer is used. If this is infeasible, +they should consider using ``ptrauth_sign_generic_data`` instead. + +Types that are written in r-value positions, such as return types, +parameter types, and cast types, may not be ``__ptrauth``-qualified +at the outermost level. This may be supported in the future. + +In C++, the arguments to ``__ptrauth`` may not be instantiation-dependent. +This may be supported in the future. + +This feature may be tested for with ``__has_feature(ptrauth_qualifier)``. 
+It is enabled whenever the ``ptrauth`` intrinsics are enabled. + +```` provides predefined qualifiers for various language +features that implicitly use pointer authentication. + }]; +} + def ExternalSourceSymbolDocs : Documentation { let Category = DocCatDecl; let Content = [{ diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def index 4ed00a13b0043c..b4161c2d0c3a6d 100644 --- a/clang/include/clang/Basic/Builtins.def +++ b/clang/include/clang/Basic/Builtins.def @@ -1499,6 +1499,16 @@ BUILTIN(__builtin_coro_end, "bv*Ib", "n") BUILTIN(__builtin_coro_suspend, "cIb", "n") BUILTIN(__builtin_coro_param, "bv*v*", "n") +// Pointer authentication builtins. +BUILTIN(__builtin_ptrauth_strip, "v*v*i", "tnc") +BUILTIN(__builtin_ptrauth_blend_discriminator, "zv*i", "tnc") +BUILTIN(__builtin_ptrauth_sign_constant, "v*v*iv*", "tnc") +BUILTIN(__builtin_ptrauth_sign_unauthenticated, "v*v*iv*", "tnc") +BUILTIN(__builtin_ptrauth_sign_generic_data, "zv*v*", "tnc") +BUILTIN(__builtin_ptrauth_auth_and_resign, "v*v*iv*iv*", "tn") +BUILTIN(__builtin_ptrauth_auth, "v*v*iv*", "tn") +BUILTIN(__builtin_ptrauth_string_discriminator, "zcC*", "nc") + // OpenCL v2.0 s6.13.16, s9.17.3.5 - Pipe functions. // We need the generic prototype, since the packet type could be anything. LANGBUILTIN(read_pipe, "i.", "tn", OCLC20_LANG) diff --git a/clang/include/clang/Basic/CodeGenOptions.h b/clang/include/clang/Basic/CodeGenOptions.h index 8881a316d1fb82..f91877c33366ed 100644 --- a/clang/include/clang/Basic/CodeGenOptions.h +++ b/clang/include/clang/Basic/CodeGenOptions.h @@ -14,6 +14,7 @@ #define LLVM_CLANG_BASIC_CODEGENOPTIONS_H #include "clang/Basic/DebugInfoOptions.h" +#include "clang/Basic/PointerAuthOptions.h" #include "clang/Basic/Sanitizers.h" #include "clang/Basic/XRayInstr.h" #include "llvm/Support/CodeGen.h" @@ -297,6 +298,9 @@ class CodeGenOptions : public CodeGenOptionsBase { std::vector Reciprocals; + /// Configuration for pointer-signing. 
+ PointerAuthOptions PointerAuth; + /// The preferred width for auto-vectorization transforms. This is intended to /// override default transforms based on the width of the architected vector /// registers. diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td index 5ff03e13356364..10a14a2cd0ac66 100644 --- a/clang/include/clang/Basic/DiagnosticDriverKinds.td +++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td @@ -237,6 +237,9 @@ def err_drv_omp_host_ir_file_not_found : Error< "The provided host compiler IR file '%0' is required to generate code for OpenMP target regions but cannot be found.">; def err_drv_omp_host_target_not_supported : Error< "The target '%0' is not a supported OpenMP host target.">; +def err_drv_ptrauth_not_supported : Error< + "target '%0' does not support native pointer authentication">; + def err_drv_expecting_fopenmp_with_fopenmp_targets : Error< "The option -fopenmp-targets must be used in conjunction with a -fopenmp option compatible with offloading, please use -fopenmp=libomp or -fopenmp=libiomp5.">; def warn_drv_omp_offload_target_duplicate : Warning< diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td index a8d04af1e52494..a7bba469ac817a 100644 --- a/clang/include/clang/Basic/DiagnosticGroups.td +++ b/clang/include/clang/Basic/DiagnosticGroups.td @@ -690,6 +690,7 @@ def ZeroLengthArray : DiagGroup<"zero-length-array">; def GNUZeroLineDirective : DiagGroup<"gnu-zero-line-directive">; def GNUZeroVariadicMacroArguments : DiagGroup<"gnu-zero-variadic-macro-arguments">; def Fallback : DiagGroup<"fallback">; +def PtrAuthNullPointers : DiagGroup<"ptrauth-null-pointers">; // This covers both the deprecated case (in C++98) // and the extension case (in C++11 onwards). 
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index 542aad909d8be4..5dbc9c0316bc02 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -736,6 +736,56 @@ def warn_fortify_source_size_mismatch : Warning< "'%0' size argument is too large; destination buffer has size %1," " but size argument is %2">, InGroup; +def err_ptrauth_disabled_target : + Error<"this target does not support pointer authentication">; +def err_ptrauth_disabled : + Error<"pointer authentication is disabled for the current target">; +def err_ptrauth_invalid_key : + Error<"%0 does not identify a valid pointer authentication key for " + "the current target">; +def err_ptrauth_value_bad_type : + Error<"%select{signed value|extra discriminator|blended pointer|blended " + "integer}0 must have %select{pointer|integer|pointer or integer}1 " + "type; type here is %2">; +def err_ptrauth_bad_constant_pointer : + Error<"argument to ptrauth_sign_constant must refer to a global variable " + "or function">; +def err_ptrauth_bad_constant_discriminator : + Error<"discriminator argument to ptrauth_sign_constant must be a constant " + "integer, the address of the global variable where the result " + "will be stored, or a blend of the two">; +def warn_ptrauth_sign_null_pointer : + Warning<"signing a null pointer will yield a non-null pointer">, + InGroup; +def warn_ptrauth_auth_null_pointer : + Warning<"authenticating a null pointer will almost certainly trap">, + InGroup; +def err_ptrauth_string_not_literal : Error< + "argument must be a string literal%select{| of char type}0">; +def err_ptrauth_type_disc_variably_modified : Error< + "cannot pass variably-modified type %0 to " + "'__builtin_ptrauth_type_discriminator'">; + +// __ptrauth qualifier +def err_ptrauth_qualifier_return : Error< + "return types may not be qualified with __ptrauth; type is %0">; +def 
err_ptrauth_qualifier_param : Error< + "parameter types may not be qualified with __ptrauth; type is %0">; +def err_ptrauth_qualifier_cast : Error< + "cast types may not be qualified with __ptrauth; type is %0">; +def err_ptrauth_qualifier_nonpointer : Error< + "__ptrauth qualifier may only be applied to pointer types; type here is %0">; +def err_ptrauth_qualifier_redundant : Error< + "type %0 is already __ptrauth-qualified">; +def err_ptrauth_qualifier_bad_arg_count : Error< + "__ptrauth qualifier must take between 1 and 3 arguments">; +def err_ptrauth_qualifier_arg_not_ice : Error< + "argument to __ptrauth must be an integer constant expression">; +def err_ptrauth_qualifier_address_discrimination_invalid : Error< + "address discrimination flag for __ptrauth must be 0 or 1; value is %0">; +def err_ptrauth_qualifier_extra_discriminator_invalid : Error< + "extra discriminator for __ptrauth must between 0 and %1; value is %0">; + /// main() // static main() is not an error in C, just in C++. 
def warn_static_main : Warning<"'main' should not be declared static">, @@ -4940,7 +4990,7 @@ def note_deleted_special_member_class_subobject : Note< "%select{default|corresponding|default|default|default}4 constructor}0|" "destructor}5" "%select{||s||}4" - "|is an ObjC pointer}6">; + "|is an ObjC pointer|has an address-discriminated ptrauth qualifier}6">; def note_deleted_default_ctor_uninit_field : Note< "%select{default constructor of|constructor inherited by}0 " "%1 is implicitly deleted because field %2 of " @@ -7356,6 +7406,19 @@ def err_typecheck_incompatible_ownership : Error< "sending to parameter of different type}0,1" "|%diff{casting $ to type $|casting between types}0,1}2" " changes retain/release properties of pointer">; +def err_typecheck_incompatible_ptrauth : Error< + "%select{%diff{assigning $ to $|assigning to different types}1,0" + "|%diff{passing $ to parameter of type $|" + "passing to parameter of different type}0,1" + "|%diff{returning $ from a function with result type $|" + "returning from function with different return type}0,1" + "|%diff{converting $ to type $|converting between types}0,1" + "|%diff{initializing $ with an expression of type $|" + "initializing with expression of different type}0,1" + "|%diff{sending $ to parameter of type $|" + "sending to parameter of different type}0,1" + "|%diff{casting $ to type $|casting between types}0,1}2" + " changes pointer-authentication of pointee type">; def err_typecheck_comparison_of_distinct_blocks : Error< "comparison of distinct block types%diff{ ($ and $)|}0,1">; @@ -7660,6 +7723,8 @@ def ext_typecheck_cond_pointer_integer_mismatch : ExtWarn< "pointer/integer type mismatch in conditional expression" "%diff{ ($ and $)|}0,1">, InGroup>; +def err_typecheck_cond_incompatible_ptrauth : Error< + "__ptrauth qualification mismatch%diff{ ($ and $)|}0,1">; def err_typecheck_choose_expr_requires_constant : Error< "'__builtin_choose_expr' requires a constant expression">; def warn_unused_expr : 
Warning<"expression result unused">, diff --git a/clang/include/clang/Basic/Features.def b/clang/include/clang/Basic/Features.def index aa7ec50f5c912c..0f7b9b9290946a 100644 --- a/clang/include/clang/Basic/Features.def +++ b/clang/include/clang/Basic/Features.def @@ -91,6 +91,11 @@ FEATURE(memory_sanitizer, SanitizerKind::KernelMemory)) FEATURE(thread_sanitizer, LangOpts.Sanitize.has(SanitizerKind::Thread)) FEATURE(dataflow_sanitizer, LangOpts.Sanitize.has(SanitizerKind::DataFlow)) +FEATURE(ptrauth_intrinsics, LangOpts.PointerAuthIntrinsics) +FEATURE(ptrauth_qualifier, LangOpts.PointerAuthIntrinsics) +FEATURE(ptrauth_calls, LangOpts.PointerAuthCalls) +FEATURE(ptrauth_returns, LangOpts.PointerAuthReturns) +FEATURE(ptrauth_indirect_gotos, LangOpts.PointerAuthIndirectGotos) FEATURE(scudo, LangOpts.Sanitize.hasOneOf(SanitizerKind::Scudo)) // Objective-C features FEATURE(objc_arr, LangOpts.ObjCAutoRefCount) // FIXME: REMOVE? diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index 9d10d2068acbb1..8b6e57df8c82fe 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -143,6 +143,12 @@ LANGOPT(GNUAsm , 1, 1, "GNU-style inline assembly") LANGOPT(Coroutines , 1, 0, "C++20 coroutines") LANGOPT(DllExportInlines , 1, 1, "dllexported classes dllexport inline methods") LANGOPT(RelaxedTemplateTemplateArgs, 1, 0, "C++17 relaxed matching of template template arguments") +LANGOPT(PointerAuthIntrinsics, 1, 0, "pointer authentication intrinsics") +LANGOPT(PointerAuthCalls , 1, 0, "function pointer authentication") +LANGOPT(PointerAuthReturns, 1, 0, "return pointer authentication") +LANGOPT(PointerAuthIndirectGotos, 1, 0, "indirect gotos pointer authentication") +LANGOPT(PointerAuthAuthTraps, 1, 0, "pointer authentication failure traps") +LANGOPT(SoftPointerAuth , 1, 0, "software emulation of pointer authentication") LANGOPT(DoubleSquareBracketAttributes, 1, 0, "'[[]]' attributes extension 
for all language standard modes") diff --git a/clang/include/clang/Basic/PointerAuthOptions.h b/clang/include/clang/Basic/PointerAuthOptions.h new file mode 100644 index 00000000000000..d712c17ee2bea4 --- /dev/null +++ b/clang/include/clang/Basic/PointerAuthOptions.h @@ -0,0 +1,212 @@ +//===--- PointerAuthOptions.h -----------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines options for configuring pointer-auth technologies +// like ARMv8.3. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_BASIC_POINTERAUTHOPTIONS_H +#define LLVM_CLANG_BASIC_POINTERAUTHOPTIONS_H + +#include "clang/Basic/LLVM.h" +#include "llvm/ADT/Optional.h" +#include "llvm/Target/TargetOptions.h" +#include +#include +#include +#include +#include "llvm/Support/ErrorHandling.h" + +namespace clang { + +class PointerAuthSchema { +public: + enum class Kind { + None, + Soft, + ARM8_3, + }; + + /// Software pointer-signing "keys". + enum class SoftKey { + FunctionPointers = 0, + BlockInvocationFunctionPointers = 1, + BlockHelperFunctionPointers = 2, + ObjCMethodListFunctionPointers = 3, + CXXVTablePointers = 4, + CXXVirtualFunctionPointers = 5, + CXXMemberFunctionPointers = 6, + }; + + /// Hardware pointer-signing keys in ARM8.3. + /// + /// These values are the same used in ptrauth.h. + enum class ARM8_3Key { + ASIA = 0, + ASIB = 1, + ASDA = 2, + ASDB = 3 + }; + + /// Forms of extra discrimination. + enum class Discrimination { + /// No additional discrimination. + None, + + /// Include a hash of the entity's type. + Type, + + /// Include a hash of the entity's identity. 
+ Decl, + }; + +private: + enum { + NumKindBits = 2 + }; + union { + /// A common header shared by all pointer authentication kinds. + struct { + unsigned Kind : NumKindBits; + unsigned AddressDiscriminated : 1; + unsigned Discrimination : 2; + } Common; + + struct { + unsigned Kind : NumKindBits; + unsigned AddressDiscriminated : 1; + unsigned Discrimination : 2; + unsigned Key : 3; + } Soft; + + struct { + unsigned Kind : NumKindBits; + unsigned AddressDiscriminated : 1; + unsigned Discrimination : 2; + unsigned Key : 2; + } ARM8_3; + }; + +public: + PointerAuthSchema() { + Common.Kind = unsigned(Kind::None); + } + + PointerAuthSchema(SoftKey key, bool isAddressDiscriminated, + Discrimination otherDiscrimination) { + Common.Kind = unsigned(Kind::Soft); + Common.AddressDiscriminated = isAddressDiscriminated; + Common.Discrimination = unsigned(otherDiscrimination); + Soft.Key = unsigned(key); + } + + PointerAuthSchema(ARM8_3Key key, bool isAddressDiscriminated, + Discrimination otherDiscrimination) { + Common.Kind = unsigned(Kind::ARM8_3); + Common.AddressDiscriminated = isAddressDiscriminated; + Common.Discrimination = unsigned(otherDiscrimination); + ARM8_3.Key = unsigned(key); + } + + Kind getKind() const { + return Kind(Common.Kind); + } + + explicit operator bool() const { + return isEnabled(); + } + + bool isEnabled() const { + return getKind() != Kind::None; + } + + bool isAddressDiscriminated() const { + assert(getKind() != Kind::None); + return Common.AddressDiscriminated; + } + + bool hasOtherDiscrimination() const { + return getOtherDiscrimination() != Discrimination::None; + } + + Discrimination getOtherDiscrimination() const { + assert(getKind() != Kind::None); + return Discrimination(Common.Discrimination); + } + + unsigned getKey() const { + switch (getKind()) { + case Kind::None: llvm_unreachable("calling getKey() on disabled schema"); + case Kind::Soft: return unsigned(getSoftKey()); + case Kind::ARM8_3: return unsigned(getARM8_3Key()); + } + 
llvm_unreachable("bad key kind"); + } + + SoftKey getSoftKey() const { + assert(getKind() == Kind::Soft); + return SoftKey(Soft.Key); + } + + ARM8_3Key getARM8_3Key() const { + assert(getKind() == Kind::ARM8_3); + return ARM8_3Key(ARM8_3.Key); + } +}; + + +struct PointerAuthOptions { + /// Do member function pointers to virtual functions need to be built + /// as thunks? + bool ThunkCXXVirtualMemberPointers = false; + + /// Should return addresses be authenticated? + bool ReturnAddresses = false; + + /// Do indirect goto label addresses need to be authenticated? + bool IndirectGotos = false; + + /// Do authentication failures cause a trap? + bool AuthTraps = false; + + /// The ABI for C function pointers. + PointerAuthSchema FunctionPointers; + + /// The ABI for block invocation function pointers. + PointerAuthSchema BlockInvocationFunctionPointers; + + /// The ABI for block object copy/destroy function pointers. + PointerAuthSchema BlockHelperFunctionPointers; + + /// The ABI for __block variable copy/destroy function pointers. + PointerAuthSchema BlockByrefHelperFunctionPointers; + + /// The ABI for Objective-C method lists. + PointerAuthSchema ObjCMethodListFunctionPointers; + + /// The ABI for C++ virtual table pointers (the pointer to the table + /// itself) as installed in an actual class instance. + PointerAuthSchema CXXVTablePointers; + + /// The ABI for C++ virtual table pointers as installed in a VTT. + PointerAuthSchema CXXVTTVTablePointers; + + /// The ABI for most C++ virtual function pointers, i.e. v-table entries. + PointerAuthSchema CXXVirtualFunctionPointers; + + /// The ABI for variadic C++ virtual function pointers. + PointerAuthSchema CXXVirtualVariadicFunctionPointers; + + /// The ABI for C++ member function pointers. 
+ PointerAuthSchema CXXMemberFunctionPointers; +}; + +} // end namespace clang + +#endif diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h index 9a3bb986930ef7..a30c60cc98f997 100644 --- a/clang/include/clang/Basic/TargetInfo.h +++ b/clang/include/clang/Basic/TargetInfo.h @@ -197,6 +197,8 @@ class TargetInfo : public virtual TransferrableTargetInfo, unsigned HasAArch64SVETypes : 1; + unsigned PointerAuthSupported : 1; + // TargetInfo Constructor. Default initializes all fields. TargetInfo(const llvm::Triple &T); @@ -1180,6 +1182,14 @@ class TargetInfo : public virtual TransferrableTargetInfo, return TLSSupported; } + /// \brief Whether the target supports pointer authentication at all. + /// + /// Whether pointer authentication is actually being used is determined + /// by the language option. + bool isPointerAuthSupported() const { + return PointerAuthSupported; + } + /// Return the maximum alignment (in bits) of a TLS variable /// /// Gets the maximum alignment (in bits) of a TLS variable on this target. @@ -1224,6 +1234,11 @@ class TargetInfo : public virtual TransferrableTargetInfo, const LangASMap &getAddressSpaceMap() const { return *AddrSpaceMap; } + /// Determine whether the given pointer-authentication key is valid. + /// + /// The value has been coerced to type 'int'. + virtual bool validatePointerAuthKey(const llvm::APSInt &value) const; + /// Map from the address space field in builtin description strings to the /// language address space. 
virtual LangAS getOpenCLBuiltinAddressSpace(unsigned AS) const { diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def index 94fe1ba63a9f64..71bf6c54df5a25 100644 --- a/clang/include/clang/Basic/TokenKinds.def +++ b/clang/include/clang/Basic/TokenKinds.def @@ -314,7 +314,7 @@ KEYWORD(_Thread_local , KEYALL) KEYWORD(__func__ , KEYALL) KEYWORD(__objc_yes , KEYALL) KEYWORD(__objc_no , KEYALL) - +KEYWORD(__ptrauth , KEYALL) // C++ 2.11p1: Keywords. KEYWORD(asm , KEYCXX|KEYGNU) @@ -529,6 +529,8 @@ KEYWORD(__array_extent , KEYCXX) KEYWORD(__private_extern__ , KEYALL) KEYWORD(__module_private__ , KEYALL) +KEYWORD(__builtin_ptrauth_type_discriminator, KEYALL) + // Extension that will be enabled for Microsoft, Borland and PS4, but can be // disabled via '-fno-declspec'. KEYWORD(__declspec , 0) diff --git a/clang/include/clang/Basic/TypeTraits.h b/clang/include/clang/Basic/TypeTraits.h index 7c1b571f640c2c..ff162240f19aa4 100644 --- a/clang/include/clang/Basic/TypeTraits.h +++ b/clang/include/clang/Basic/TypeTraits.h @@ -104,6 +104,8 @@ namespace clang { /// __alignof returns the preferred alignment of a type, the alignment /// clang will attempt to give an object of the type if allowed by ABI. 
UETT_PreferredAlignOf, + /// __builtin_ptrauth_type_discriminator + UETT_PtrAuthTypeDiscriminator, }; } diff --git a/clang/include/clang/CodeGen/CodeGenABITypes.h b/clang/include/clang/CodeGen/CodeGenABITypes.h index 31f0cea572324c..a8457ac349cd66 100644 --- a/clang/include/clang/CodeGen/CodeGenABITypes.h +++ b/clang/include/clang/CodeGen/CodeGenABITypes.h @@ -28,6 +28,7 @@ #include "clang/CodeGen/CGFunctionInfo.h" namespace llvm { + class Constant; class DataLayout; class Module; class Function; @@ -42,6 +43,7 @@ class CXXMethodDecl; class CodeGenOptions; class CoverageSourceInfo; class DiagnosticsEngine; +class GlobalDecl; class HeaderSearchOptions; class ObjCMethodDecl; class PreprocessorOptions; @@ -84,6 +86,26 @@ llvm::Type *convertTypeForMemory(CodeGenModule &CGM, QualType T); unsigned getLLVMFieldNumber(CodeGenModule &CGM, const RecordDecl *RD, const FieldDecl *FD); +/// Compute a stable hash of the given string. +/// +/// The exact algorithm is the little-endian interpretation of the +/// non-doubled (i.e. 64-bit) result of applying a SipHash-2-4 using +/// a specific key value which can be found in the source. +uint64_t computeStableStringHash(StringRef string); + +/// Return a declaration discriminator for the given global decl. +uint16_t getPointerAuthDeclDiscriminator(CodeGenModule &CGM, GlobalDecl GD); + +/// Return a type discriminator for the given function type. +uint16_t getPointerAuthTypeDiscriminator(CodeGenModule &CGM, QualType fnType); + +/// Return a signed constant pointer. +llvm::Constant *getConstantSignedPointer(CodeGenModule &CGM, + llvm::Constant *pointer, + unsigned key, + llvm::Constant *storageAddress, + llvm::Constant *otherDiscriminator); + /// Returns the default constructor for a C struct with non-trivially copyable /// fields, generating it if necessary. 
The returned function uses the `cdecl` /// calling convention, returns void, and takes a single argument that is a diff --git a/clang/include/clang/CodeGen/ConstantInitBuilder.h b/clang/include/clang/CodeGen/ConstantInitBuilder.h index fd07e91ba6ae23..87cc00957196b3 100644 --- a/clang/include/clang/CodeGen/ConstantInitBuilder.h +++ b/clang/include/clang/CodeGen/ConstantInitBuilder.h @@ -25,8 +25,11 @@ #include namespace clang { -namespace CodeGen { +class GlobalDecl; +class PointerAuthSchema; +class QualType; +namespace CodeGen { class CodeGenModule; /// A convenience builder class for complex constant initializers, @@ -199,6 +202,17 @@ class ConstantAggregateBuilderBase { add(llvm::ConstantInt::get(intTy, value, isSigned)); } + /// Add a signed pointer using the given pointer authentication schema. + void addSignedPointer(llvm::Constant *pointer, + const PointerAuthSchema &schema, GlobalDecl calleeDecl, + QualType calleeType); + + /// Add a signed pointer using the given pointer authentication schema. + void addSignedPointer(llvm::Constant *pointer, + unsigned key, + bool useAddressDiscrimination, + llvm::Constant *otherDiscriminator); + /// Add a null pointer of a specific type. 
void addNullPointer(llvm::PointerType *ptrTy) { add(llvm::ConstantPointerNull::get(ptrTy)); diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index db39f3a1de13a7..08db77e4c80959 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1950,6 +1950,29 @@ def fstrict_return : Flag<["-"], "fstrict-return">, Group, def fno_strict_return : Flag<["-"], "fno-strict-return">, Group, Flags<[CC1Option]>; +let Group = f_Group in { + let Flags = [CC1Option] in { + def fptrauth_intrinsics : Flag<["-"], "fptrauth-intrinsics">, + HelpText<"Enable pointer-authentication intrinsics">; + def fptrauth_calls : Flag<["-"], "fptrauth-calls">, + HelpText<"Enable signing and authentication of all indirect calls">; + def fptrauth_returns : Flag<["-"], "fptrauth-returns">, + HelpText<"Enable signing and authentication of return addresses">; + def fptrauth_indirect_gotos : Flag<["-"], "fptrauth-indirect-gotos">, + HelpText<"Enable signing and authentication of indirect goto targets">; + def fptrauth_auth_traps : Flag<["-"], "fptrauth-auth-traps">, + HelpText<"Enable traps on authentication failures">; + def fptrauth_soft : Flag<["-"], "fptrauth-soft">, + HelpText<"Enable software lowering of pointer authentication">; + } + def fno_ptrauth_intrinsics : Flag<["-"], "fno-ptrauth-intrinsics">; + def fno_ptrauth_calls : Flag<["-"], "fno-ptrauth-calls">; + def fno_ptrauth_returns : Flag<["-"], "fno-ptrauth-returns">; + def fno_ptrauth_indirect_gotos : Flag<["-"], "fno-ptrauth-indirect-gotos">; + def fno_ptrauth_auth_traps : Flag<["-"], "fno-ptrauth-auth-traps">; + def fno_ptrauth_soft : Flag<["-"], "fno-ptrauth-soft">; +} + def fallow_editor_placeholders : Flag<["-"], "fallow-editor-placeholders">, Group, Flags<[CC1Option]>, HelpText<"Treat editor placeholders as valid source code">; diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h index 77e5183517c6f4..15d800a293ea1e 100644 --- 
a/clang/include/clang/Parse/Parser.h +++ b/clang/include/clang/Parse/Parser.h @@ -2632,6 +2632,8 @@ class Parser : public CodeCompletionHandler { void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); + void ParsePtrauthQualifier(ParsedAttributes &Attrs); + VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); @@ -3104,6 +3106,8 @@ class Parser : public CodeCompletionHandler { ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); + ExprResult ParseBuiltinPtrauthTypeDiscriminator(); + //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index 67cc9401dacf6e..52fd2fb6c9583d 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -2139,6 +2139,9 @@ class Sema { SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); + void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); + bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); + static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 5cccf7859794f7..b03bfe41da2c90 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -37,6 +37,7 @@ #include "clang/AST/RawCommentList.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/RecursiveASTVisitor.h" +#include "clang/AST/StableHash.h" #include "clang/AST/Stmt.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" @@ -2781,6 +2782,16 @@ QualType 
ASTContext::removeAddrSpaceQualType(QualType T) const { return QualType(TypeNode, Quals.getFastQualifiers()); } +uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) { + assert(!T->isDependentType() && + "cannot compute type discriminator of a dependent type"); + SmallString<256> Str; + llvm::raw_svector_ostream Out(Str); + std::unique_ptr MC(createMangleContext()); + MC->mangleTypeName(T, Out); + return getPointerAuthStringDiscriminator(*this, Str.c_str()); +} + QualType ASTContext::getObjCGCQualType(QualType T, Qualifiers::GC GCAttr) const { QualType CanT = getCanonicalType(T); @@ -6193,6 +6204,9 @@ bool ASTContext::BlockRequiresCopying(QualType Ty, return true; } + if (Ty.hasAddressDiscriminatedPointerAuth()) + return true; + // The block needs copy/destroy helpers if Ty is non-trivial to destructively // move or destroy. if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) @@ -8869,6 +8883,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || LQuals.getAddressSpace() != RQuals.getAddressSpace() || LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || + LQuals.getPointerAuth() != RQuals.getPointerAuth() || LQuals.hasUnaligned() != RQuals.hasUnaligned()) return {}; diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt index 5bae40c86539f9..6393d137efdf4e 100644 --- a/clang/lib/AST/CMakeLists.txt +++ b/clang/lib/AST/CMakeLists.txt @@ -91,6 +91,7 @@ add_clang_library(clangAST RecordLayoutBuilder.cpp ScanfFormatString.cpp SelectorLocationsKind.cpp + StableHash.cpp Stmt.cpp StmtCXX.cpp StmtIterator.cpp diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp index 12ec44fa027919..5810f9f5a6bd60 100644 --- a/clang/lib/AST/DeclCXX.cpp +++ b/clang/lib/AST/DeclCXX.cpp @@ -1031,6 +1031,31 @@ void CXXRecordDecl::addedMember(Decl *D) { } else if (!T.isCXX98PODType(Context)) data().PlainOldData = false; + // If a class has an 
address-discriminated signed pointer member, it is a + // non-POD type and its copy constructor, move constructor, copy assignment + // operator, move assignment operator are non-trivial. + if (PointerAuthQualifier Q = T.getPointerAuth()) { + if (Q.isAddressDiscriminated()) { + struct DefinitionData &Data = data(); + Data.PlainOldData = false; + Data.HasTrivialSpecialMembers &= + ~(SMF_CopyConstructor | SMF_MoveConstructor | + SMF_CopyAssignment | SMF_MoveAssignment); + setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs); + + // Copy/move constructors/assignment operators of a union are deleted by + // default if it has an address-discriminated ptrauth field. + if (isUnion()) { + data().DefaultedCopyConstructorIsDeleted = true; + data().DefaultedMoveConstructorIsDeleted = true; + data().DefaultedMoveAssignmentIsDeleted = true; + data().NeedOverloadResolutionForCopyConstructor = true; + data().NeedOverloadResolutionForMoveConstructor = true; + data().NeedOverloadResolutionForMoveAssignment = true; + } + } + } + if (T->isReferenceType()) { if (!Field->hasInClassInitializer()) data().HasUninitializedReferenceMember = true; diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 7ed08218567086..754277fb1f6de5 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -48,6 +48,7 @@ #include "clang/AST/OSLog.h" #include "clang/AST/OptionalDiagnostic.h" #include "clang/AST/RecordLayout.h" +#include "clang/AST/StableHash.h" #include "clang/AST/StmtVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/Builtins.h" @@ -1853,6 +1854,19 @@ static bool IsStringLiteralCall(const CallExpr *E) { Builtin == Builtin::BI__builtin___NSStringMakeConstantString); } +static bool isGlobalCallLValue(const CallExpr *E) { + if (IsStringLiteralCall(E)) + return true; + + switch (E->getBuiltinCallee()) { + case Builtin::BI__builtin_ptrauth_sign_constant: + return true; + + default: + return false; + } +} + static bool 
IsGlobalLValue(APValue::LValueBase B) { // C++11 [expr.const]p3 An address constant expression is a prvalue core // constant expression of pointer type that evaluates to... @@ -1894,7 +1908,7 @@ static bool IsGlobalLValue(APValue::LValueBase B) { case Expr::ObjCBoxedExprClass: return cast(E)->isExpressibleAsConstantInitializer(); case Expr::CallExprClass: - return IsStringLiteralCall(cast(E)); + return isGlobalCallLValue(cast(E)); // For GCC compatibility, &&label has static storage duration. case Expr::AddrLabelExprClass: return true; @@ -8202,6 +8216,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, } case Builtin::BI__builtin_operator_new: return HandleOperatorNewCall(Info, E, Result); + case Builtin::BI__builtin_ptrauth_sign_constant: + return Success(E); case Builtin::BI__builtin_launder: return evaluatePointer(E->getArg(0), Result); case Builtin::BIstrchr: @@ -10615,6 +10631,13 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, case Builtin::BI__builtin_expect: return Visit(E->getArg(0)); + case Builtin::BI__builtin_ptrauth_string_discriminator: { + auto literal = cast(E->getArg(0)->IgnoreParenImpCasts()); + auto result = getPointerAuthStringDiscriminator(Info.Ctx, + literal->getString()); + return Success(result, E); + } + case Builtin::BI__builtin_ffs: case Builtin::BI__builtin_ffsl: case Builtin::BI__builtin_ffsll: { @@ -11908,6 +11931,12 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( E); } + case UETT_PtrAuthTypeDiscriminator: { + if (E->getArgumentType()->isDependentType()) + return false; + return Success( + Info.Ctx.getPointerAuthTypeDiscriminator(E->getArgumentType()), E); + } case UETT_VecStep: { QualType Ty = E->getTypeOfArgument(); diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp index c55a9013757871..b2dbfcd968edfa 100644 --- a/clang/lib/AST/ItaniumMangle.cpp +++ b/clang/lib/AST/ItaniumMangle.cpp @@ -2319,38 +2319,41 @@ void CXXNameMangler::mangleQualifiers(Qualifiers 
Quals, const DependentAddressSp if (Quals.getObjCLifetime() == Qualifiers::OCL_Weak) mangleVendorQualifier("__weak"); + // The __unsafe_unretained qualifier is *not* mangled, so that + // __unsafe_unretained types in ARC produce the same manglings as the + // equivalent (but, naturally, unqualified) types in non-ARC, providing + // better ABI compatibility. + // + // It's safe to do this because unqualified 'id' won't show up + // in any type signatures that need to be mangled. + // __unaligned (from -fms-extensions) if (Quals.hasUnaligned()) mangleVendorQualifier("__unaligned"); - // Remaining ARC ownership qualifiers. - switch (Quals.getObjCLifetime()) { - case Qualifiers::OCL_None: - break; - - case Qualifiers::OCL_Weak: - // Do nothing as we already handled this case above. - break; - - case Qualifiers::OCL_Strong: + // The __strong ARC qualifier. + if (Quals.getObjCLifetime() == Qualifiers::OCL_Strong) mangleVendorQualifier("__strong"); - break; - case Qualifiers::OCL_Autoreleasing: - mangleVendorQualifier("__autoreleasing"); - break; + // __ptrauth. Note that this is parameterized. + if (auto ptrauth = Quals.getPointerAuth()) { + mangleVendorQualifier("__ptrauth"); - case Qualifiers::OCL_ExplicitNone: - // The __unsafe_unretained qualifier is *not* mangled, so that - // __unsafe_unretained types in ARC produce the same manglings as the - // equivalent (but, naturally, unqualified) types in non-ARC, providing - // better ABI compatibility. - // - // It's safe to do this because unqualified 'id' won't show up - // in any type signatures that need to be mangled. - break; + // For now, since we only allow non-dependent arguments, we can just + // inline the mangling of those arguments as literals. We treat the + // key and extra-discriminator arguments as 'unsigned int' and the + // address-discriminated argument as 'bool'. 
+ Out << "I" + "Lj" << ptrauth.getKey() << "E" + "Lb" << unsigned(ptrauth.isAddressDiscriminated()) << "E" + "Lj" << ptrauth.getExtraDiscriminator() << "E" + "E"; } + // The __autoreleasing ARC qualifier. + if (Quals.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) + mangleVendorQualifier("__autoreleasing"); + // ::= [r] [V] [K] # restrict (C99), volatile, const if (Quals.hasRestrict()) Out << 'r'; @@ -3995,6 +3998,14 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) { case UETT_AlignOf: Out << 'a'; break; + case UETT_PtrAuthTypeDiscriminator: { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, + "cannot yet mangle __builtin_ptrauth_type_discriminator expression"); + Diags.Report(E->getExprLoc(), DiagID); + return; + } case UETT_VecStep: { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp index f60d761c996c59..fdc1b3a553161d 100644 --- a/clang/lib/AST/JSONNodeDumper.cpp +++ b/clang/lib/AST/JSONNodeDumper.cpp @@ -1233,6 +1233,8 @@ void JSONNodeDumper::VisitUnaryExprOrTypeTraitExpr( case UETT_AlignOf: JOS.attribute("name", "alignof"); break; case UETT_VecStep: JOS.attribute("name", "vec_step"); break; case UETT_PreferredAlignOf: JOS.attribute("name", "__alignof"); break; + case UETT_PtrAuthTypeDiscriminator: + JOS.attribute("name", "ptrauth_type_discriminator"); break; case UETT_OpenMPRequiredSimdAlign: JOS.attribute("name", "__builtin_omp_required_simd_align"); break; } diff --git a/clang/lib/AST/StableHash.cpp b/clang/lib/AST/StableHash.cpp new file mode 100644 index 00000000000000..f1bdf97ff6b0e3 --- /dev/null +++ b/clang/lib/AST/StableHash.cpp @@ -0,0 +1,178 @@ +//===--- StableHash.cpp - An ABI-stable string hash based on SipHash ------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements an ABI-stable string hash based on SipHash. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/StableHash.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Support/Debug.h" +#include +#include + +using namespace clang; + +#define DEBUG_TYPE "clang-stable-hash" + +#define SIPHASH_ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b)))) + +#define SIPHASH_U8TO64_LE(p) \ + (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \ + ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \ + ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \ + ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56)) + +#define SIPHASH_SIPROUND \ + do { \ + v0 += v1; \ + v1 = SIPHASH_ROTL(v1, 13); \ + v1 ^= v0; \ + v0 = SIPHASH_ROTL(v0, 32); \ + v2 += v3; \ + v3 = SIPHASH_ROTL(v3, 16); \ + v3 ^= v2; \ + v0 += v3; \ + v3 = SIPHASH_ROTL(v3, 21); \ + v3 ^= v0; \ + v2 += v1; \ + v1 = SIPHASH_ROTL(v1, 17); \ + v1 ^= v2; \ + v2 = SIPHASH_ROTL(v2, 32); \ + } while (0) + +template +static inline ResultTy siphash(const uint8_t *in, uint64_t inlen, + const uint8_t (&k)[16]) { + static_assert(sizeof(ResultTy) == 8 || sizeof(ResultTy) == 16, + "result type should be uint64_t or uint128_t"); + uint64_t v0 = 0x736f6d6570736575ULL; + uint64_t v1 = 0x646f72616e646f6dULL; + uint64_t v2 = 0x6c7967656e657261ULL; + uint64_t v3 = 0x7465646279746573ULL; + uint64_t b; + uint64_t k0 = SIPHASH_U8TO64_LE(k); + uint64_t k1 = SIPHASH_U8TO64_LE(k + 8); + uint64_t m; + int i; + const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t)); + const int left = inlen & 7; + b = ((uint64_t)inlen) << 56; + v3 ^= k1; + v2 ^= k0; + v1 ^= k1; + v0 ^= k0; + 
+ if (sizeof(ResultTy) == 16) { + v1 ^= 0xee; + } + + for (; in != end; in += 8) { + m = SIPHASH_U8TO64_LE(in); + v3 ^= m; + + for (i = 0; i < cROUNDS; ++i) + SIPHASH_SIPROUND; + + v0 ^= m; + } + + switch (left) { + case 7: + b |= ((uint64_t)in[6]) << 48; + LLVM_FALLTHROUGH; + case 6: + b |= ((uint64_t)in[5]) << 40; + LLVM_FALLTHROUGH; + case 5: + b |= ((uint64_t)in[4]) << 32; + LLVM_FALLTHROUGH; + case 4: + b |= ((uint64_t)in[3]) << 24; + LLVM_FALLTHROUGH; + case 3: + b |= ((uint64_t)in[2]) << 16; + LLVM_FALLTHROUGH; + case 2: + b |= ((uint64_t)in[1]) << 8; + LLVM_FALLTHROUGH; + case 1: + b |= ((uint64_t)in[0]); + break; + case 0: + break; + } + + v3 ^= b; + + for (i = 0; i < cROUNDS; ++i) + SIPHASH_SIPROUND; + + v0 ^= b; + + if (sizeof(ResultTy) == 8) { + v2 ^= 0xff; + } else { + v2 ^= 0xee; + } + + for (i = 0; i < dROUNDS; ++i) + SIPHASH_SIPROUND; + + b = v0 ^ v1 ^ v2 ^ v3; + + // This mess with the result type would be easier with 'if constexpr'. + + uint64_t firstHalf = b; + if (sizeof(ResultTy) == 8) + return firstHalf; + + v1 ^= 0xdd; + + for (i = 0; i < dROUNDS; ++i) + SIPHASH_SIPROUND; + + b = v0 ^ v1 ^ v2 ^ v3; + uint64_t secondHalf = b; + + return firstHalf + | (ResultTy(secondHalf) << (sizeof(ResultTy) == 8 ? 0 : 64)); +} + +/// Compute an ABI-stable hash of the given string. +uint64_t clang::getStableStringHash(llvm::StringRef string) { + static const uint8_t K[16] = {0xb5, 0xd4, 0xc9, 0xeb, 0x79, 0x10, 0x4a, 0x79, + 0x6f, 0xec, 0x8b, 0x1b, 0x42, 0x87, 0x81, 0xd4}; + + // The aliasing is fine here because of omnipotent char. + auto data = reinterpret_cast(string.data()); + return siphash<2, 4, uint64_t>(data, string.size(), K); +} + +uint64_t clang::getPointerAuthStringDiscriminator(const ASTContext &ctxt, + llvm::StringRef string) { + auto rawHash = getStableStringHash(string); + + // Don't do anything target-specific yet. + + // Produce a non-zero 16-bit discriminator. 
+ // We use a 16-bit discriminator because ARM64 can efficiently load + // a 16-bit immediate into the high bits of a register without disturbing + // the remainder of the value, which serves as a nice blend operation. + // 16 bits is also sufficiently compact to not inflate a loader relocation. + // We disallow zero to guarantee a different discriminator from the places + // in the ABI that use a constant zero. + uint64_t discriminator = (rawHash % 0xFFFF) + 1; + LLVM_DEBUG( + llvm::dbgs() << "Ptrauth string disc: " << llvm::utostr(discriminator) + << " (0x" << llvm::utohexstr(discriminator) << ")" + << " of: " << string << "\n"); + return discriminator; +} diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp index 7759ff6c138982..7b74c05c68dbc9 100644 --- a/clang/lib/AST/StmtPrinter.cpp +++ b/clang/lib/AST/StmtPrinter.cpp @@ -1278,6 +1278,9 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){ case UETT_PreferredAlignOf: OS << "__alignof"; break; + case UETT_PtrAuthTypeDiscriminator: + OS << "__builtin_ptrauth_type_discriminator"; + break; case UETT_VecStep: OS << "vec_step"; break; diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp index 6899b91803a636..062c83268543a0 100644 --- a/clang/lib/AST/TextNodeDumper.cpp +++ b/clang/lib/AST/TextNodeDumper.cpp @@ -813,6 +813,9 @@ void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr( case UETT_AlignOf: OS << " alignof"; break; + case UETT_PtrAuthTypeDiscriminator: + OS << "__builtin_ptrauth_type_discriminator"; + break; case UETT_VecStep: OS << " vec_step"; break; diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp index d45f1c73fb4a5c..29cf68ae06df09 100644 --- a/clang/lib/AST/Type.cpp +++ b/clang/lib/AST/Type.cpp @@ -2365,6 +2365,8 @@ QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveCopy() const { case Qualifiers::OCL_Weak: return PCK_ARCWeak; default: + if (hasAddressDiscriminatedPointerAuth()) + return PCK_PtrAuth; 
return Qs.hasVolatile() ? PCK_VolatileTrivial : PCK_Trivial; } } diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp index 386daf23260ce1..fe9fa48c64a096 100644 --- a/clang/lib/AST/TypePrinter.cpp +++ b/clang/lib/AST/TypePrinter.cpp @@ -1524,6 +1524,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, case attr::Ptr64: case attr::SPtr: case attr::UPtr: + case attr::PointerAuth: case attr::AddressSpace: llvm_unreachable("This attribute should have been handled already"); @@ -1751,6 +1752,30 @@ void clang::printTemplateArgumentList(raw_ostream &OS, printTo(OS, Args, Policy, false); } +std::string PointerAuthQualifier::getAsString() const { + LangOptions LO; + return getAsString(PrintingPolicy(LO)); +} + +std::string PointerAuthQualifier::getAsString(const PrintingPolicy &P) const { + SmallString<64> Buf; + llvm::raw_svector_ostream StrOS(Buf); + print(StrOS, P); + return StrOS.str(); +} + +bool PointerAuthQualifier::isEmptyWhenPrinted(const PrintingPolicy &P) const { + return !isPresent(); +} + +void PointerAuthQualifier::print(raw_ostream &OS, + const PrintingPolicy &P) const { + if (!isPresent()) return; + OS << "__ptrauth(" << getKey() << "," + << unsigned(isAddressDiscriminated()) << "," + << getExtraDiscriminator() << ")"; +} + std::string Qualifiers::getAsString() const { LangOptions LO; return getAsString(PrintingPolicy(LO)); @@ -1780,6 +1805,10 @@ bool Qualifiers::isEmptyWhenPrinted(const PrintingPolicy &Policy) const { if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime)) return false; + if (auto pointerAuth = getPointerAuth()) + if (!pointerAuth.isEmptyWhenPrinted(Policy)) + return false; + return true; } @@ -1867,6 +1896,14 @@ void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy, } } + if (auto pointerAuth = getPointerAuth()) { + if (addSpace) + OS << ' '; + addSpace = true; + + pointerAuth.print(OS, Policy); + } + if (appendSpaceIfNonEmpty && addSpace) OS << ' '; } diff --git 
a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp index 5688042dadd917..d19838936abc72 100644 --- a/clang/lib/AST/VTableBuilder.cpp +++ b/clang/lib/AST/VTableBuilder.cpp @@ -1133,11 +1133,38 @@ void ItaniumVTableBuilder::ComputeThisAdjustments() { continue; // Add it. - VTableThunks[VTableIndex].This = ThisAdjustment; + auto SetThisAdjustmentThunk = [&](uint64_t Idx) { + // If a this pointer adjustment is required, record the method that + // created the vtable entry. MD is not necessarily the method that + // created the entry since derived classes overwrite base class + // information in MethodInfoMap, hence findOriginalMethodInMap is called + // here. + // + // For example, in the following class hierarchy, if MD = D1::m and + // Overrider = D2::m, the original method that created the entry is B0::m, + // which is what findOriginalMethodInMap(MD) returns: + // + // struct B0 { int a; virtual void m(); }; + // struct D0 : B0 { int a; void m() override; }; + // struct D1 : B0 { int a; void m() override; }; + // struct D2 : D0, D1 { int a; void m() override; }; + // + // We need to record the method because we cannot + // call findOriginalMethod to find the method that created the entry if + // the method in the entry requires adjustment. + // + // Do not set ThunkInfo::Method if Idx is already in VTableThunks. This + // can happen when covariant return adjustment is required too. + if (!VTableThunks.count(Idx)) + VTableThunks[Idx].Method = VTables.findOriginalMethodInMap(MD); + VTableThunks[Idx].This = ThisAdjustment; + }; + + SetThisAdjustmentThunk(VTableIndex); if (isa(MD)) { // Add an adjustment for the deleting destructor as well.
- VTableThunks[VTableIndex + 1].This = ThisAdjustment; + SetThisAdjustmentThunk(VTableIndex + 1); } } @@ -1496,6 +1523,8 @@ void ItaniumVTableBuilder::AddMethods( FindNearestOverriddenMethod(MD, PrimaryBases)) { if (ComputeReturnAdjustmentBaseOffset(Context, MD, OverriddenMD).isEmpty()) { + VTables.setOriginalMethod(MD, OverriddenMD); + // Replace the method info of the overridden method with our own // method. assert(MethodInfoMap.count(OverriddenMD) && @@ -1594,6 +1623,13 @@ void ItaniumVTableBuilder::AddMethods( ReturnAdjustment ReturnAdjustment = ComputeReturnAdjustment(ReturnAdjustmentOffset); + // If a return adjustment is required, record the method that created the + // vtable entry. We need to record the method because we cannot call + // findOriginalMethod to find the method that created the entry if the + // method in the entry requires adjustment. + if (!ReturnAdjustment.isEmpty()) + VTableThunks[Components.size()].Method = MD; + AddMethod(Overrider.Method, ReturnAdjustment); } } @@ -1868,11 +1904,32 @@ void ItaniumVTableBuilder::LayoutVTablesForVirtualBases( } } +static void printThunkMethod(const ThunkInfo &Info, raw_ostream &Out) { + if (Info.Method) { + std::string Str = + PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, + Info.Method); + Out << " method: " << Str; + } +} + /// dumpLayout - Dump the vtable layout. void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { // FIXME: write more tests that actually use the dumpLayout output to prevent // ItaniumVTableBuilder regressions. 
+ Out << "Original map\n"; + + for (const auto &P : VTables.getOriginalMethodMap()) { + std::string Str0 = + PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, + P.first); + std::string Str1 = + PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, + P.second); + Out << " " << Str0 << " -> " << Str1 << "\n"; + } + if (isBuildingConstructorVTable()) { Out << "Construction vtable for ('"; MostDerivedClass->printQualifiedName(Out); @@ -1957,6 +2014,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { } Out << ']'; + printThunkMethod(Thunk, Out); } // If this function pointer has a 'this' pointer adjustment, dump it. @@ -1970,6 +2028,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { } Out << ']'; + printThunkMethod(Thunk, Out); } } @@ -2006,6 +2065,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { Out << ']'; } + printThunkMethod(Thunk, Out); } break; @@ -2106,7 +2166,6 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { ThunkInfoVectorTy ThunksVector = Thunks[MD]; llvm::sort(ThunksVector, [](const ThunkInfo &LHS, const ThunkInfo &RHS) { - assert(LHS.Method == nullptr && RHS.Method == nullptr); return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return); }); @@ -2263,6 +2322,35 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, return I->second; } +GlobalDecl ItaniumVTableContext::findOriginalMethod(GlobalDecl GD) { + const auto *MD = cast(GD.getDecl()); + computeVTableRelatedInformation(MD->getParent()); + const auto *OriginalMD = findOriginalMethodInMap(MD); + + if (const auto *DD = dyn_cast(OriginalMD)) + return GlobalDecl(DD, GD.getDtorType()); + return OriginalMD; +} + +const CXXMethodDecl * +ItaniumVTableContext::findOriginalMethodInMap(const CXXMethodDecl *MD) const { + // Traverse the chain of virtual methods until we find the method that added + // the v-table slot. 
+ while (true) { + auto I = OriginalMethodMap.find(MD); + + // MD doesn't exist in OriginalMethodMap, so it must be the method we are + // looking for. + if (I == OriginalMethodMap.end()) + break; + + // Set MD to the overridden method. + MD = I->second; + } + + return MD; +} + static std::unique_ptr CreateVTableLayout(const ItaniumVTableBuilder &Builder) { SmallVector diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp index 3a21a19e1f19a2..63303263ced40a 100644 --- a/clang/lib/Basic/TargetInfo.cpp +++ b/clang/lib/Basic/TargetInfo.cpp @@ -113,6 +113,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) { HasBuiltinMSVaList = false; IsRenderScriptTarget = false; HasAArch64SVETypes = false; + PointerAuthSupported = false; // Default to no types using fpret. RealTypeUsesObjCFPRet = 0; @@ -754,6 +755,10 @@ bool TargetInfo::validateInputConstraint( return true; } +bool TargetInfo::validatePointerAuthKey(const llvm::APSInt &value) const { + return false; +} + void TargetInfo::CheckFixedPointBits() const { // Check that the number of fractional and integral bits (and maybe sign) can // fit into the bits given for a fixed point type. diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp index c86cc63e3d84c6..d59bc955c0e026 100644 --- a/clang/lib/Basic/Targets/AArch64.cpp +++ b/clang/lib/Basic/Targets/AArch64.cpp @@ -13,6 +13,7 @@ #include "AArch64.h" #include "clang/Basic/TargetBuiltins.h" #include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringExtras.h" @@ -91,6 +92,9 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple, else if (Triple.getOS() == llvm::Triple::UnknownOS) this->MCountName = Opts.EABIVersion == llvm::EABI::GNU ? 
"\01_mcount" : "mcount"; + + if (Triple.getArchName() == "arm64e") + PointerAuthSupported = true; } StringRef AArch64TargetInfo::getABI() const { return ABI; } @@ -506,6 +510,11 @@ int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const { return -1; } +bool AArch64TargetInfo::validatePointerAuthKey( + const llvm::APSInt &value) const { + return 0 <= value && value <= 3; +} + AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) : AArch64TargetInfo(Triple, Opts) {} @@ -650,6 +659,9 @@ void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts, Builder.defineMacro("__arm64", "1"); Builder.defineMacro("__arm64__", "1"); + if (Triple.getArchName() == "arm64e") + Builder.defineMacro("__arm64e__", "1"); + getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion); } diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h index b6aa07780edda0..85b6a7c9622d3b 100644 --- a/clang/lib/Basic/Targets/AArch64.h +++ b/clang/lib/Basic/Targets/AArch64.h @@ -97,6 +97,8 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo { } int getEHDataRegisterNumber(unsigned RegNo) const override; + + bool validatePointerAuthKey(const llvm::APSInt &value) const override; }; class LLVM_LIBRARY_VISIBILITY AArch64leTargetInfo : public AArch64TargetInfo { diff --git a/clang/lib/Basic/Targets/OSTargets.cpp b/clang/lib/Basic/Targets/OSTargets.cpp index 72fdb0e7dde8a2..82906728633767 100644 --- a/clang/lib/Basic/Targets/OSTargets.cpp +++ b/clang/lib/Basic/Targets/OSTargets.cpp @@ -31,6 +31,9 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts, if (Opts.Sanitize.has(SanitizerKind::Address)) Builder.defineMacro("_FORTIFY_SOURCE", "0"); + if (Opts.PointerAuthIntrinsics) + Builder.defineMacro("__PTRAUTH_INTRINSICS__"); + // Darwin defines __weak, __strong, and __unsafe_unretained even in C mode. 
if (!Opts.ObjC) { // __weak is always defined, for use in blocks and with objc pointers. diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 8a4d3bd28824bd..4b8bddc5b096c9 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -336,6 +336,11 @@ static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder, PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles)); } +static void addSoftPointerAuthPass(const PassManagerBuilder &Builder, + legacy::PassManagerBase &PM) { + PM.add(createSoftPointerAuthPass()); +} + static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple, const CodeGenOptions &CodeGenOpts) { TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple); @@ -685,6 +690,13 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM, addDataFlowSanitizerPass); } + if (LangOpts.SoftPointerAuth) { + PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, + addSoftPointerAuthPass); + PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, + addSoftPointerAuthPass); + } + // Set up the per-function pass manager. FPM.add(new TargetLibraryInfoWrapperPass(*TLII)); if (CodeGenOpts.VerifyModule) diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index f90d9439af257f..708ff5341f2270 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -69,6 +69,7 @@ namespace { /// entity that's captured by a block. 
enum class BlockCaptureEntityKind { CXXRecord, // Copy or destroy + AddressDiscriminatedPointerAuth, ARCWeak, ARCStrong, NonTrivialCStruct, @@ -123,7 +124,7 @@ static std::string getBlockDescriptorName(const CGBlockInfo &BlockInfo, std::string Name = "__block_descriptor_"; Name += llvm::to_string(BlockInfo.BlockSize.getQuantity()) + "_"; - if (BlockInfo.needsCopyDisposeHelpers()) { + if (BlockInfo.needsCopyDisposeHelpers(CGM.getContext())) { if (CGM.getLangOpts().Exceptions) Name += "e"; if (CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) @@ -222,14 +223,17 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM, // Optional copy/dispose helpers. bool hasInternalHelper = false; - if (blockInfo.needsCopyDisposeHelpers()) { + if (blockInfo.needsCopyDisposeHelpers(CGM.getContext())) { + auto &schema = + CGM.getCodeGenOpts().PointerAuth.BlockHelperFunctionPointers; + // copy_func_helper_decl llvm::Constant *copyHelper = buildCopyHelper(CGM, blockInfo); - elements.add(copyHelper); + elements.addSignedPointer(copyHelper, schema, GlobalDecl(), QualType()); // destroy_func_decl llvm::Constant *disposeHelper = buildDisposeHelper(CGM, blockInfo); - elements.add(disposeHelper); + elements.addSignedPointer(disposeHelper, schema, GlobalDecl(), QualType()); if (cast(copyHelper->getOperand(0))->hasInternalLinkage() || cast(disposeHelper->getOperand(0)) @@ -623,6 +627,10 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF, lifetime = Qualifiers::OCL_Strong; } + // So do types with address-discriminated pointer authentication. + } else if (variable->getType().hasAddressDiscriminatedPointerAuth()) { + info.NeedsCopyDispose = true; + // So do types that require non-trivial copy construction. 
} else if (CI.hasCopyExpr()) { info.NeedsCopyDispose = true; @@ -966,7 +974,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { flags = BLOCK_HAS_SIGNATURE; if (blockInfo.HasCapturedVariableLayout) flags |= BLOCK_HAS_EXTENDED_LAYOUT; - if (blockInfo.needsCopyDisposeHelpers()) + if (blockInfo.needsCopyDisposeHelpers(CGM.getContext())) flags |= BLOCK_HAS_COPY_DISPOSE; if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ; @@ -1009,11 +1017,26 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { llvm::ConstantInt::get(IntTy, blockInfo.BlockAlign.getQuantity()), getIntSize(), "block.align"); } - addHeaderField(blockFn, GenVoidPtrSize, "block.invoke"); - if (!IsOpenCL) + + if (!IsOpenCL) { + llvm::Value *blockFnPtr = llvm::ConstantExpr::getBitCast(InvokeFn, VoidPtrTy); + auto blockFnPtrAddr = projectField(index, "block.invoke"); + if (auto &schema = + CGM.getCodeGenOpts().PointerAuth.BlockInvocationFunctionPointers) { + QualType type = blockInfo.getBlockExpr()->getType() + ->castAs()->getPointeeType(); + auto authInfo = EmitPointerAuthInfo(schema, blockFnPtrAddr.getPointer(), + GlobalDecl(), type); + blockFnPtr = EmitPointerAuthSign(authInfo, blockFnPtr); + } + Builder.CreateStore(blockFnPtr, blockFnPtrAddr); + offset += getPointerSize(); + index++; + addHeaderField(descriptor, getPointerSize(), "block.descriptor"); - else if (auto *Helper = - CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) { + } else if (auto *Helper = + CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) { + addHeaderField(blockFn, GenVoidPtrSize, "block.invoke"); for (auto I : Helper->getCustomFieldValues(*this, blockInfo)) { addHeaderField( I.first, @@ -1021,7 +1044,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { CGM.getDataLayout().getTypeAllocSize(I.first->getType())), I.second); } - } + } else + addHeaderField(blockFn, GenVoidPtrSize, "block.invoke"); } // Finally, capture all the 
values into the block. @@ -1261,6 +1285,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, ASTContext &Ctx = getContext(); CallArgList Args; + llvm::Value *FuncPtr = nullptr; + if (getLangOpts().OpenCL) { // For OpenCL, BlockPtr is already casted to generic block literal. @@ -1278,7 +1304,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, if (!isa(E->getCalleeDecl())) Func = CGM.getOpenCLRuntime().getInvokeFunction(E->getCallee()); else { - llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 2); + FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 2); Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign()); } } else { @@ -1286,7 +1312,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, BlockPtr = Builder.CreatePointerCast( BlockPtr, llvm::PointerType::get(GenBlockTy, 0), "block.literal"); // Get pointer to the block invoke function - llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3); + FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3); // First argument is a block literal casted to a void pointer BlockPtr = Builder.CreatePointerCast(BlockPtr, VoidPtrTy); @@ -1309,7 +1335,14 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, Func = Builder.CreatePointerCast(Func, BlockFTyPtr); // Prepare the callee. - CGCallee Callee(CGCalleeInfo(), Func); + CGPointerAuthInfo PointerAuth; + if (auto &AuthSchema = + CGM.getCodeGenOpts().PointerAuth.BlockInvocationFunctionPointers) { + assert(FuncPtr != nullptr && "Missing function pointer for AuthInfo"); + PointerAuth = EmitPointerAuthInfo(AuthSchema, FuncPtr, + GlobalDecl(), FnType); + } + CGCallee Callee(CGCalleeInfo(), Func, PointerAuth); // And call the block. 
return EmitCall(FnInfo, Callee, ReturnValue, Args); @@ -1411,14 +1444,25 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM, // Reserved fields.addInt(CGM.IntTy, 0); + + // Function + if (auto &schema = + CGM.getCodeGenOpts().PointerAuth.BlockInvocationFunctionPointers) { + QualType fnType = blockInfo.getBlockExpr() + ->getType() + ->castAs() + ->getPointeeType(); + fields.addSignedPointer(blockFn, schema, GlobalDecl(), fnType); + } else { + fields.add(blockFn); + } } else { fields.addInt(CGM.IntTy, blockInfo.BlockSize.getQuantity()); fields.addInt(CGM.IntTy, blockInfo.BlockAlign.getQuantity()); + // Function + fields.add(blockFn); } - // Function - fields.add(blockFn); - if (!IsOpenCL) { // Descriptor fields.add(buildBlockDescriptor(CGM, blockInfo)); @@ -1702,6 +1746,10 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T, return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags); } + if (T.hasAddressDiscriminatedPointerAuth()) + return std::make_pair( + BlockCaptureEntityKind::AddressDiscriminatedPointerAuth, Flags); + Flags = BLOCK_FIELD_IS_OBJECT; bool isBlockPointer = T->isBlockPointerType(); if (isBlockPointer) @@ -1722,6 +1770,10 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T, return std::make_pair(!isBlockPointer ? 
BlockCaptureEntityKind::ARCStrong : BlockCaptureEntityKind::BlockObject, Flags); + case QualType::PCK_PtrAuth: + return std::make_pair( + BlockCaptureEntityKind::AddressDiscriminatedPointerAuth, + BlockFieldFlags()); case QualType::PCK_Trivial: case QualType::PCK_VolatileTrivial: { if (!T->isObjCRetainableType()) @@ -1847,6 +1899,13 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E, case BlockCaptureEntityKind::ARCStrong: Str += "s"; break; + case BlockCaptureEntityKind::AddressDiscriminatedPointerAuth: { + auto PtrAuth = CaptureTy.getPointerAuth(); + assert(PtrAuth && PtrAuth.isAddressDiscriminated()); + Str += "p" + llvm::to_string(PtrAuth.getKey()) + "d" + + llvm::to_string(PtrAuth.getExtraDiscriminator()); + break; + } case BlockCaptureEntityKind::BlockObject: { const VarDecl *Var = CI.getVariable(); unsigned F = Flags.getBitMask(); @@ -1962,6 +2021,7 @@ static void pushCaptureCleanup(BlockCaptureEntityKind CaptureKind, } break; } + case BlockCaptureEntityKind::AddressDiscriminatedPointerAuth: case BlockCaptureEntityKind::None: break; } @@ -2066,6 +2126,14 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) { case BlockCaptureEntityKind::ARCWeak: EmitARCCopyWeak(dstField, srcField); break; + case BlockCaptureEntityKind::AddressDiscriminatedPointerAuth: { + auto type = CI.getVariable()->getType(); + auto ptrauth = type.getPointerAuth(); + assert(ptrauth && ptrauth.isAddressDiscriminated()); + EmitPointerAuthCopy(ptrauth, type, dstField, srcField); + // We don't need to push cleanups for ptrauth types. + continue; + } case BlockCaptureEntityKind::NonTrivialCStruct: { // If this is a C struct that requires non-trivial copy construction, // emit a call to its copy constructor. @@ -2409,6 +2477,33 @@ class CXXByrefHelpers final : public BlockByrefHelpers { } }; +/// Emits the copy/dispose helpers for a __block variable with +/// address-discriminated pointer authentication. 
+class AddressDiscriminatedByrefHelpers final : public BlockByrefHelpers { + QualType VarType; + +public: + AddressDiscriminatedByrefHelpers(CharUnits alignment, QualType type) + : BlockByrefHelpers(alignment), VarType(type) { + assert(type.hasAddressDiscriminatedPointerAuth()); + } + + void emitCopy(CodeGenFunction &CGF, Address destField, + Address srcField) override { + CGF.EmitPointerAuthCopy(VarType.getPointerAuth(), VarType, + destField, srcField); + } + + bool needsDispose() const override { return false; } + void emitDispose(CodeGenFunction &CGF, Address field) override { + llvm_unreachable("should never be called"); + } + + void profileImpl(llvm::FoldingSetNodeID &id) const override { + id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr()); + } +}; + /// Emits the copy/dispose helpers for a __block variable that is a non-trivial /// C struct. class NonTrivialCStructByrefHelpers final : public BlockByrefHelpers { @@ -2628,6 +2723,11 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr)); } + if (type.hasAddressDiscriminatedPointerAuth()) { + return ::buildByrefHelpers( + CGM, byrefInfo, AddressDiscriminatedByrefHelpers(valueAlignment, type)); + } + // If type is a non-trivial C struct type that is non-trivial to // destructly move or destroy, build the copy and dispose helpers. 
if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct || @@ -2826,8 +2926,16 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { unsigned nextHeaderIndex = 0; CharUnits nextHeaderOffset; auto storeHeaderField = [&](llvm::Value *value, CharUnits fieldSize, - const Twine &name) { + const Twine &name, bool isFunction = false) { auto fieldAddr = Builder.CreateStructGEP(addr, nextHeaderIndex, name); + if (isFunction) { + if (auto &schema = CGM.getCodeGenOpts().PointerAuth + .BlockByrefHelperFunctionPointers) { + auto pointerAuth = EmitPointerAuthInfo(schema, fieldAddr.getPointer(), + GlobalDecl(), QualType()); + value = EmitPointerAuthSign(pointerAuth, value); + } + } Builder.CreateStore(value, fieldAddr); nextHeaderIndex++; @@ -2910,9 +3018,9 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { if (helpers) { storeHeaderField(helpers->CopyHelper, getPointerSize(), - "byref.copyHelper"); + "byref.copyHelper", /*function*/ true); storeHeaderField(helpers->DisposeHelper, getPointerSize(), - "byref.disposeHelper"); + "byref.disposeHelper", /*function*/ true); } if (ByRefHasLifetime && HasByrefExtendedLayout) { diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h index c4bfde6661542e..ade5126027a486 100644 --- a/clang/lib/CodeGen/CGBlocks.h +++ b/clang/lib/CodeGen/CGBlocks.h @@ -287,7 +287,7 @@ class CGBlockInfo { CGBlockInfo(const BlockDecl *blockDecl, StringRef Name); // Indicates whether the block needs a custom copy or dispose function. 
- bool needsCopyDisposeHelpers() const { + bool needsCopyDisposeHelpers(const ASTContext &Ctx) const { return NeedsCopyDispose && !Block->doesNotEscape(); } }; diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 648837a6d15125..970de010bf78a9 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -3490,6 +3490,78 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__iso_volatile_store64: return RValue::get(EmitISOVolatileStore(*this, E)); + case Builtin::BI__builtin_ptrauth_sign_constant: + return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); + + case Builtin::BI__builtin_ptrauth_auth: + case Builtin::BI__builtin_ptrauth_auth_and_resign: + case Builtin::BI__builtin_ptrauth_blend_discriminator: + case Builtin::BI__builtin_ptrauth_sign_generic_data: + case Builtin::BI__builtin_ptrauth_sign_unauthenticated: + case Builtin::BI__builtin_ptrauth_strip: { + // Emit the arguments. + SmallVector<llvm::Value *, 5> args; + for (auto argExpr : E->arguments()) + args.push_back(EmitScalarExpr(argExpr)); + + // Cast the value to intptr_t, saving its original type. 
+ llvm::Type *origValueType = args[0]->getType(); + if (origValueType->isPointerTy()) + args[0] = Builder.CreatePtrToInt(args[0], IntPtrTy); + + switch (BuiltinID) { + case Builtin::BI__builtin_ptrauth_auth_and_resign: + if (args[4]->getType()->isPointerTy()) + args[4] = Builder.CreatePtrToInt(args[4], IntPtrTy); + LLVM_FALLTHROUGH; + + case Builtin::BI__builtin_ptrauth_auth: + case Builtin::BI__builtin_ptrauth_sign_constant: + case Builtin::BI__builtin_ptrauth_sign_unauthenticated: + if (args[2]->getType()->isPointerTy()) + args[2] = Builder.CreatePtrToInt(args[2], IntPtrTy); + break; + + case Builtin::BI__builtin_ptrauth_sign_generic_data: + if (args[1]->getType()->isPointerTy()) + args[1] = Builder.CreatePtrToInt(args[1], IntPtrTy); + break; + + case Builtin::BI__builtin_ptrauth_blend_discriminator: + case Builtin::BI__builtin_ptrauth_strip: + break; + } + + // Call the intrinsic. + auto intrinsicID = [&]() -> unsigned { + switch (BuiltinID) { + case Builtin::BI__builtin_ptrauth_auth: + return llvm::Intrinsic::ptrauth_auth; + case Builtin::BI__builtin_ptrauth_auth_and_resign: + return llvm::Intrinsic::ptrauth_resign; + case Builtin::BI__builtin_ptrauth_blend_discriminator: + return llvm::Intrinsic::ptrauth_blend; + case Builtin::BI__builtin_ptrauth_sign_generic_data: + return llvm::Intrinsic::ptrauth_sign_generic; + case Builtin::BI__builtin_ptrauth_sign_constant: + case Builtin::BI__builtin_ptrauth_sign_unauthenticated: + return llvm::Intrinsic::ptrauth_sign; + case Builtin::BI__builtin_ptrauth_strip: + return llvm::Intrinsic::ptrauth_strip; + } + llvm_unreachable("bad ptrauth intrinsic"); + }(); + auto intrinsic = CGM.getIntrinsic(intrinsicID, { IntPtrTy }); + llvm::Value *result = EmitRuntimeCall(intrinsic, args); + + if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data && + BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator && + origValueType->isPointerTy()) { + result = Builder.CreateIntToPtr(result, origValueType); + } + return 
RValue::get(result); + } + case Builtin::BI__exception_code: case Builtin::BI_exception_code: return RValue::get(EmitSEHExceptionCode()); @@ -4117,7 +4189,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // using exactly the normal call path. if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) return emitLibraryCall(*this, FD, E, - cast(EmitScalarExpr(E->getCallee()))); + CGM.getRawFunctionPointer(FD)); // Check that a call to a target specific builtin has the correct target // features. diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp index 7e5fe0fd6b1d5f..e9d673181a8935 100644 --- a/clang/lib/CodeGen/CGCXX.cpp +++ b/clang/lib/CodeGen/CGCXX.cpp @@ -264,7 +264,16 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF, CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt"); llvm::Value *VFunc = CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes); - CGCallee Callee(GD, VFunc); + + CGPointerAuthInfo PointerAuth; + if (auto &Schema = + CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers) { + auto OrigMD = + CGM.getItaniumVTableContext().findOriginalMethod(GD.getCanonicalDecl()); + PointerAuth = CGF.EmitPointerAuthInfo(Schema, VFuncPtr, OrigMD, QualType()); + } + + CGCallee Callee(GD, VFunc, PointerAuth); return Callee; } diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 62e8fa0370131f..b1049b6cdc88c3 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -1780,6 +1780,14 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone, FuncAttrs.addAttribute("stackrealign"); if (CodeGenOpts.Backchain) FuncAttrs.addAttribute("backchain"); + if (CodeGenOpts.PointerAuth.ReturnAddresses) + FuncAttrs.addAttribute("ptrauth-returns"); + if (CodeGenOpts.PointerAuth.FunctionPointers) + FuncAttrs.addAttribute("ptrauth-calls"); + if (CodeGenOpts.PointerAuth.IndirectGotos) + 
FuncAttrs.addAttribute("ptrauth-indirect-gotos"); + if (CodeGenOpts.PointerAuth.AuthTraps) + FuncAttrs.addAttribute("ptrauth-auth-traps"); if (CodeGenOpts.SpeculativeLoadHardening) FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); @@ -3629,7 +3637,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, } if (HasAggregateEvalKind && isa(E) && - cast(E)->getCastKind() == CK_LValueToRValue) { + cast(E)->getCastKind() == CK_LValueToRValue && + !type.isNonTrivialToPrimitiveCopy()) { LValue L = EmitLValue(cast(E)->getSubExpr()); assert(L.isSimple()); args.addUncopiedAggregate(L, type); @@ -3797,7 +3806,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, llvm::CallBase **callOrInvoke, - SourceLocation Loc) { + SourceLocation Loc, + bool IsVirtualFunctionPointerThunk) { // FIXME: We no longer need the types from CallArgs; lift up and simplify. assert(Callee.isOrdinary() || Callee.isVirtual()); @@ -3872,7 +3882,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, Address SRetAlloca = Address::invalid(); llvm::Value *UnusedReturnSizePtr = nullptr; if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { - if (!ReturnValue.isNull()) { + if (IsVirtualFunctionPointerThunk && RetAI.isIndirect()) { + SRetPtr = Address(CurFn->arg_begin() + IRFunctionArgs.getSRetArgNo(), + CharUnits::fromQuantity(1)); + } else if (!ReturnValue.isNull()) { SRetPtr = ReturnValue.getValue(); } else { SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); @@ -4370,6 +4383,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, SmallVector BundleList = getBundlesForFunclet(CalleePtr); + // Add the pointer-authentication bundle. + EmitPointerAuthOperandBundle(ConcreteCallee.getPointerAuthInfo(), BundleList); + // Emit the actual call/invoke instruction. 
llvm::CallBase *CI; if (!InvokeDest) { @@ -4480,7 +4496,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, CallArgs.freeArgumentMemory(*this); // Extract the return value. - RValue Ret = [&] { + RValue Ret; + + // If the current function is a virtual function pointer thunk, avoid copying + // the return value of the musttail call to a temporary. + if (IsVirtualFunctionPointerThunk) + Ret = RValue::get(CI); + else + Ret = [&] { switch (RetAI.getKind()) { case ABIArgInfo::CoerceAndExpand: { auto coercionType = RetAI.getCoerceAndExpandType(); diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h index cc11ded704abbf..8f90474304ac32 100644 --- a/clang/lib/CodeGen/CGCall.h +++ b/clang/lib/CodeGen/CGCall.h @@ -62,6 +62,42 @@ class CGCalleeInfo { const GlobalDecl getCalleeDecl() const { return CalleeDecl; } }; + /// Information necessary for pointer authentication. + class CGPointerAuthInfo { + unsigned Signed : 1; + unsigned Key : 31; + llvm::Value *Discriminator; + public: + CGPointerAuthInfo() { + Signed = false; + } + CGPointerAuthInfo(unsigned key, llvm::Value *discriminator) + : Discriminator(discriminator) { + assert(!discriminator || + discriminator->getType()->isIntegerTy() || + discriminator->getType()->isPointerTy()); + Signed = true; + Key = key; + } + + explicit operator bool() const { + return isSigned(); + } + + bool isSigned() const { + return Signed; + } + + unsigned getKey() const { + assert(isSigned()); + return Key; + } + llvm::Value *getDiscriminator() const { + assert(isSigned()); + return Discriminator; + } + }; + /// All available information about a concrete callee. 
class CGCallee { enum class SpecialKind : uintptr_t { @@ -73,6 +109,10 @@ class CGCalleeInfo { Last = Virtual }; + struct OrdinaryInfoStorage { + CGCalleeInfo AbstractInfo; + CGPointerAuthInfo PointerAuthInfo; + }; struct BuiltinInfoStorage { const FunctionDecl *Decl; unsigned ID; @@ -89,7 +129,7 @@ class CGCalleeInfo { SpecialKind KindOrFunctionPointer; union { - CGCalleeInfo AbstractInfo; + OrdinaryInfoStorage OrdinaryInfo; BuiltinInfoStorage BuiltinInfo; PseudoDestructorInfoStorage PseudoDestructorInfo; VirtualInfoStorage VirtualInfo; @@ -108,9 +148,11 @@ class CGCalleeInfo { /// Construct a callee. Call this constructor directly when this /// isn't a direct call. - CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr) + CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr, + const CGPointerAuthInfo &pointerAuthInfo) : KindOrFunctionPointer(SpecialKind(uintptr_t(functionPtr))) { - AbstractInfo = abstractInfo; + OrdinaryInfo.AbstractInfo = abstractInfo; + OrdinaryInfo.PointerAuthInfo = pointerAuthInfo; assert(functionPtr && "configuring callee without function pointer"); assert(functionPtr->getType()->isPointerTy()); assert(functionPtr->getType()->getPointerElementType()->isFunctionTy()); @@ -132,13 +174,13 @@ class CGCalleeInfo { static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo = CGCalleeInfo()) { - return CGCallee(abstractInfo, functionPtr); + return CGCallee(abstractInfo, functionPtr, CGPointerAuthInfo()); } static CGCallee forDirect(llvm::FunctionCallee functionPtr, const CGCalleeInfo &abstractInfo = CGCalleeInfo()) { - return CGCallee(abstractInfo, functionPtr.getCallee()); + return CGCallee(abstractInfo, functionPtr.getCallee(), CGPointerAuthInfo()); } static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr, @@ -178,7 +220,11 @@ class CGCalleeInfo { if (isVirtual()) return VirtualInfo.MD; assert(isOrdinary()); - return AbstractInfo; + return OrdinaryInfo.AbstractInfo; + 
} + const CGPointerAuthInfo &getPointerAuthInfo() const { + assert(isOrdinary()); + return OrdinaryInfo.PointerAuthInfo; } llvm::Value *getFunctionPointer() const { assert(isOrdinary()); @@ -188,6 +234,10 @@ class CGCalleeInfo { assert(isOrdinary()); KindOrFunctionPointer = SpecialKind(uintptr_t(functionPtr)); } + void setPointerAuthInfo(CGPointerAuthInfo pointerAuth) { + assert(isOrdinary()); + OrdinaryInfo.PointerAuthInfo = pointerAuth; + } bool isVirtual() const { return KindOrFunctionPointer == SpecialKind::Virtual; diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp index 04ef912b18bd49..6c241f7a754f7e 100644 --- a/clang/lib/CodeGen/CGClass.cpp +++ b/clang/lib/CodeGen/CGClass.cpp @@ -910,6 +910,9 @@ namespace { Qualifiers Qual = F->getType().getQualifiers(); if (Qual.hasVolatile() || Qual.hasObjCLifetime()) return false; + if (PointerAuthQualifier Q = F->getType().getPointerAuth()) + if (Q.isAddressDiscriminated()) + return false; return true; } @@ -2494,6 +2497,13 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) { VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo()); VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy); + if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) { + CGPointerAuthInfo PointerAuth = EmitPointerAuthInfo(Schema, nullptr, + GlobalDecl(), + QualType()); + VTableAddressPoint = EmitPointerAuthSign(PointerAuth, VTableAddressPoint); + } + llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField); TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrTy); CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); @@ -2593,6 +2603,13 @@ llvm::Value *CodeGenFunction::GetVTablePtr(Address This, TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTableTy); CGM.DecorateInstructionWithTBAA(VTable, TBAAInfo); + if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) { + CGPointerAuthInfo PointerAuth 
= EmitPointerAuthInfo(Schema, nullptr, + GlobalDecl(), + QualType()); + VTable = cast(EmitPointerAuthAuth(PointerAuth, VTable)); + } + if (CGM.getCodeGenOpts().OptimizationLevel > 0 && CGM.getCodeGenOpts().StrictVTablePointers) CGM.DecorateInstructionWithInvariantGroup(VTable, RD); diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index 7c63743f3b43d1..0c62053781f3ac 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -849,6 +849,15 @@ llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty, } else if (Qc.hasRestrict()) { Tag = llvm::dwarf::DW_TAG_restrict_type; Qc.removeRestrict(); + } else if (Qc.getPointerAuth().isPresent()) { + unsigned Key = Qc.getPointerAuth().getKey(); + bool IsDiscr = Qc.getPointerAuth().isAddressDiscriminated(); + unsigned ExtraDiscr = Qc.getPointerAuth().getExtraDiscriminator(); + Qc.removePtrAuth(); + assert(Qc.empty() && "Unknown type qualifier for debug info"); + auto *FromTy = getOrCreateType(QualType(T, 0), Unit); + return DBuilder.createPtrAuthQualifiedType(FromTy, Key, IsDiscr, + ExtraDiscr); } else { assert(Qc.empty() && "Unknown type qualifier for debug info"); return getOrCreateType(QualType(T, 0), Unit); @@ -874,8 +883,8 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty, Ty->getPointeeType(), Unit); } -llvm::DIType *CGDebugInfo::CreateType(const PointerType *Ty, - llvm::DIFile *Unit) { +llvm::DIType * +CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, Ty->getPointeeType(), Unit); } @@ -997,10 +1006,9 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty, return RetTy; } -llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag, - const Type *Ty, - QualType PointeeTy, - llvm::DIFile *Unit) { +llvm::DIType *CGDebugInfo::CreatePointerLikeType( + llvm::dwarf::Tag Tag, const Type *Ty, QualType PointeeTy, + llvm::DIFile *Unit) { // 
Bit size, align and offset of the type. // Size is always the size of a pointer. We can't use getTypeSize here // because that does not return the correct value for references. diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index 563841c068f60b..bf66b85a6e72d9 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -741,7 +741,13 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit) { Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime(); if (!lifetime) { - llvm::Value *value = EmitScalarExpr(init); + llvm::Value *value; + if (auto ptrauth = lvalue.getQuals().getPointerAuth()) { + value = EmitPointerAuthQualify(ptrauth, init, lvalue.getAddress()); + lvalue.getQuals().removePtrAuth(); + } else { + value = EmitScalarExpr(init); + } if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast(D)); EmitNullabilityCheck(lvalue, value, init->getExprLoc()); diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp index 5b172a3480be17..bfd68a60280e63 100644 --- a/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/clang/lib/CodeGen/CGDeclCXX.cpp @@ -225,7 +225,7 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D, /// Create a stub function, suitable for being passed to atexit, /// which passes the given address to the given destructor function. -llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD, +llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD, llvm::FunctionCallee dtor, llvm::Constant *addr) { // Get the destructor function type, void(*)(void). @@ -254,7 +254,12 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD, CGF.FinishFunction(); - return fn; + // Get a proper function pointer. 
+ FunctionProtoType::ExtProtoInfo EPI(getContext().getDefaultCallingConvention( + /*IsVariadic=*/false, /*IsCXXMethod=*/false)); + QualType fnType = getContext().getFunctionType(getContext().VoidTy, + {getContext().VoidPtrTy}, EPI); + return CGM.getFunctionPointer(fn, fnType); } /// Register a global destructor using the C atexit runtime function. diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index dcd365c8eaf0fb..b8481848474a5d 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -1772,6 +1772,15 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { + // Load from __ptrauth. + if (auto ptrauth = LV.getQuals().getPointerAuth()) { + LV.getQuals().removePtrAuth(); + auto value = EmitLoadOfLValue(LV, Loc).getScalarVal(); + return RValue::get(EmitPointerAuthUnqualify(ptrauth, value, + LV.getType(), LV.getAddress(), + /*known nonnull*/ false)); + } + if (LV.isObjCWeak()) { // load of a __weak object. Address AddrWeakObj = LV.getAddress(); @@ -1950,6 +1959,13 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, return EmitStoreThroughBitfieldLValue(Src, Dst); } + // Handle __ptrauth qualification by re-signing the value. + if (auto pointerAuth = Dst.getQuals().getPointerAuth()) { + Src = RValue::get(EmitPointerAuthQualify(pointerAuth, Src.getScalarVal(), + Dst.getType(), Dst.getAddress(), + /*known nonnull*/ false)); + } + // There's special magic for assigning into an ARC-qualified l-value. 
if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { switch (Lifetime) { @@ -2393,14 +2409,14 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, return LV; } -static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, - const FunctionDecl *FD) { +llvm::Constant *CodeGenModule::getRawFunctionPointer(const FunctionDecl *FD, + llvm::Type *Ty) { if (FD->hasAttr()) { - ConstantAddress aliasee = CGM.GetWeakRefReference(FD); + ConstantAddress aliasee = GetWeakRefReference(FD); return aliasee.getPointer(); } - llvm::Constant *V = CGM.GetAddrOfFunction(FD); + llvm::Constant *V = GetAddrOfFunction(FD, Ty); if (!FD->hasPrototype()) { if (const FunctionProtoType *Proto = FD->getType()->getAs()) { @@ -2408,10 +2424,9 @@ static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, // isn't the same as the type of a use. Correct for this with a // bitcast. QualType NoProtoType = - CGM.getContext().getFunctionNoProtoType(Proto->getReturnType()); - NoProtoType = CGM.getContext().getPointerType(NoProtoType); - V = llvm::ConstantExpr::getBitCast(V, - CGM.getTypes().ConvertType(NoProtoType)); + getContext().getFunctionNoProtoType(Proto->getReturnType()); + NoProtoType = getContext().getPointerType(NoProtoType); + V = llvm::ConstantExpr::getBitCast(V,getTypes().ConvertType(NoProtoType)); } } return V; @@ -2419,7 +2434,7 @@ static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, const FunctionDecl *FD) { - llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD); + llvm::Constant *V = CGF.CGM.getFunctionPointer(FD); CharUnits Alignment = CGF.getContext().getDeclAlign(FD); return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl); @@ -3872,7 +3887,8 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); if (IsBaseCXXThis) SkippedChecks.set(SanitizerKind::Alignment, true); - if 
(IsBaseCXXThis || isa(BaseExpr)) + if (IsBaseCXXThis || isa(BaseExpr) || + isa(Addr.getPointer())) SkippedChecks.set(SanitizerKind::Null, true); EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, /*Alignment=*/CharUnits::Zero(), SkippedChecks); @@ -4548,10 +4564,70 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) { return CGCallee::forBuiltin(builtinID, FD); } - llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD); + llvm::Constant *calleePtr = CGF.CGM.getRawFunctionPointer(FD); return CGCallee::forDirect(calleePtr, GlobalDecl(FD)); } +static unsigned getPointerAuthKeyValue(const ASTContext &Context, + const Expr *key) { + Expr::EvalResult result; + bool success = key->EvaluateAsInt(result, Context); + assert(success && "pointer auth key wasn't a constant?"); (void) success; + return result.Val.getInt().getZExtValue(); +} + +static bool isFunctionPointerAuth(CodeGenModule &CGM, const Expr *key, + const Expr *discriminator) { + // Verify that the ABI uses function-pointer signing at all. + auto &authSchema = CGM.getCodeGenOpts().PointerAuth.FunctionPointers; + if (!authSchema.isEnabled()) + return false; + + // Verify that the key matches the ABI's key. + if (authSchema.getKey() != getPointerAuthKeyValue(CGM.getContext(), key)) + return false; + + // If the ABI uses weird discrimination for function pointers, just give up. 
+ assert(!authSchema.isAddressDiscriminated()); + if (authSchema.getOtherDiscrimination() + != PointerAuthSchema::Discrimination::None) { + return false; + } + + if (discriminator->getType()->isPointerType()) { + return discriminator->isNullPointerConstant(CGM.getContext(), + Expr::NPC_NeverValueDependent); + } else { + assert(discriminator->getType()->isIntegerType()); + Expr::EvalResult result; + return (discriminator->EvaluateAsInt(result, CGM.getContext()) && + result.Val.getInt() == 0); + } +} + +/// Given an expression for a function pointer that's been signed with +/// a variant scheme, and given a constant expression for the key value +/// and an expression for the discriminator, produce a callee for the +/// function pointer using that scheme. +static CGCallee EmitSignedFunctionPointerCallee(CodeGenFunction &CGF, + const Expr *functionPointerExpr, + const Expr *keyExpr, + const Expr *discriminatorExpr) { + llvm::Value *calleePtr = CGF.EmitScalarExpr(functionPointerExpr); + auto key = getPointerAuthKeyValue(CGF.getContext(), keyExpr); + auto discriminator = CGF.EmitScalarExpr(discriminatorExpr); + + if (discriminator->getType()->isPointerTy()) + discriminator = CGF.Builder.CreatePtrToInt(discriminator, CGF.IntPtrTy); + + auto functionType = + functionPointerExpr->getType()->castAs()->getPointeeType(); + CGCalleeInfo calleeInfo(functionType->getAs()); + CGPointerAuthInfo pointerAuth(key, discriminator); + CGCallee callee(calleeInfo, calleePtr, pointerAuth); + return callee; +} + CGCallee CodeGenFunction::EmitCallee(const Expr *E) { E = E->IgnoreParens(); @@ -4562,6 +4638,27 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) { return EmitCallee(ICE->getSubExpr()); } + // Try to remember the original __ptrauth qualifier for loads of + // function pointers. 
+ if (ICE->getCastKind() == CK_LValueToRValue) { + auto subExpr = ICE->getSubExpr(); + if (auto ptrType = subExpr->getType()->getAs()) { + auto result = EmitOrigPointerRValue(E); + + QualType functionType = ptrType->getPointeeType(); + assert(functionType->isFunctionType()); + + GlobalDecl GD; + if (const auto *VD = + dyn_cast_or_null(E->getReferencedDeclOfCallee())) { + GD = GlobalDecl(VD); + } + CGCalleeInfo calleeInfo(functionType->getAs(), GD); + CGCallee callee(calleeInfo, result.first, result.second); + return callee; + } + } + // Resolve direct calls. } else if (auto DRE = dyn_cast(E)) { if (auto FD = dyn_cast(DRE->getDecl())) { @@ -4580,6 +4677,36 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) { // Treat pseudo-destructor calls differently. } else if (auto PDE = dyn_cast(E)) { return CGCallee::forPseudoDestructor(PDE); + + // Peephole specific builtin calls. + } else if (auto CE = dyn_cast(E)) { + if (unsigned builtin = CE->getBuiltinCallee()) { + // If the callee is a __builtin_ptrauth_sign_unauthenticated to the + // ABI function-pointer signing schema, perform an unauthenticated call. + if (builtin == Builtin::BI__builtin_ptrauth_sign_unauthenticated && + isFunctionPointerAuth(CGM, CE->getArg(1), CE->getArg(2))) { + CGCallee callee = EmitCallee(CE->getArg(0)); + if (callee.isOrdinary()) + callee.setPointerAuthInfo(CGPointerAuthInfo()); + return callee; + } + + // If the callee is a __builtin_ptrauth_auth_and_resign to the + // ABI function-pointer signing schema, avoid the intermediate resign. + if (builtin == Builtin::BI__builtin_ptrauth_auth_and_resign && + isFunctionPointerAuth(CGM, CE->getArg(3), CE->getArg(4))) { + return EmitSignedFunctionPointerCallee(*this, CE->getArg(0), + CE->getArg(1), CE->getArg(2)); + + // If the callee is a __builtin_ptrauth_auth when ABI function pointer + // signing is disabled, we need to promise to use the unattackable + // OperandBundle code pattern. 
+ } else if (builtin == Builtin::BI__builtin_ptrauth_auth && + !CGM.getCodeGenOpts().PointerAuth.FunctionPointers.isEnabled()) { + return EmitSignedFunctionPointerCallee(*this, CE->getArg(0), + CE->getArg(1), CE->getArg(2)); + } + } } // Otherwise, we have an indirect reference. @@ -4593,14 +4720,14 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) { calleePtr = EmitLValue(E).getPointer(); } assert(functionType->isFunctionType()); - GlobalDecl GD; if (const auto *VD = dyn_cast_or_null(E->getReferencedDeclOfCallee())) GD = GlobalDecl(VD); CGCalleeInfo calleeInfo(functionType->getAs(), GD); - CGCallee callee(calleeInfo, calleePtr); + CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType); + CGCallee callee(calleeInfo, calleePtr, pointerAuth); return callee; } @@ -4623,6 +4750,17 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { switch (getEvaluationKind(E->getType())) { case TEK_Scalar: { + if (auto ptrauth = E->getLHS()->getType().getPointerAuth()) { + LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); + LValue CopiedLV = LV; + CopiedLV.getQuals().removePtrAuth(); + llvm::Value *RV = EmitPointerAuthQualify(ptrauth, E->getRHS(), + CopiedLV.getAddress()); + EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc()); + EmitStoreThroughLValue(RValue::get(RV), CopiedLV); + return LV; + } + switch (E->getLHS()->getType().getObjCLifetime()) { case Qualifiers::OCL_Strong: return EmitARCStoreStrong(E, /*ignored*/ false).first; diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp index 96e8c9c0d0e61e..ad62a597f30a68 100644 --- a/clang/lib/CodeGen/CGExprConstant.cpp +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -1428,8 +1428,37 @@ llvm::GlobalValue *ConstantEmitter::getCurrentAddrPrivate() { return global; } +static llvm::Constant *getUnfoldableValue(llvm::Constant *C) { + // Look through any constant expressions that might get folded + while (auto CE = dyn_cast(C)) { + switch 
(CE->getOpcode()) { + // Simple type changes. + case llvm::Instruction::BitCast: + case llvm::Instruction::IntToPtr: + case llvm::Instruction::PtrToInt: + break; + + // GEPs, if all the indices are zero. + case llvm::Instruction::GetElementPtr: + for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) + if (!CE->getOperand(i)->isNullValue()) + return C; + break; + + default: + return C; + } + C = CE->getOperand(0); + } + return C; +} + void ConstantEmitter::registerCurrentAddrPrivate(llvm::Constant *signal, llvm::GlobalValue *placeholder) { + // Strip anything from the signal value that might get folded into other + // constant expressions in the final initializer. + signal = getUnfoldableValue(signal); + assert(!PlaceholderAddresses.empty()); assert(PlaceholderAddresses.back().first == nullptr); assert(PlaceholderAddresses.back().second == placeholder); @@ -1487,7 +1516,7 @@ namespace { // messing around with llvm::Constant structures, which never itself // does anything that should be visible in compiler output. 
for (auto &entry : Locations) { - assert(entry.first->getParent() == nullptr && "not a placeholder!"); + assert(entry.first->getName() == "" && "not a placeholder!"); entry.first->replaceAllUsesWith(entry.second); entry.first->eraseFromParent(); } @@ -1725,10 +1754,13 @@ namespace { struct ConstantLValue { llvm::Constant *Value; bool HasOffsetApplied; + bool HasDestPointerAuth; /*implicit*/ ConstantLValue(llvm::Constant *value, - bool hasOffsetApplied = false) - : Value(value), HasOffsetApplied(false) {} + bool hasOffsetApplied = false, + bool hasDestPointerAuth = false) + : Value(value), HasOffsetApplied(hasOffsetApplied), + HasDestPointerAuth(hasDestPointerAuth) {} /*implicit*/ ConstantLValue(ConstantAddress address) : ConstantLValue(address.getPointer()) {} @@ -1772,6 +1804,14 @@ class ConstantLValueEmitter : public ConstStmtVisitor + emitPointerAuthDiscriminator(const Expr *E); + llvm::Constant *tryEmitConstantSignedPointer(llvm::Constant *ptr, + PointerAuthQualifier auth); + bool hasNonZeroOffset() const { return !Value.getLValueOffset().isZero(); } @@ -1830,6 +1870,14 @@ llvm::Constant *ConstantLValueEmitter::tryEmit() { value = applyOffset(value); } + // Apply pointer-auth signing from the destination type. + if (auto pointerAuth = DestType.getPointerAuth()) { + if (!result.HasDestPointerAuth) { + value = tryEmitConstantSignedPointer(value, pointerAuth); + if (!value) return nullptr; + } + } + // Convert to the appropriate type; this could be an lvalue for // an integer. 
FIXME: performAddrSpaceCast if (isa(destTy)) @@ -1867,8 +1915,15 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { if (D->hasAttr()) return CGM.GetWeakRefReference(D).getPointer(); - if (auto FD = dyn_cast(D)) - return CGM.GetAddrOfFunction(FD); + if (auto FD = dyn_cast(D)) { + if (auto pointerAuth = DestType.getPointerAuth()) { + llvm::Constant *C = CGM.getRawFunctionPointer(FD); + C = applyOffset(C); + C = tryEmitConstantSignedPointer(C, pointerAuth); + return ConstantLValue(C, /*applied offset*/ true, /*signed*/ true); + } + return CGM.getFunctionPointer(FD); + } if (auto VD = dyn_cast(D)) { // We can never refer to a variable with local storage. @@ -1958,6 +2013,9 @@ ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) { ConstantLValue ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) { unsigned builtin = E->getBuiltinCallee(); + if (builtin == Builtin::BI__builtin_ptrauth_sign_constant) + return emitPointerAuthSignConstant(E); + if (builtin != Builtin::BI__builtin___CFStringMakeConstantString && builtin != Builtin::BI__builtin___NSStringMakeConstantString) return nullptr; @@ -1971,6 +2029,99 @@ ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) { } } +/// Try to emit a constant signed pointer, given a raw pointer and the +/// destination ptrauth qualifier. +/// +/// This can fail if the qualifier needs address discrimination and the +/// emitter is in an abstract mode. +llvm::Constant * +ConstantLValueEmitter::tryEmitConstantSignedPointer( + llvm::Constant *unsignedPointer, + PointerAuthQualifier schema) { + assert(schema && "applying trivial ptrauth schema"); + auto key = schema.getKey(); + + // Create an address placeholder if we're using address discrimination. + llvm::GlobalValue *storageAddress = nullptr; + if (schema.isAddressDiscriminated()) { + // We can't do this if the emitter is in an abstract state. 
+ if (Emitter.isAbstract()) + return nullptr; + + storageAddress = Emitter.getCurrentAddrPrivate(); + } + + // Fetch the extra discriminator. + llvm::Constant *otherDiscriminator = + llvm::ConstantInt::get(CGM.IntPtrTy, schema.getExtraDiscriminator()); + + auto signedPointer = + CGM.getConstantSignedPointer(unsignedPointer, key, storageAddress, + otherDiscriminator); + + if (schema.isAddressDiscriminated()) + Emitter.registerCurrentAddrPrivate(signedPointer, storageAddress); + + return signedPointer; +} + +ConstantLValue +ConstantLValueEmitter::emitPointerAuthSignConstant(const CallExpr *E) { + auto unsignedPointer = emitPointerAuthPointer(E->getArg(0)); + auto key = emitPointerAuthKey(E->getArg(1)); + llvm::Constant *storageAddress; + llvm::Constant *otherDiscriminator; + std::tie(storageAddress, otherDiscriminator) = + emitPointerAuthDiscriminator(E->getArg(2)); + + auto signedPointer = + CGM.getConstantSignedPointer(unsignedPointer, key, storageAddress, + otherDiscriminator); + return signedPointer; +} + +llvm::Constant *ConstantLValueEmitter::emitPointerAuthPointer(const Expr *E) { + Expr::EvalResult result; + bool succeeded = E->EvaluateAsRValue(result, CGM.getContext()); + assert(succeeded); (void) succeeded; + + // The assertions here are all checked by Sema. 
+ assert(result.Val.isLValue()); + auto base = result.Val.getLValueBase().get(); + if (auto decl = dyn_cast_or_null(base)) { + assert(result.Val.getLValueOffset().isZero()); + return CGM.getRawFunctionPointer(decl); + } + return ConstantEmitter(CGM, Emitter.CGF) + .emitAbstract(E->getExprLoc(), result.Val, E->getType()); +} + +unsigned ConstantLValueEmitter::emitPointerAuthKey(const Expr *E) { + return E->EvaluateKnownConstInt(CGM.getContext()).getZExtValue(); +} + +std::pair +ConstantLValueEmitter::emitPointerAuthDiscriminator(const Expr *E) { + E = E->IgnoreParens(); + + if (auto call = dyn_cast(E)) { + if (call->getBuiltinCallee() == + Builtin::BI__builtin_ptrauth_blend_discriminator) { + auto pointer = ConstantEmitter(CGM).emitAbstract(call->getArg(0), + call->getArg(0)->getType()); + auto extra = ConstantEmitter(CGM).emitAbstract(call->getArg(1), + call->getArg(1)->getType()); + return { pointer, extra }; + } + } + + auto result = ConstantEmitter(CGM).emitAbstract(E, E->getType()); + if (result->getType()->isPointerTy()) + return { result, nullptr }; + else + return { nullptr, result }; +} + ConstantLValue ConstantLValueEmitter::VisitBlockExpr(const BlockExpr *E) { StringRef functionName; diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 55a413a2a7179a..91a0b61f2fda35 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -1930,6 +1930,58 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { return V; } +static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D) { + return !D->isWeak(); +} + +static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) { + E = E->IgnoreParens(); + + if (auto UO = dyn_cast(E)) { + if (UO->getOpcode() == UO_Deref) { + return CGF.isPointerKnownNonNull(UO->getSubExpr()); + } + } + + if (auto DRE = dyn_cast(E)) { + return isDeclRefKnownNonNull(CGF, DRE->getDecl()); + } else if (auto ME = dyn_cast(E)) { + if 
(isa(ME->getMemberDecl())) + return true; + return isDeclRefKnownNonNull(CGF, ME->getMemberDecl()); + } + + // Array subscripts? Anything else? + + return false; +} + +bool CodeGenFunction::isPointerKnownNonNull(const Expr *E) { + assert(E->getType()->isPointerType()); + + E = E->IgnoreParens(); + + if (isa(E)) + return true; + + if (auto UO = dyn_cast(E)) { + if (UO->getOpcode() == UO_AddrOf) { + return isLValueKnownNonNull(*this, UO->getSubExpr()); + } + } + + if (auto CE = dyn_cast(E)) { + if (CE->getCastKind() == CK_FunctionToPointerDecay || + CE->getCastKind() == CK_ArrayToPointerDecay) { + return isLValueKnownNonNull(*this, CE->getSubExpr()); + } + } + + // Maybe honor __nonnull? + + return false; +} + bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) { const Expr *E = CE->getSubExpr(); @@ -3906,6 +3958,20 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { Value *RHS; LValue LHS; + if (auto ptrauth = E->getLHS()->getType().getPointerAuth()) { + LValue LV = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); + LV.getQuals().removePtrAuth(); + llvm::Value *RV = CGF.EmitPointerAuthQualify(ptrauth, E->getRHS(), + LV.getAddress()); + CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc()); + CGF.EmitStoreThroughLValue(RValue::get(RV), LV); + + if (Ignore) return nullptr; + RV = CGF.EmitPointerAuthUnqualify(ptrauth, RV, LV.getType(), + LV.getAddress(), /*nonnull*/ false); + return RV; + } + switch (E->getLHS()->getType().getObjCLifetime()) { case Qualifiers::OCL_Strong: std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp index 05615aa1288160..3f75e65a5d4e05 100644 --- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp +++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp @@ -261,6 +261,13 @@ struct GenBinaryFuncName : CopyStructVisitor, IsMove>, this->appendStr("_tv" + llvm::to_string(OffsetInBits) + "w" + 
llvm::to_string(getFieldSize(FD, FT, this->Ctx))); } + + void visitPtrAuth(QualType FT, const FieldDecl *FD, + CharUnits CurStructOffset) { + this->appendStr("_pa"); + CharUnits FieldOffset = CurStructOffset + this->getFieldOffset(FD); + this->appendStr(llvm::to_string(FieldOffset.getQuantity())); + } }; struct GenDefaultInitializeFuncName @@ -563,6 +570,14 @@ struct GenBinaryFunc : CopyStructVisitor, RValue SrcVal = this->CGF->EmitLoadOfLValue(SrcLV, SourceLocation()); this->CGF->EmitStoreThroughLValue(SrcVal, DstLV); } + + void visitPtrAuth(QualType FT, const FieldDecl *FD, CharUnits CurStackOffset, + std::array Addrs) { + PointerAuthQualifier PtrAuth = FT.getPointerAuth(); + Addrs[DstIdx] = this->getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD); + Addrs[SrcIdx] = this->getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD); + this->CGF->EmitPointerAuthCopy(PtrAuth, FT, Addrs[DstIdx], Addrs[SrcIdx]); + } }; // These classes that emit the special functions for a non-trivial struct. diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp index 1fa72678081af5..ab90ac2bd56ec7 100644 --- a/clang/lib/CodeGen/CGObjC.cpp +++ b/clang/lib/CodeGen/CGObjC.cpp @@ -3559,7 +3559,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction( EmitStmt(TheCall); FinishFunction(); - HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); + HelperFn = CGM.getFunctionPointer(Fn, FD->getType()); + HelperFn = llvm::ConstantExpr::getBitCast(HelperFn, VoidPtrTy); CGM.setAtomicSetterHelperFnMap(Ty, HelperFn); return HelperFn; } @@ -3664,7 +3665,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction( AggValueSlot::DoesNotOverlap)); FinishFunction(); - HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); + HelperFn = CGM.getFunctionPointer(Fn, FD->getType()); + HelperFn = llvm::ConstantExpr::getBitCast(HelperFn, VoidPtrTy); CGM.setAtomicGetterHelperFnMap(Ty, HelperFn); return HelperFn; } diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp 
b/clang/lib/CodeGen/CGObjCGNU.cpp index d2c089d0360e1a..f4a6fa94cfd41b 100644 --- a/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -2600,7 +2600,8 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF, llvm::Type::getInt1Ty(VMContext), IsClassMessage))}; llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD); - CGCallee callee(CGCalleeInfo(), imp); + CGPointerAuthInfo pointerAuth; // TODO + CGCallee callee(CGCalleeInfo(), imp, pointerAuth); llvm::CallBase *call; RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call); @@ -2720,7 +2721,8 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF, imp = EnforceType(Builder, imp, MSI.MessengerType); llvm::CallBase *call; - CGCallee callee(CGCalleeInfo(), imp); + CGPointerAuthInfo pointerAuth; // TODO + CGCallee callee(CGCalleeInfo(), imp, pointerAuth); RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call); call->setMetadata(msgSendMDKind, node); diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 381a22be75b345..338ac145642c66 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -2961,7 +2961,7 @@ std::string CGObjCCommonMac::getRCBlockLayoutStr(CodeGenModule &CGM, const CGBlockInfo &blockInfo) { fillRunSkipBlockVars(CGM, blockInfo); return getBlockLayoutInfoString(RunSkipBlockVars, - blockInfo.needsCopyDisposeHelpers()); + blockInfo.needsCopyDisposeHelpers(CGM.getContext())); } llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM, @@ -6639,7 +6639,14 @@ void CGObjCNonFragileABIMac::emitMethodConstant(ConstantArrayBuilder &builder, } else { llvm::Function *fn = GetMethodDefinition(MD); assert(fn && "no definition for method?"); - method.addBitCast(fn, ObjCTypes.Int8PtrTy); + + if (const auto &schema = + CGM.getCodeGenOpts().PointerAuth.ObjCMethodListFunctionPointers) { + auto *bitcast = llvm::ConstantExpr::getBitCast(fn, ObjCTypes.Int8PtrTy); + 
method.addSignedPointer(bitcast, schema, GlobalDecl(), QualType()); + } else { + method.addBitCast(fn, ObjCTypes.Int8PtrTy); + } } method.finishAndAddTo(builder); @@ -7212,7 +7219,8 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF, llvm::Value *calleePtr = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn"); calleePtr = CGF.Builder.CreateBitCast(calleePtr, MSI.MessengerType); - CGCallee callee(CGCalleeInfo(), calleePtr); + CGPointerAuthInfo pointerAuth; // This code path is unsupported. + CGCallee callee(CGCalleeInfo(), calleePtr, pointerAuth); RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args); return nullReturn.complete(CGF, returnSlot, result, resultType, formalArgs, @@ -7724,11 +7732,17 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID, } llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2); + llvm::Constant *VTablePtr = llvm::ConstantExpr::getInBoundsGetElementPtr( + VTableGV->getValueType(), VTableGV, VTableIdx); + ConstantInitBuilder builder(CGM); auto values = builder.beginStruct(ObjCTypes.EHTypeTy); - values.add( - llvm::ConstantExpr::getInBoundsGetElementPtr(VTableGV->getValueType(), - VTableGV, VTableIdx)); + + if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) { + values.addSignedPointer(VTablePtr, Schema, GlobalDecl(), QualType()); + } else { + values.add(VTablePtr); + } values.add(GetClassName(ClassName)); values.add(GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition)); diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp new file mode 100644 index 00000000000000..ab870df65b78bb --- /dev/null +++ b/clang/lib/CodeGen/CGPointerAuth.cpp @@ -0,0 +1,644 @@ +//===--- CGPointerAuth.cpp - IR generation for pointer authentication -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains common routines relating to the emission of +// pointer authentication operations. +// +//===----------------------------------------------------------------------===// + + +#include "CGCXXABI.h" +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "CGCall.h" +#include "clang/AST/StableHash.h" +#include "clang/CodeGen/ConstantInitBuilder.h" +#include "clang/CodeGen/CodeGenABITypes.h" +#include "clang/Basic/PointerAuthOptions.h" + +#include "llvm/ADT/DenseMap.h" +#include "llvm/IR/ValueMap.h" +#include "llvm/Analysis/ValueTracking.h" +#include + +using namespace clang; +using namespace CodeGen; + +/// Given a pointer-authentication schema, return a concrete "other" +/// discriminator for it. +llvm::Constant * +CodeGenModule::getPointerAuthOtherDiscriminator(const PointerAuthSchema &schema, + GlobalDecl decl, + QualType type) { + switch (schema.getOtherDiscrimination()) { + case PointerAuthSchema::Discrimination::None: + return nullptr; + + case PointerAuthSchema::Discrimination::Type: + assert(!type.isNull() && + "type not provided for type-discriminated schema"); + return llvm::ConstantInt::get( + IntPtrTy, getContext().getPointerAuthTypeDiscriminator(type)); + + case PointerAuthSchema::Discrimination::Decl: + assert(decl.getDecl() && + "declaration not provided for decl-discriminated schema"); + return llvm::ConstantInt::get(IntPtrTy, + getPointerAuthDeclDiscriminator(decl)); + } + llvm_unreachable("bad discrimination kind"); +} + +uint16_t CodeGen::getPointerAuthTypeDiscriminator(CodeGenModule &CGM, + QualType functionType) { + return CGM.getContext().getPointerAuthTypeDiscriminator(functionType); +} + +/// Compute an ABI-stable hash of the given string. 
+uint64_t CodeGen::computeStableStringHash(StringRef string) { + return clang::getStableStringHash(string); +} + +uint16_t CodeGen::getPointerAuthDeclDiscriminator(CodeGenModule &CGM, + GlobalDecl declaration) { + return CGM.getPointerAuthDeclDiscriminator(declaration); +} + +/// Return the "other" decl-specific discriminator for the given decl. +uint16_t +CodeGenModule::getPointerAuthDeclDiscriminator(GlobalDecl declaration) { + uint16_t &entityHash = PtrAuthDiscriminatorHashes[declaration]; + + if (entityHash == 0) { + StringRef name = getMangledName(declaration); + entityHash = getPointerAuthStringDiscriminator(getContext(), name); + } + + return entityHash; +} + +/// Return the abstract pointer authentication schema for a +/// function pointer of the given type. +CGPointerAuthInfo +CodeGenModule::getFunctionPointerAuthInfo(QualType functionType) { + // Check for a generic pointer authentication schema. + auto &schema = getCodeGenOpts().PointerAuth.FunctionPointers; + if (!schema) return CGPointerAuthInfo(); + + assert(!schema.isAddressDiscriminated() && + "function pointers cannot use address-specific discrimination"); + + auto discriminator = + getPointerAuthOtherDiscriminator(schema, GlobalDecl(), functionType); + return CGPointerAuthInfo(schema.getKey(), discriminator); +} + +CGPointerAuthInfo +CodeGenModule::getMemberFunctionPointerAuthInfo(QualType functionType) { + assert(functionType->getAs() && + "MemberPointerType expected"); + auto &schema = getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers; + if (!schema) + return CGPointerAuthInfo(); + + assert(!schema.isAddressDiscriminated() && + "function pointers cannot use address-specific discrimination"); + + auto discriminator = + getPointerAuthOtherDiscriminator(schema, GlobalDecl(), functionType); + return CGPointerAuthInfo(schema.getKey(), discriminator); +} + +/// Return the natural pointer authentication for values of the given +/// pointer type. 
+static CGPointerAuthInfo getPointerAuthInfoForType(CodeGenModule &CGM, + QualType type) { + assert(type->isPointerType()); + + // Function pointers use the function-pointer schema by default. + if (auto ptrTy = type->getAs()) { + auto functionType = ptrTy->getPointeeType(); + if (functionType->isFunctionType()) { + return CGM.getFunctionPointerAuthInfo(functionType); + } + } + + // Normal data pointers never use direct pointer authentication by default. + return CGPointerAuthInfo(); +} + +llvm::Value *CodeGenFunction::EmitPointerAuthBlendDiscriminator( + llvm::Value *storageAddress, llvm::Value *discriminator) { + storageAddress = Builder.CreatePtrToInt(storageAddress, IntPtrTy); + auto intrinsic = CGM.getIntrinsic(llvm::Intrinsic::ptrauth_blend, + { CGM.IntPtrTy }); + return Builder.CreateCall(intrinsic, {storageAddress, discriminator}); +} + +/// Emit the concrete pointer authentication informaton for the +/// given authentication schema. +CGPointerAuthInfo +CodeGenFunction::EmitPointerAuthInfo(const PointerAuthSchema &schema, + llvm::Value *storageAddress, + GlobalDecl schemaDecl, + QualType schemaType) { + if (!schema) return CGPointerAuthInfo(); + + llvm::Value *discriminator = + CGM.getPointerAuthOtherDiscriminator(schema, schemaDecl, schemaType); + + if (schema.isAddressDiscriminated()) { + assert(storageAddress && + "address not provided for address-discriminated schema"); + + if (discriminator) + discriminator = + EmitPointerAuthBlendDiscriminator(storageAddress, discriminator); + else + discriminator = Builder.CreatePtrToInt(storageAddress, IntPtrTy); + } + + return CGPointerAuthInfo(schema.getKey(), discriminator); +} + +CGPointerAuthInfo +CodeGenFunction::EmitPointerAuthInfo(PointerAuthQualifier qualifier, + Address storageAddress) { + assert(qualifier && + "don't call this if you don't know that the qualifier is present"); + + llvm::Value *discriminator = nullptr; + if (unsigned extra = qualifier.getExtraDiscriminator()) { + discriminator = 
llvm::ConstantInt::get(IntPtrTy, extra); + } + + if (qualifier.isAddressDiscriminated()) { + assert(storageAddress.isValid() && + "address discrimination without address"); + auto storagePtr = storageAddress.getPointer(); + if (discriminator) { + discriminator = + EmitPointerAuthBlendDiscriminator(storagePtr, discriminator); + } else { + discriminator = Builder.CreatePtrToInt(storagePtr, IntPtrTy); + } + } + + return CGPointerAuthInfo(qualifier.getKey(), discriminator); +} + +static std::pair +emitLoadOfOrigPointerRValue(CodeGenFunction &CGF, const LValue &lv, + SourceLocation loc) { + auto value = CGF.EmitLoadOfScalar(lv, loc); + CGPointerAuthInfo authInfo; + if (auto ptrauth = lv.getQuals().getPointerAuth()) { + authInfo = CGF.EmitPointerAuthInfo(ptrauth, lv.getAddress()); + } else { + authInfo = getPointerAuthInfoForType(CGF.CGM, lv.getType()); + } + return { value, authInfo }; +} + +std::pair +CodeGenFunction::EmitOrigPointerRValue(const Expr *E) { + assert(E->getType()->isPointerType()); + + E = E->IgnoreParens(); + if (auto load = dyn_cast(E)) { + if (load->getCastKind() == CK_LValueToRValue) { + E = load->getSubExpr()->IgnoreParens(); + + // We're semantically required to not emit loads of certain DREs naively. + if (auto refExpr = dyn_cast(const_cast(E))) { + if (auto result = tryEmitAsConstant(refExpr)) { + // Fold away a use of an intermediate variable. + if (!result.isReference()) + return { result.getValue(), + getPointerAuthInfoForType(CGM, refExpr->getType()) }; + + // Fold away a use of an intermediate reference. + auto lv = result.getReferenceLValue(*this, refExpr); + return emitLoadOfOrigPointerRValue(*this, lv, refExpr->getLocation()); + } + } + + // Otherwise, load and use the pointer + auto lv = EmitCheckedLValue(E, CodeGenFunction::TCK_Load); + return emitLoadOfOrigPointerRValue(*this, lv, E->getExprLoc()); + } + } + + // Emit direct references to functions without authentication. 
+ if (auto DRE = dyn_cast(E)) { + if (auto FD = dyn_cast(DRE->getDecl())) { + return { CGM.getRawFunctionPointer(FD), CGPointerAuthInfo() }; + } + } else if (auto ME = dyn_cast(E)) { + if (auto FD = dyn_cast(ME->getMemberDecl())) { + EmitIgnoredExpr(ME->getBase()); + return { CGM.getRawFunctionPointer(FD), CGPointerAuthInfo() }; + } + } + + // Fallback: just use the normal rules for the type. + auto value = EmitScalarExpr(E); + return { value, getPointerAuthInfoForType(CGM, E->getType()) }; +} + +llvm::Value * +CodeGenFunction::EmitPointerAuthQualify(PointerAuthQualifier destQualifier, + const Expr *E, + Address destStorageAddress) { + assert(destQualifier); + + auto src = EmitOrigPointerRValue(E); + auto value = src.first; + auto curAuthInfo = src.second; + + auto destAuthInfo = EmitPointerAuthInfo(destQualifier, destStorageAddress); + return EmitPointerAuthResign(value, E->getType(), curAuthInfo, destAuthInfo, + isPointerKnownNonNull(E)); +} + +llvm::Value * +CodeGenFunction::EmitPointerAuthQualify(PointerAuthQualifier destQualifier, + llvm::Value *value, + QualType pointerType, + Address destStorageAddress, + bool isKnownNonNull) { + assert(destQualifier); + + auto curAuthInfo = getPointerAuthInfoForType(CGM, pointerType); + auto destAuthInfo = EmitPointerAuthInfo(destQualifier, destStorageAddress); + return EmitPointerAuthResign(value, pointerType, curAuthInfo, destAuthInfo, + isKnownNonNull); +} + +llvm::Value * +CodeGenFunction::EmitPointerAuthUnqualify(PointerAuthQualifier curQualifier, + llvm::Value *value, + QualType pointerType, + Address curStorageAddress, + bool isKnownNonNull) { + assert(curQualifier); + + auto curAuthInfo = EmitPointerAuthInfo(curQualifier, curStorageAddress); + auto destAuthInfo = getPointerAuthInfoForType(CGM, pointerType); + return EmitPointerAuthResign(value, pointerType, curAuthInfo, destAuthInfo, + isKnownNonNull); +} + +static bool isZeroConstant(llvm::Value *value) { + if (auto ci = dyn_cast(value)) + return ci->isZero(); + 
return false; +} + +llvm::Value * +CodeGenFunction::EmitPointerAuthResign(llvm::Value *value, QualType type, + const CGPointerAuthInfo &curAuthInfo, + const CGPointerAuthInfo &newAuthInfo, + bool isKnownNonNull) { + // Fast path: if neither schema wants a signature, we're done. + if (!curAuthInfo && !newAuthInfo) + return value; + + // If the value is obviously null, we're done. + auto null = + CGM.getNullPointer(cast(value->getType()), type); + if (value == null) { + return value; + } + + // If both schemas sign the same way, we're done. + if (curAuthInfo && newAuthInfo && + curAuthInfo.getKey() == newAuthInfo.getKey()) { + auto curD = curAuthInfo.getDiscriminator(); + auto newD = newAuthInfo.getDiscriminator(); + if (curD == newD || + (curD == nullptr && isZeroConstant(newD)) || + (newD == nullptr && isZeroConstant(curD))) + return value; + } + + llvm::BasicBlock *initBB = Builder.GetInsertBlock(); + llvm::BasicBlock *resignBB = nullptr, *contBB = nullptr; + + // Null pointers have to be mapped to null, and the ptrauth_resign + // intrinsic doesn't do that. + if (!isKnownNonNull && !llvm::isKnownNonZero(value, CGM.getDataLayout())) { + contBB = createBasicBlock("resign.cont"); + resignBB = createBasicBlock("resign.nonnull"); + + auto isNonNull = Builder.CreateICmpNE(value, null); + Builder.CreateCondBr(isNonNull, resignBB, contBB); + EmitBlock(resignBB); + } + + // Perform the auth/sign/resign operation. + if (!newAuthInfo) { + value = EmitPointerAuthAuth(curAuthInfo, value); + } else if (!curAuthInfo) { + value = EmitPointerAuthSign(newAuthInfo, value); + } else { + value = EmitPointerAuthResignCall(value, curAuthInfo, newAuthInfo); + } + + // Clean up with a phi if we branched before. 
+ if (contBB) { + EmitBlock(contBB); + auto phi = Builder.CreatePHI(value->getType(), 2); + phi->addIncoming(null, initBB); + phi->addIncoming(value, resignBB); + value = phi; + } + + return value; +} + +void CodeGenFunction::EmitPointerAuthCopy(PointerAuthQualifier qualifier, + QualType type, + Address destAddress, + Address srcAddress) { + assert(qualifier); + + llvm::Value *value = Builder.CreateLoad(srcAddress); + + // If we're using address-discrimination, we have to re-sign the value. + if (qualifier.isAddressDiscriminated()) { + auto srcPtrAuth = EmitPointerAuthInfo(qualifier, srcAddress); + auto destPtrAuth = EmitPointerAuthInfo(qualifier, destAddress); + value = EmitPointerAuthResign(value, type, srcPtrAuth, destPtrAuth, + /*is known nonnull*/ false); + } + + Builder.CreateStore(value, destAddress); +} + +/// We use an abstract, side-allocated cache for signed function pointers +/// because (1) most compiler invocations will not need this cache at all, +/// since they don't use signed function pointers, and (2) the +/// representation is pretty complicated (an llvm::ValueMap) and we don't +/// want to have to include that information in CodeGenModule.h. +template +static CacheTy &getOrCreateCache(void *&abstractStorage) { + auto cache = static_cast(abstractStorage); + if (cache) return *cache; + + abstractStorage = cache = new CacheTy(); + return *cache; +} + +template +static void destroyCache(void *&abstractStorage) { + delete static_cast(abstractStorage); + abstractStorage = nullptr; +} + +namespace { +struct PointerAuthConstantEntry { + unsigned Key; + llvm::Constant *OtherDiscriminator; + llvm::GlobalVariable *Global; +}; + +using PointerAuthConstantEntries = + std::vector; +using ByConstantCacheTy = + llvm::ValueMap; +using ByDeclCacheTy = + llvm::DenseMap; +} + +/// Build a global signed-pointer constant. 
+static llvm::GlobalVariable * +buildConstantSignedPointer(CodeGenModule &CGM, + llvm::Constant *pointer, + unsigned key, + llvm::Constant *storageAddress, + llvm::Constant *otherDiscriminator) { + ConstantInitBuilder builder(CGM); + auto values = builder.beginStruct(); + values.addBitCast(pointer, CGM.Int8PtrTy); + values.addInt(CGM.Int32Ty, key); + if (storageAddress) { + if (isa(storageAddress)) { + assert(!storageAddress->isNullValue() && + "expecting pointer or special address-discriminator indicator"); + values.add(storageAddress); + } else { + values.add(llvm::ConstantExpr::getPtrToInt(storageAddress, CGM.IntPtrTy)); + } + } else { + values.addInt(CGM.SizeTy, 0); + } + if (otherDiscriminator) { + assert(otherDiscriminator->getType() == CGM.SizeTy); + values.add(otherDiscriminator); + } else { + values.addInt(CGM.SizeTy, 0); + } + + auto *stripped = pointer->stripPointerCasts(); + StringRef name; + if (const auto *origGlobal = dyn_cast(stripped)) + name = origGlobal->getName(); + else if (const auto *ce = dyn_cast(stripped)) + if (ce->getOpcode() == llvm::Instruction::GetElementPtr) + name = cast(ce)->getPointerOperand()->getName(); + + auto global = values.finishAndCreateGlobal( + name + ".ptrauth", + CGM.getPointerAlign(), + /*constant*/ true, + llvm::GlobalVariable::PrivateLinkage); + global->setSection("llvm.ptrauth"); + + return global; +} + +llvm::Constant * +CodeGenModule::getConstantSignedPointer(llvm::Constant *pointer, + unsigned key, + llvm::Constant *storageAddress, + llvm::Constant *otherDiscriminator) { + // Unique based on the underlying value, not a signing of it. + auto stripped = pointer->stripPointerCasts(); + + PointerAuthConstantEntries *entries = nullptr; + + // We can cache this for discriminators that aren't defined in terms + // of globals. 
Discriminators defined in terms of globals (1) would + // require additional tracking to be safe and (2) only come up with + // address-specific discrimination, where this entry is almost certainly + // unique to the use-site anyway. + if (!storageAddress && + (!otherDiscriminator || + isa(otherDiscriminator))) { + + // Get or create the cache. + auto &cache = + getOrCreateCache(ConstantSignedPointersByConstant); + + // Check for an existing entry. + entries = &cache[stripped]; + for (auto &entry : *entries) { + if (entry.Key == key && entry.OtherDiscriminator == otherDiscriminator) { + auto global = entry.Global; + return llvm::ConstantExpr::getBitCast(global, pointer->getType()); + } + } + } + + // Build the constant. + auto global = + buildConstantSignedPointer(*this, stripped, key, storageAddress, + otherDiscriminator); + + // Cache if applicable. + if (entries) { + entries->push_back({ key, otherDiscriminator, global }); + } + + // Cast to the original type. + return llvm::ConstantExpr::getBitCast(global, pointer->getType()); +} + +/// Sign a constant pointer using the given scheme, producing a constant +/// with the same IR type. +llvm::Constant * +CodeGenModule::getConstantSignedPointer(llvm::Constant *pointer, + const PointerAuthSchema &schema, + llvm::Constant *storageAddress, + GlobalDecl schemaDecl, + QualType schemaType) { + llvm::Constant *otherDiscriminator = + getPointerAuthOtherDiscriminator(schema, schemaDecl, schemaType); + + return getConstantSignedPointer(pointer, schema.getKey(), + storageAddress, otherDiscriminator); +} + +llvm::Constant * +CodeGen::getConstantSignedPointer(CodeGenModule &CGM, + llvm::Constant *pointer, unsigned key, + llvm::Constant *storageAddress, + llvm::Constant *otherDiscriminator) { + return CGM.getConstantSignedPointer(pointer, key, storageAddress, + otherDiscriminator); +} + +/// Sign the given pointer and add it to the constant initializer +/// currently being built. 
+void ConstantAggregateBuilderBase::addSignedPointer( + llvm::Constant *pointer, const PointerAuthSchema &schema, + GlobalDecl calleeDecl, QualType calleeType) { + if (!schema) return add(pointer); + + llvm::Constant *storageAddress = nullptr; + if (schema.isAddressDiscriminated()) { + storageAddress = getAddrOfCurrentPosition(pointer->getType()); + } + + llvm::Constant *signedPointer = + Builder.CGM.getConstantSignedPointer(pointer, schema, storageAddress, + calleeDecl, calleeType); + add(signedPointer); +} + +void ConstantAggregateBuilderBase::addSignedPointer( + llvm::Constant *pointer, unsigned key, + bool useAddressDiscrimination, llvm::Constant *otherDiscriminator) { + llvm::Constant *storageAddress = nullptr; + if (useAddressDiscrimination) { + storageAddress = getAddrOfCurrentPosition(pointer->getType()); + } + + llvm::Constant *signedPointer = + Builder.CGM.getConstantSignedPointer(pointer, key, storageAddress, + otherDiscriminator); + add(signedPointer); +} + +void CodeGenModule::destroyConstantSignedPointerCaches() { + destroyCache(ConstantSignedPointersByConstant); + destroyCache(ConstantSignedPointersByDecl); + destroyCache(SignedThunkPointers); +} + +llvm::Constant *CodeGenModule::getFunctionPointer(llvm::Constant *pointer, + QualType functionType, + const FunctionDecl *FD) { + if (auto pointerAuth = getFunctionPointerAuthInfo(functionType)) { + // Check a cache that, for now, just has entries for functions signed + // with the standard function-pointer scheme. + // Cache function pointers based on their decl. Anything without a decl is + // going to be a one-off that doesn't need to be cached anyway. + llvm::Constant **entry = nullptr; + if (FD) { + auto &cache = + getOrCreateCache(ConstantSignedPointersByDecl); + entry = &cache[FD->getCanonicalDecl()]; + if (*entry) + return llvm::ConstantExpr::getBitCast(*entry, pointer->getType()); + } + + // If the cache misses, build a new constant. 
It's not a *problem* to + // have more than one of these for a particular function, but it's nice + // to avoid it. + pointer = getConstantSignedPointer( + pointer, pointerAuth.getKey(), nullptr, + cast_or_null(pointerAuth.getDiscriminator())); + + // Store the result back into the cache, if any. + if (entry) + *entry = pointer; + } + + return pointer; +} + +llvm::Constant *CodeGenModule::getFunctionPointer(const FunctionDecl *FD, + llvm::Type *Ty) { + return getFunctionPointer(getRawFunctionPointer(FD, Ty), FD->getType(), FD); +} + +llvm::Constant * +CodeGenModule::getMemberFunctionPointer(llvm::Constant *pointer, + QualType functionType, + const FunctionDecl *FD) { + if (auto pointerAuth = getMemberFunctionPointerAuthInfo(functionType)) { + llvm::Constant **entry = nullptr; + if (FD) { + auto &cache = + getOrCreateCache(SignedThunkPointers); + entry = &cache[FD->getCanonicalDecl()]; + if (*entry) + return llvm::ConstantExpr::getBitCast(*entry, pointer->getType()); + } + + pointer = getConstantSignedPointer( + pointer, pointerAuth.getKey(), nullptr, + cast_or_null(pointerAuth.getDiscriminator())); + + if (entry) + *entry = pointer; + } + + return pointer; +} + +llvm::Constant * +CodeGenModule::getMemberFunctionPointer(const FunctionDecl *FD, llvm::Type *Ty) { + QualType functionType = FD->getType(); + functionType = getContext().getMemberPointerType( + functionType, cast(FD)->getParent()->getTypeForDecl()); + return getMemberFunctionPointer(getRawFunctionPointer(FD, Ty), functionType, + FD); +} diff --git a/clang/lib/CodeGen/CGVTT.cpp b/clang/lib/CodeGen/CGVTT.cpp index e79f3f3dd8bce7..5bd50fb8058700 100644 --- a/clang/lib/CodeGen/CGVTT.cpp +++ b/clang/lib/CodeGen/CGVTT.cpp @@ -85,6 +85,11 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT, Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy); + if (auto &schema = + CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) + Init = CGM.getConstantSignedPointer(Init, schema, nullptr, GlobalDecl(), + 
QualType()); + VTTComponents.push_back(Init); } diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp index f9f25e7e57adc6..6b1ce852de207f 100644 --- a/clang/lib/CodeGen/CGVTables.cpp +++ b/clang/lib/CodeGen/CGVTables.cpp @@ -708,14 +708,27 @@ void CodeGenVTables::addVTableComponent( nextVTableThunkIndex++; fnPtr = maybeEmitThunk(GD, thunkInfo, /*ForVTable=*/true); + if (CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers) { + assert(thunkInfo.Method && "Method not set"); + GD = GD.getWithDecl(thunkInfo.Method); + } // Otherwise we can use the method definition directly. } else { llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD); fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true); + if (CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers) + GD = getItaniumVTableContext().findOriginalMethod(GD); } fnPtr = llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy); + + if (auto &schema = + CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers) { + builder.addSignedPointer(fnPtr, schema, GD, QualType()); + return; + } + builder.add(fnPtr); return; } diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt index 6d1f33b89247d8..143aee702501bd 100644 --- a/clang/lib/CodeGen/CMakeLists.txt +++ b/clang/lib/CodeGen/CMakeLists.txt @@ -70,6 +70,7 @@ add_clang_library(clangCodeGen CGOpenCLRuntime.cpp CGOpenMPRuntime.cpp CGOpenMPRuntimeNVPTX.cpp + CGPointerAuth.cpp CGRecordLayoutBuilder.cpp CGStmt.cpp CGStmtOpenMP.cpp diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index 3f9a52ab7638af..67ffbd8fad4c1c 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -2390,3 +2390,93 @@ llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) { return llvm::DebugLoc(); } + +void CodeGenFunction::EmitPointerAuthOperandBundle( + const CGPointerAuthInfo &pointerAuth, + SmallVectorImpl &bundles) { + if 
(!pointerAuth.isSigned()) return; + + auto key = Builder.getInt32(pointerAuth.getKey()); + + llvm::Value *discriminator = pointerAuth.getDiscriminator(); + if (!discriminator) { + discriminator = Builder.getSize(0); + } + + llvm::Value *args[] = { key, discriminator }; + bundles.emplace_back("ptrauth", args); +} + +static llvm::Value *EmitPointerAuthCommon(CodeGenFunction &CGF, + const CGPointerAuthInfo &pointerAuth, + llvm::Value *pointer, + unsigned intrinsicID) { + if (!pointerAuth) return pointer; + + auto key = CGF.Builder.getInt32(pointerAuth.getKey()); + + llvm::Value *discriminator = pointerAuth.getDiscriminator(); + if (!discriminator) { + discriminator = CGF.Builder.getSize(0); + } + + // Convert the pointer to intptr_t before signing it. + auto origType = pointer->getType(); + pointer = CGF.Builder.CreatePtrToInt(pointer, CGF.IntPtrTy); + + // call i64 @llvm.ptrauth.sign.i64(i64 %pointer, i32 %key, i64 %discriminator) + auto intrinsic = + CGF.CGM.getIntrinsic(intrinsicID, { CGF.IntPtrTy }); + pointer = CGF.EmitRuntimeCall(intrinsic, { pointer, key, discriminator }); + + // Convert back to the original type. + pointer = CGF.Builder.CreateIntToPtr(pointer, origType); + return pointer; +} + +llvm::Value * +CodeGenFunction::EmitPointerAuthSign(const CGPointerAuthInfo &pointerAuth, + llvm::Value *pointer) { + return EmitPointerAuthCommon(*this, pointerAuth, pointer, + llvm::Intrinsic::ptrauth_sign); +} + +llvm::Value * +CodeGenFunction::EmitPointerAuthAuth(const CGPointerAuthInfo &pointerAuth, + llvm::Value *pointer) { + return EmitPointerAuthCommon(*this, pointerAuth, pointer, + llvm::Intrinsic::ptrauth_auth); +} + +llvm::Value * +CodeGenFunction::EmitPointerAuthResignCall(llvm::Value *value, + const CGPointerAuthInfo &curAuth, + const CGPointerAuthInfo &newAuth) { + assert(curAuth && newAuth); + + // Convert the pointer to intptr_t before signing it. 
+ auto origType = value->getType(); + value = Builder.CreatePtrToInt(value, IntPtrTy); + + auto curKey = Builder.getInt32(curAuth.getKey()); + auto newKey = Builder.getInt32(newAuth.getKey()); + + llvm::Value *curDiscriminator = curAuth.getDiscriminator(); + if (!curDiscriminator) curDiscriminator = Builder.getSize(0); + + llvm::Value *newDiscriminator = newAuth.getDiscriminator(); + if (!newDiscriminator) newDiscriminator = Builder.getSize(0); + + // call i64 @llvm.ptrauth.resign.i64(i64 %pointer, + // i32 %curKey, i64 %curDiscriminator, + // i32 %newKey, i64 %newDiscriminator) + auto intrinsic = + CGM.getIntrinsic(llvm::Intrinsic::ptrauth_resign, { IntPtrTy }); + value = EmitRuntimeCall(intrinsic, + { value, curKey, curDiscriminator, + newKey, newDiscriminator }); + + // Convert back to the original type. + value = Builder.CreateIntToPtr(value, origType); + return value; +} diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 2c20ba4e6b65ba..65ebbc80f16b83 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -3598,7 +3598,8 @@ class CodeGenFunction : public CodeGenTypeCache { /// LLVM arguments and the types they were derived from. RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, - llvm::CallBase **callOrInvoke, SourceLocation Loc); + llvm::CallBase **callOrInvoke, SourceLocation Loc, + bool IsVirtualFunctionPointerThunk = false); RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke = nullptr) { @@ -3648,6 +3649,51 @@ class CodeGenFunction : public CodeGenTypeCache { CXXDtorType Type, const CXXRecordDecl *RD); + /// Create the discriminator from the storage address and the entity hash. 
+ llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *storageAddress, + llvm::Value *discriminator); + + CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &schema, + llvm::Value *storageAddress, + GlobalDecl calleeDecl, + QualType calleeType); + llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &info, + llvm::Value *pointer); + llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &info, + llvm::Value *pointer); + llvm::Value *EmitPointerAuthResign(llvm::Value *pointer, + QualType pointerType, + const CGPointerAuthInfo &curAuthInfo, + const CGPointerAuthInfo &newAuthInfo, + bool isKnownNonNull); + llvm::Value *EmitPointerAuthResignCall(llvm::Value *pointer, + const CGPointerAuthInfo &curInfo, + const CGPointerAuthInfo &newInfo); + void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &info, + SmallVectorImpl &bundles); + + CGPointerAuthInfo EmitPointerAuthInfo(PointerAuthQualifier qualifier, + Address storageAddress); + llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier qualifier, + llvm::Value *pointer, + QualType valueType, + Address storageAddress, + bool isKnownNonNull); + llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier qualifier, + const Expr *pointerExpr, + Address storageAddress); + llvm::Value *EmitPointerAuthUnqualify(PointerAuthQualifier qualifier, + llvm::Value *pointer, + QualType pointerType, + Address storageAddress, + bool isKnownNonNull); + void EmitPointerAuthCopy(PointerAuthQualifier qualifier, QualType type, + Address destField, Address srcField); + + std::pair + EmitOrigPointerRValue(const Expr *E); + bool isPointerKnownNonNull(const Expr *E); + // Return the copy constructor name with the prefix "__copy_constructor_" // removed. 
static std::string getNonTrivialCopyConstructorStr(QualType QT, @@ -3945,7 +3991,7 @@ class CodeGenFunction : public CodeGenTypeCache { void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr, bool PerformInit); - llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, + llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr); /// Call atexit() with a function that passes the given argument to diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 480a33f2728542..991901dc8d3d52 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -173,7 +173,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO, CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo)); } -CodeGenModule::~CodeGenModule() {} +CodeGenModule::~CodeGenModule() { + destroyConstantSignedPointerCaches(); +} void CodeGenModule::createObjCRuntime() { // This is just isGNUFamily(), but we want to force implementors of diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h index f5014c05b0672f..d6a22458ccf66c 100644 --- a/clang/lib/CodeGen/CodeGenModule.h +++ b/clang/lib/CodeGen/CodeGenModule.h @@ -66,6 +66,7 @@ class Stmt; class InitListExpr; class StringLiteral; class NamedDecl; +class PointerAuthSchema; class ValueDecl; class VarDecl; class LangOptions; @@ -405,6 +406,11 @@ class CodeGenModule : public CodeGenTypeCache { /// Global annotations. std::vector Annotations; + /// Signed constant pointers. + void *ConstantSignedPointersByDecl = nullptr; + void *SignedThunkPointers = nullptr; + void *ConstantSignedPointersByConstant = nullptr; + /// Map used to get unique annotation strings. 
llvm::StringMap AnnotationStrings; @@ -547,6 +553,8 @@ class CodeGenModule : public CodeGenTypeCache { MetadataTypeMap VirtualMetadataIdMap; MetadataTypeMap GeneralizedMetadataIdMap; + llvm::DenseMap PtrAuthDiscriminatorHashes; + public: CodeGenModule(ASTContext &C, const HeaderSearchOptions &headersearchopts, const PreprocessorOptions &ppopts, @@ -845,6 +853,51 @@ class CodeGenModule : public CodeGenTypeCache { ForDefinition_t IsForDefinition = NotForDefinition); + /// Return a function pointer for a reference to the given function. + /// This correctly handles weak references, but does not apply a + /// pointer signature. + llvm::Constant *getRawFunctionPointer(const FunctionDecl *FD, + llvm::Type *Ty = nullptr); + + /// Return the ABI-correct function pointer value for a reference + /// to the given function. This will apply a pointer signature if + /// necessary, caching the result for the given function. + llvm::Constant *getFunctionPointer(const FunctionDecl *FD, + llvm::Type *Ty = nullptr); + + /// Return the ABI-correct function pointer value for a reference + /// to the given function. This will apply a pointer signature if + /// necessary, but will only cache the result if \p FD is passed. 
+ llvm::Constant *getFunctionPointer(llvm::Constant *pointer, + QualType functionType, + const FunctionDecl *FD = nullptr); + + llvm::Constant *getMemberFunctionPointer(const FunctionDecl *FD, + llvm::Type *Ty = nullptr); + + llvm::Constant *getMemberFunctionPointer(llvm::Constant *pointer, + QualType functionType, + const FunctionDecl *FD = nullptr); + + CGPointerAuthInfo getFunctionPointerAuthInfo(QualType functionType); + + CGPointerAuthInfo getMemberFunctionPointerAuthInfo(QualType functionType); + + llvm::Constant *getConstantSignedPointer(llvm::Constant *pointer, + const PointerAuthSchema &schema, + llvm::Constant *storageAddress, + GlobalDecl schemaDecl, + QualType schemaType); + llvm::Constant *getConstantSignedPointer(llvm::Constant *pointer, + unsigned key, + llvm::Constant *storageAddress, + llvm::Constant *extraDiscrim); + + llvm::Constant * + getPointerAuthOtherDiscriminator(const PointerAuthSchema &schema, + GlobalDecl schemaDecl, QualType schemaType); + uint16_t getPointerAuthDeclDiscriminator(GlobalDecl GD); + /// Get the address of the RTTI descriptor for the given type. llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false); @@ -1514,6 +1567,8 @@ class CodeGenModule : public CodeGenTypeCache { /// function. void SimplifyPersonality(); + void destroyConstantSignedPointerCaches(); + /// Helper function for ConstructAttributeList and AddDefaultFnAttrs. /// Constructs an AttrList for a function with the given properties. 
void ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone, diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp index 8f9b16470b6428..e82d718881a37a 100644 --- a/clang/lib/CodeGen/ItaniumCXXABI.cpp +++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp @@ -29,6 +29,7 @@ #include "clang/AST/Type.h" #include "clang/AST/StmtCXX.h" #include "llvm/IR/DataLayout.h" +#include "llvm/IR/GlobalPtrAuthInfo.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" @@ -368,6 +369,9 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI { bool NeedsVTTParameter(GlobalDecl GD) override; + llvm::Constant * + getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD); + /**************************** RTTI Uniqueness ******************************/ protected: @@ -406,6 +410,9 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI { const CXXRecordDecl *RD) override; private: + llvm::Constant * + getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD); + bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { const auto &VtableLayout = CGM.getItaniumVTableContext().getVTableLayout(RD); @@ -777,7 +784,23 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( CalleePtr->addIncoming(VirtualFn, FnVirtual); CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual); - CGCallee Callee(FPT, CalleePtr); + CGPointerAuthInfo PointerAuth; + + if (const auto &Schema = + CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) { + llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2); + DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0), + FnVirtual); + const auto &AuthInfo = + CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0)); + assert(Schema.getKey() == AuthInfo.getKey() && + "Keys for virtual and non-virtual member functions must match"); + auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator(); + DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, 
FnNonVirtual); + PointerAuth = CGPointerAuthInfo(Schema.getKey(), DiscriminatorPHI); + } + + CGCallee Callee(FPT, CalleePtr, PointerAuth); return Callee; } @@ -804,6 +827,26 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( return Builder.CreateBitCast(Addr, PType); } +// See if it's possible to return a constant signed pointer. +static llvm::Constant *pointerAuthResignConstant( + llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo, + const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) { + Optional Info = + llvm::GlobalPtrAuthInfo::analyze(Ptr); + + if (!Info || !isa(NewAuthInfo.getDiscriminator())) + return nullptr; + + assert(Info->getKey()->getZExtValue() == CurAuthInfo.getKey() && + Info->getAddrDiscriminator()->isZeroValue() && + Info->getDiscriminator() == CurAuthInfo.getDiscriminator() && + "unexpected key or discriminators"); + + return CGM.getConstantSignedPointer( + Info->getPointer(), NewAuthInfo.getKey(), nullptr, + cast(NewAuthInfo.getDiscriminator())); +} + /// Perform a bitcast, derived-to-base, or base-to-derived member pointer /// conversion. /// @@ -831,21 +874,62 @@ llvm::Value * ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *src) { + // Use constant emission if we can. 
+ if (isa(src)) + return EmitMemberPointerConversion(E, cast(src)); + assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || E->getCastKind() == CK_BaseToDerivedMemberPointer || E->getCastKind() == CK_ReinterpretMemberPointer); + CGBuilderTy &Builder = CGF.Builder; + QualType dstType = E->getType(); + + if (dstType->isMemberFunctionPointerType()) + if (const auto &newAuthInfo = + CGM.getMemberFunctionPointerAuthInfo(dstType)) { + QualType srcType = E->getSubExpr()->getType(); + assert(srcType->isMemberFunctionPointerType()); + const auto &curAuthInfo = CGM.getMemberFunctionPointerAuthInfo(srcType); + llvm::Value *memFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr"); + llvm::Type *origTy = memFnPtr->getType(); + + llvm::BasicBlock *startBB = Builder.GetInsertBlock(); + llvm::BasicBlock *resignBB = CGF.createBasicBlock("resign"); + llvm::BasicBlock *mergeBB = CGF.createBasicBlock("merge"); + + // Check whether we have a virtual offset or a pointer to a function. + assert(UseARMMethodPtrABI && "ARM ABI expected"); + llvm::Value *adj = Builder.CreateExtractValue(src, 1, "memptr.adj"); + llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); + llvm::Value *andVal = Builder.CreateAnd(adj, ptrdiff_1); + llvm::Value *isVirtualOffset = + Builder.CreateIsNotNull(andVal, "is.virtual.offset"); + Builder.CreateCondBr(isVirtualOffset, mergeBB, resignBB); + + CGF.EmitBlock(resignBB); + llvm::Type *ptrTy = llvm::PointerType::getUnqual(CGM.Int8Ty); + memFnPtr = Builder.CreateIntToPtr(memFnPtr, ptrTy); + memFnPtr = CGF.EmitPointerAuthResign(memFnPtr, srcType, curAuthInfo, + newAuthInfo, + isa(src)); + memFnPtr = Builder.CreatePtrToInt(memFnPtr, origTy); + llvm::Value *resignedVal = Builder.CreateInsertValue(src, memFnPtr, 0); + resignBB = Builder.GetInsertBlock(); + + CGF.EmitBlock(mergeBB); + llvm::PHINode *newSrc = Builder.CreatePHI(src->getType(), 2); + newSrc->addIncoming(src, startBB); + newSrc->addIncoming(resignedVal, resignBB); + src = newSrc; + } 
+ // Under Itanium, reinterprets don't require any additional processing. if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; - // Use constant emission if we can. - if (isa(src)) - return EmitMemberPointerConversion(E, cast(src)); - llvm::Constant *adj = getMemberPointerAdjustment(E); if (!adj) return src; - CGBuilderTy &Builder = CGF.Builder; bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); const MemberPointerType *destTy = @@ -890,6 +974,22 @@ ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, E->getCastKind() == CK_BaseToDerivedMemberPointer || E->getCastKind() == CK_ReinterpretMemberPointer); + QualType dstType = E->getType(); + + if (dstType->isMemberFunctionPointerType()) + if (const auto &newAuthInfo = + CGM.getMemberFunctionPointerAuthInfo(dstType)) { + assert(UseARMMethodPtrABI && "ARM ABI expected"); + QualType srcType = E->getSubExpr()->getType(); + const auto &curAuthInfo = CGM.getMemberFunctionPointerAuthInfo(srcType); + llvm::Constant *memFnPtr = llvm::ConstantExpr::getExtractValue(src, 0); + llvm::Constant *constPtr = + pointerAuthResignConstant(cast(memFnPtr)->getOperand(0), + curAuthInfo, newAuthInfo, CGM); + constPtr = llvm::ConstantExpr::getPtrToInt(constPtr, memFnPtr->getType()); + src = llvm::ConstantExpr::getInsertValue(src, constPtr, 0); + } + // Under Itanium, reinterprets don't require any additional processing. if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; @@ -980,9 +1080,33 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, // least significant bit of adj then makes exactly the same // discrimination as the least significant bit of ptr does for // Itanium. 
- MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset); - MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, - 2 * ThisAdjustment.getQuantity() + 1); + + // We cannot use the Itanium ABI's representation for virtual member + // function pointers under pointer authentication because it would + // require us to store both the virtual offset and the constant + // discriminator in the pointer, which would be immediately vulnerable + // to attack. Instead we introduce a thunk that does the virtual dispatch + // and store it as if it were a non-virtual member function. This means + // that virtual function pointers may not compare equal anymore, but + // fortunately they aren't required to by the standard, and we do make + // a best-effort attempt to re-use the thunk. + // + // To support interoperation with code in which pointer authentication + // is disabled, dereferencing a member function pointer must still handle + // the virtual case, but it can use a discriminator which should never + // be valid. + const auto &Schema = + CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers; + if (Schema) + MemPtr[0] = llvm::ConstantExpr::getPtrToInt( + getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy); + else + MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset); + // Don't set the LSB of adj to 1 if pointer authentication for member + // function pointers is enabled. + MemPtr[1] = + llvm::ConstantInt::get(CGM.PtrDiffTy, + 2 * ThisAdjustment.getQuantity() + !Schema); } else { // Itanium C++ ABI 2.3: // For a virtual function, [the pointer field] is 1 plus the @@ -1004,7 +1128,7 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, // function type is incomplete. 
Ty = CGM.PtrDiffTy; } - llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty); + llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty); MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy); MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, @@ -1267,6 +1391,7 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { if (!Record->hasTrivialDestructor()) { CXXDestructorDecl *DtorD = Record->getDestructor(); Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete)); + Dtor = CGM.getFunctionPointer(Dtor, DtorD->getType()); Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy); } } @@ -1729,12 +1854,27 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT( VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex); // And load the address point from the VTT. - return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign()); + llvm::Value *AP = CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign()); + + if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) { + CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTT, + GlobalDecl(), + QualType()); + AP = CGF.EmitPointerAuthAuth(PointerAuth, AP); + } + + return AP; } llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr( BaseSubobject Base, const CXXRecordDecl *VTableClass) { - return getVTableAddressPoint(Base, VTableClass); + llvm::Constant *AP = getVTableAddressPoint(Base, VTableClass); + + if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) + AP = CGM.getConstantSignedPointer(AP, Schema, nullptr, GlobalDecl(), + QualType()); + + return AP; } llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, @@ -1781,15 +1921,16 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF, llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent()); uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD); - 
llvm::Value *VFunc; - if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) { + llvm::Value *VFunc, *VFuncPtr = nullptr; + auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers; + if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) { VFunc = CGF.EmitVTableTypeCheckedLoad( MethodDecl->getParent(), VTable, VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8); } else { CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc); - llvm::Value *VFuncPtr = + VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn"); auto *VFuncLoad = CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign()); @@ -1809,7 +1950,13 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF, VFunc = VFuncLoad; } - CGCallee Callee(GD, VFunc); + CGPointerAuthInfo PointerAuth; + if (Schema) { + assert(VFuncPtr && "virtual function pointer not set"); + GD = CGM.getItaniumVTableContext().findOriginalMethod(GD.getCanonicalDecl()); + PointerAuth = CGF.EmitPointerAuthInfo(Schema, VFuncPtr, GD, QualType()); + } + CGCallee Callee(GD, VFunc, PointerAuth); return Callee; } @@ -1928,6 +2075,13 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF, Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy); llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr); + if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) { + CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, nullptr, + GlobalDecl(), + QualType()); + VTablePtr = CGF.EmitPointerAuthAuth(PointerAuth, VTablePtr); + } + llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment); @@ -2391,6 +2545,14 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, if (llvm::Function *fn = dyn_cast(atexit.getCallee())) fn->setDoesNotThrow(); + auto &Context = CGF.CGM.getContext(); + FunctionProtoType::ExtProtoInfo 
EPI(Context.getDefaultCallingConvention( + /*IsVariadic=*/false, /*IsCXXMethod=*/false)); + QualType fnType = + Context.getFunctionType(Context.VoidTy, {Context.VoidPtrTy}, EPI); + llvm::Constant *dtorCallee = cast(dtor.getCallee()); + dtorCallee = CGF.CGM.getFunctionPointer(dtorCallee, fnType); + if (!addr) // addr is null when we are trying to register a dtor annotated with // __attribute__((destructor)) in a constructor function. Using null here is @@ -2398,8 +2560,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, // function. addr = llvm::Constant::getNullValue(CGF.Int8PtrTy); - llvm::Value *args[] = {llvm::ConstantExpr::getBitCast( - cast(dtor.getCallee()), dtorTy), + llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(dtorCallee, dtorTy), llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy), handle}; CGF.EmitNounwindRuntimeCall(atexit, args); @@ -2752,6 +2913,72 @@ bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } +llvm::Constant * +ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) { + SmallString<256> MethodName; + llvm::raw_svector_ostream Out(MethodName); + getMangleContext().mangleCXXName(MD, Out); + MethodName += "_vfpthunk_"; + StringRef ThunkName = MethodName.str(); + llvm::Function *ThunkFn; + if ((ThunkFn = cast_or_null( + CGM.getModule().getNamedValue(ThunkName)))) + return ThunkFn; + + const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD); + llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo); + llvm::GlobalValue::LinkageTypes Linkage = + MD->isExternallyVisible() ? 
llvm::GlobalValue::LinkOnceODRLinkage + : llvm::GlobalValue::InternalLinkage; + ThunkFn = + llvm::Function::Create(ThunkTy, Linkage, ThunkName, &CGM.getModule()); + ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility); + assert(ThunkFn->getName() == ThunkName && "name was uniqued!"); + + CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn); + CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn); + + // Start codegen. + CodeGenFunction CGF(CGM); + CGF.CurGD = GlobalDecl(MD); + CGF.CurFuncIsThunk = true; + + // Build FunctionArgs. + FunctionArgList FunctionArgs; + CGF.BuildFunctionArgList(CGF.CurGD, FunctionArgs); + + CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo, + FunctionArgs, MD->getLocation(), SourceLocation()); + llvm::Value *ThisVal = loadIncomingCXXThis(CGF); + setCXXABIThisValue(CGF, ThisVal); + + CallArgList CallArgs; + for (const VarDecl *VD : FunctionArgs) + CGF.EmitDelegateCallArg(CallArgs, VD, SourceLocation()); + + const FunctionProtoType *FPT = MD->getType()->getAs(); + RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*this*/ 1); + const CGFunctionInfo &CallInfo = + CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, Required, 0); + CGCallee Callee = CGCallee::forVirtual(nullptr, GlobalDecl(MD), + getThisAddress(CGF), ThunkTy); + llvm::CallBase *CallOrInvoke; + CGF.EmitCall(CallInfo, Callee, ReturnValueSlot(), CallArgs, &CallOrInvoke, + SourceLocation(), true); + auto *Call = cast(CallOrInvoke); + Call->setTailCallKind(llvm::CallInst::TCK_MustTail); + if (Call->getType()->isVoidTy()) + CGF.Builder.CreateRetVoid(); + else + CGF.Builder.CreateRet(Call); + + // Finish the function to maintain CodeGenFunction invariants. + // FIXME: Don't emit unreachable code. + CGF.EmitBlock(CGF.createBasicBlock()); + CGF.FinishFunction(); + return ThunkFn; +} + namespace { class ItaniumRTTIBuilder { CodeGenModule &CGM; // Per-module state. 
@@ -3268,6 +3495,10 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) { llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two); VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy); + if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) + VTable = CGM.getConstantSignedPointer(VTable, Schema, nullptr, GlobalDecl(), + QualType()); + Fields.push_back(VTable); } @@ -4372,6 +4603,18 @@ ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This, return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD}; } +llvm::Constant * +ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) { + const CXXMethodDecl *origMD = + cast(CGM.getItaniumVTableContext() + .findOriginalMethod(MD->getCanonicalDecl()) + .getDecl()); + llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD); + QualType funcType = CGM.getContext().getMemberPointerType( + MD->getType(), MD->getParent()->getTypeForDecl()); + return CGM.getMemberFunctionPointer(thunk, funcType, MD); +} + void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) { if (CGF.getTarget().hasFeature("exception-handling")) diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp index 2d8b538bc2eec5..c9eada5a4ae28c 100644 --- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp +++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -1896,7 +1896,7 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF, VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign()); } - CGCallee Callee(GD, VFunc); + CGCallee Callee(GD, VFunc, /*unsigned*/ CGPointerAuthInfo()); return Callee; } @@ -3369,7 +3369,7 @@ CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer( FunctionPointer = Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo()); - CGCallee Callee(FPT, FunctionPointer); + CGCallee Callee(FPT, FunctionPointer, /*unsigned*/ CGPointerAuthInfo()); return Callee; } diff --git 
a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp index 357a5106ab393d..77387e53356cfc 100644 --- a/clang/lib/Driver/ToolChain.cpp +++ b/clang/lib/Driver/ToolChain.cpp @@ -614,6 +614,10 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args, if (!Triple.isOSBinFormatMachO()) return getTripleString(); + StringRef Arch = Triple.getArchName(); + if (Arch == "arm64e") + return Triple.getTriple(); + // FIXME: older versions of ld64 expect the "arm64" component in the actual // triple string and query it to determine whether an LTO file can be // handled. Remove this when we don't care any more. diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp index 3a5fe6ddeaed5f..4f926426bf51e9 100644 --- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp +++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp @@ -40,7 +40,18 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args, // Handle CPU name is 'native'. if (CPU == "native") return llvm::sys::getHostCPUName(); - else if (CPU.size()) + + // arm64e requires v8.3a and only runs on vortex and later CPUs. + if (Triple.getArchName() == "arm64e") { + // Honor -mcpu as long it doesn't specify an older CPU than "vortex". + if (CPU.size() && (CPU != "cyclone")) + return CPU; + + // Otherwise default to "vortex". + return "vortex"; + } + + if (CPU.size()) return CPU; // Make sure we pick "cyclone" if -arch is used or when targetting a Darwin @@ -139,10 +150,15 @@ getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune, // Handle CPU name is 'native'. if (MtuneLowerCase == "native") MtuneLowerCase = llvm::sys::getHostCPUName(); - if (MtuneLowerCase == "cyclone") { + + // 'cyclone' and later have zero-cycle register moves and zeroing. 
+ if (MtuneLowerCase == "cyclone" || + MtuneLowerCase == "vortex" || + MtuneLowerCase == "lightning") { Features.push_back("+zcm"); Features.push_back("+zcz"); } + return true; } diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 73c660738aaf44..b463db8b419e32 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5062,6 +5062,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, !NoCommonDefault)) CmdArgs.push_back("-fno-common"); + if (Args.hasFlag(options::OPT_fptrauth_intrinsics, + options::OPT_fno_ptrauth_intrinsics, false)) + CmdArgs.push_back("-fptrauth-intrinsics"); + + if (Args.hasFlag(options::OPT_fptrauth_calls, + options::OPT_fno_ptrauth_calls, false)) + CmdArgs.push_back("-fptrauth-calls"); + + if (Args.hasFlag(options::OPT_fptrauth_returns, + options::OPT_fno_ptrauth_returns, false)) + CmdArgs.push_back("-fptrauth-returns"); + + if (Args.hasFlag(options::OPT_fptrauth_indirect_gotos, + options::OPT_fno_ptrauth_indirect_gotos, false)) + CmdArgs.push_back("-fptrauth-indirect-gotos"); + + if (Args.hasFlag(options::OPT_fptrauth_auth_traps, + options::OPT_fno_ptrauth_auth_traps, false)) + CmdArgs.push_back("-fptrauth-auth-traps"); + + if (Args.hasFlag(options::OPT_fptrauth_soft, + options::OPT_fno_ptrauth_soft, false)) + CmdArgs.push_back("-fptrauth-soft"); + // -fsigned-bitfields is default, and clang doesn't yet support // -funsigned-bitfields. 
if (!Args.hasFlag(options::OPT_fsigned_bitfields, diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp index f31633779bed9c..076b95cc53ad7d 100644 --- a/clang/lib/Driver/ToolChains/Darwin.cpp +++ b/clang/lib/Driver/ToolChains/Darwin.cpp @@ -56,7 +56,7 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) { .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm) .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm) .Cases("armv7s", "xscale", llvm::Triple::arm) - .Case("arm64", llvm::Triple::aarch64) + .Cases("arm64", "arm64e", llvm::Triple::aarch64) .Case("r600", llvm::Triple::r600) .Case("amdgcn", llvm::Triple::amdgcn) .Case("nvptx", llvm::Triple::nvptx) @@ -71,8 +71,13 @@ void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) { llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(Str); T.setArch(Arch); - if (Str == "x86_64h") + // Preserve the original string for those -arch options that aren't + // reflected in ArchKind but still affect code generation. It's not + // clear why these aren't just reflected in ArchKind, though. + if (Str == "x86_64h" || Str == "arm64e") T.setArchName(Str); + + // These arches aren't really Darwin even if we're using a Darwin toolchain. 
else if (ArchKind == llvm::ARM::ArchKind::ARMV6M || ArchKind == llvm::ARM::ArchKind::ARMV7M || ArchKind == llvm::ARM::ArchKind::ARMV7EM) { @@ -835,8 +840,11 @@ StringRef MachO::getMachOArchName(const ArgList &Args) const { default: return getDefaultUniversalArchName(); - case llvm::Triple::aarch64: + case llvm::Triple::aarch64: { + if (getTriple().getArchName() == "arm64e") + return "arm64e"; return "arm64"; + } case llvm::Triple::thumb: case llvm::Triple::arm: @@ -924,6 +932,37 @@ void DarwinClang::addClangWarningOptions(ArgStringList &CC1Args) const { } } +void DarwinClang::addClangTargetOptions( + const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args, + Action::OffloadKind DeviceOffloadKind) const{ + + Darwin::addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadKind); + + // On arm64e, enable pointer authentication (for the return address and + // indirect calls), as well as usage of the intrinsics. + if (getArchName() == "arm64e") { + if (!DriverArgs.hasArg(options::OPT_fptrauth_returns, + options::OPT_fno_ptrauth_returns)) + CC1Args.push_back("-fptrauth-returns"); + + if (!DriverArgs.hasArg(options::OPT_fptrauth_intrinsics, + options::OPT_fno_ptrauth_intrinsics)) + CC1Args.push_back("-fptrauth-intrinsics"); + + if (!DriverArgs.hasArg(options::OPT_fptrauth_calls, + options::OPT_fno_ptrauth_calls)) + CC1Args.push_back("-fptrauth-calls"); + + if (!DriverArgs.hasArg(options::OPT_fptrauth_indirect_gotos, + options::OPT_fno_ptrauth_indirect_gotos)) + CC1Args.push_back("-fptrauth-indirect-gotos"); + + if (!DriverArgs.hasArg(options::OPT_fptrauth_auth_traps, + options::OPT_fno_ptrauth_auth_traps)) + CC1Args.push_back("-fptrauth-auth-traps"); + } +} + /// Take a path that speculatively points into Xcode and return the /// `XCODE/Contents/Developer` path if it is an Xcode path, or an empty path /// otherwise. 
@@ -1609,7 +1648,7 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain, StringRef MachOArchName = Toolchain.getMachOArchName(Args); if (MachOArchName == "armv7" || MachOArchName == "armv7s" || - MachOArchName == "arm64") + MachOArchName == "arm64" || MachOArchName == "arm64e") OSTy = llvm::Triple::IOS; else if (MachOArchName == "armv7k") OSTy = llvm::Triple::WatchOS; diff --git a/clang/lib/Driver/ToolChains/Darwin.h b/clang/lib/Driver/ToolChains/Darwin.h index 1b1c358c40a4f0..74f152c5bd9b07 100644 --- a/clang/lib/Driver/ToolChains/Darwin.h +++ b/clang/lib/Driver/ToolChains/Darwin.h @@ -517,6 +517,10 @@ class LLVM_LIBRARY_VISIBILITY DarwinClang : public Darwin { void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override; + void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args, + Action::OffloadKind DeviceOffloadKind) const override; + void AddLinkARCArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const override; diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h index b11f36559a8b09..ddba494c8a79ee 100644 --- a/clang/lib/Format/FormatToken.h +++ b/clang/lib/Format/FormatToken.h @@ -448,6 +448,7 @@ struct FormatToken { case tok::kw_noexcept: case tok::kw_static_assert: case tok::kw___attribute: + case tok::kw___ptrauth: return true; default: return false; diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index bbe05602f6da29..c3c17a24be2be2 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -2183,7 +2183,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) { // it is often token-pasted. 
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash, tok::kw___attribute, tok::kw___declspec, - tok::kw_alignas) || + tok::kw_alignas, tok::kw___ptrauth) || ((Style.Language == FormatStyle::LK_Java || Style.Language == FormatStyle::LK_JavaScript) && FormatTok->isOneOf(tok::period, tok::comma))) { diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 44d5651b4afff3..e096545e76e787 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -664,8 +664,94 @@ static void setPGOUseInstrumentor(CodeGenOptions &Opts, Opts.setProfileUse(CodeGenOptions::ProfileClangInstr); } +static bool parsePointerAuthOptions(PointerAuthOptions &Opts, + ArgList &Args, + const LangOptions &LangOpts, + const llvm::Triple &Triple, + DiagnosticsEngine &Diags) { + if (!LangOpts.PointerAuthCalls && !LangOpts.PointerAuthReturns && + !LangOpts.PointerAuthIndirectGotos && !LangOpts.PointerAuthAuthTraps) + return true; + + if (LangOpts.SoftPointerAuth) { + if (LangOpts.PointerAuthCalls) { + using Key = PointerAuthSchema::SoftKey; + using Discrimination = PointerAuthSchema::Discrimination; + Opts.FunctionPointers = + PointerAuthSchema(Key::FunctionPointers, false, Discrimination::None); + Opts.BlockInvocationFunctionPointers = + PointerAuthSchema(Key::BlockInvocationFunctionPointers, true, + Discrimination::None); + Opts.BlockHelperFunctionPointers = + PointerAuthSchema(Key::BlockHelperFunctionPointers, true, + Discrimination::None); + Opts.BlockByrefHelperFunctionPointers = + PointerAuthSchema(Key::BlockHelperFunctionPointers, true, + Discrimination::None); + Opts.ObjCMethodListFunctionPointers = + PointerAuthSchema(Key::ObjCMethodListFunctionPointers, true, + Discrimination::None); + Opts.CXXVTablePointers = + Opts.CXXVTTVTablePointers = + PointerAuthSchema(Key::CXXVTablePointers, false, + Discrimination::None); + Opts.CXXVirtualFunctionPointers = + 
Opts.CXXVirtualVariadicFunctionPointers = + PointerAuthSchema(Key::CXXVirtualFunctionPointers, true, + Discrimination::Decl); + Opts.CXXMemberFunctionPointers = + PointerAuthSchema(Key::CXXMemberFunctionPointers, false, + Discrimination::Type); + Opts.ThunkCXXVirtualMemberPointers = false; + } + + Opts.ReturnAddresses = LangOpts.PointerAuthReturns; + Opts.IndirectGotos = LangOpts.PointerAuthIndirectGotos; + Opts.AuthTraps = LangOpts.PointerAuthAuthTraps; + return true; + } + + if (Triple.getArch() == llvm::Triple::aarch64) { + if (LangOpts.PointerAuthCalls) { + using Key = PointerAuthSchema::ARM8_3Key; + using Discrimination = PointerAuthSchema::Discrimination; + // If you change anything here, be sure to update <ptrauth.h>. + Opts.FunctionPointers = + PointerAuthSchema(Key::ASIA, false, Discrimination::None); + Opts.BlockInvocationFunctionPointers = + PointerAuthSchema(Key::ASIA, true, Discrimination::None); + Opts.BlockHelperFunctionPointers = + PointerAuthSchema(Key::ASIA, true, Discrimination::None); + Opts.BlockByrefHelperFunctionPointers = + PointerAuthSchema(Key::ASIA, true, Discrimination::None); + Opts.ObjCMethodListFunctionPointers = + PointerAuthSchema(Key::ASIA, true, Discrimination::None); + Opts.CXXVTablePointers = + PointerAuthSchema(Key::ASDA, false, Discrimination::None); + Opts.CXXVTTVTablePointers = + PointerAuthSchema(Key::ASDA, false, Discrimination::None); + Opts.CXXVirtualFunctionPointers = + Opts.CXXVirtualVariadicFunctionPointers = + PointerAuthSchema(Key::ASIA, true, Discrimination::Decl); + Opts.CXXMemberFunctionPointers = + PointerAuthSchema(Key::ASIA, false, Discrimination::Type); + Opts.ThunkCXXVirtualMemberPointers = false; + } + + Opts.ReturnAddresses = LangOpts.PointerAuthReturns; + Opts.IndirectGotos = LangOpts.PointerAuthIndirectGotos; + Opts.AuthTraps = LangOpts.PointerAuthAuthTraps; + return true; + } + + Diags.Report(diag::err_drv_ptrauth_not_supported) + << Triple.str(); + return false; +} + static bool ParseCodeGenArgs(CodeGenOptions
&Opts, ArgList &Args, InputKind IK, DiagnosticsEngine &Diags, + const LangOptions &LangOpts, const TargetOptions &TargetOpts, const FrontendOptions &FrontendOpts) { bool Success = true; @@ -1366,6 +1452,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Opts.EmitVersionIdentMetadata = Args.hasFlag(OPT_Qy, OPT_Qn, true); + Success &= + parsePointerAuthOptions(Opts.PointerAuth, Args, LangOpts, Triple, Diags); Opts.Addrsig = Args.hasArg(OPT_faddrsig); if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) { @@ -3132,6 +3220,13 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, if (InlineArg->getOption().matches(options::OPT_fno_inline)) Opts.NoInlineDefine = true; + Opts.PointerAuthIntrinsics = Args.hasArg(OPT_fptrauth_intrinsics); + Opts.PointerAuthCalls = Args.hasArg(OPT_fptrauth_calls); + Opts.PointerAuthReturns = Args.hasArg(OPT_fptrauth_returns); + Opts.PointerAuthIndirectGotos = Args.hasArg(OPT_fptrauth_indirect_gotos); + Opts.PointerAuthAuthTraps = Args.hasArg(OPT_fptrauth_auth_traps); + Opts.SoftPointerAuth = Args.hasArg(OPT_fptrauth_soft); + Opts.FastMath = Args.hasArg(OPT_ffast_math) || Args.hasArg(OPT_cl_fast_relaxed_math); Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) || @@ -3501,8 +3596,6 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res, InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags, LangOpts.IsHeaderFile); ParseTargetArgs(Res.getTargetOpts(), Args, Diags); - Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags, - Res.getTargetOpts(), Res.getFrontendOpts()); ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args, Res.getFileSystemOpts().WorkingDir); ParseAPINotesArgs(Res.getAPINotesOpts(), Args, Diags); @@ -3542,6 +3635,10 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res, LangOpts.FunctionAlignment = getLastArgIntValue(Args, OPT_function_alignment, 0, Diags); + Success &= 
ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags, + LangOpts, Res.getTargetOpts(), + Res.getFrontendOpts()); + if (LangOpts.CUDA) { // During CUDA device-side compilation, the aux triple is the // triple used for host compilation. diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index 8ff648fdb4e0ba..136d40bd5ea2a9 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -83,6 +83,7 @@ set(files pconfigintrin.h popcntintrin.h prfchwintrin.h + ptrauth.h ptwriteintrin.h rdseedintrin.h rtmintrin.h diff --git a/clang/lib/Headers/ptrauth.h b/clang/lib/Headers/ptrauth.h new file mode 100644 index 00000000000000..2df030d000ab1c --- /dev/null +++ b/clang/lib/Headers/ptrauth.h @@ -0,0 +1,356 @@ +/*===---- ptrauth.h - Pointer authentication -------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __PTRAUTH_H +#define __PTRAUTH_H + +#include + +typedef enum { + ptrauth_key_asia = 0, + ptrauth_key_asib = 1, + ptrauth_key_asda = 2, + ptrauth_key_asdb = 3, + + /* A process-independent key which can be used to sign code pointers. + Signing and authenticating with this key is a no-op in processes + which disable ABI pointer authentication. */ + ptrauth_key_process_independent_code = ptrauth_key_asia, + + /* A process-specific key which can be used to sign code pointers. + Signing and authenticating with this key is enforced even in processes + which disable ABI pointer authentication. */ + ptrauth_key_process_dependent_code = ptrauth_key_asib, + + /* A process-independent key which can be used to sign data pointers. + Signing and authenticating with this key is a no-op in processes + which disable ABI pointer authentication. */ + ptrauth_key_process_independent_data = ptrauth_key_asda, + + /* A process-specific key which can be used to sign data pointers. + Signing and authenticating with this key is a no-op in processes + which disable ABI pointer authentication. */ + ptrauth_key_process_dependent_data = ptrauth_key_asdb, + + /* The key used to sign C function pointers. + The extra data is always 0. */ + ptrauth_key_function_pointer = ptrauth_key_process_independent_code, + + /* The key used to sign return addresses on the stack. + The extra data is based on the storage address of the return address. + On ARM64, that is always the storage address of the return address plus 8 + (or, in other words, the value of the stack pointer on function entry) */ + ptrauth_key_return_address = ptrauth_key_process_dependent_code, + + /* The key used to sign frame pointers on the stack. + The extra data is based on the storage address of the frame pointer. 
+ On ARM64, that is always the storage address of the frame pointer plus 16 + (or, in other words, the value of the stack pointer on function entry) */ + ptrauth_key_frame_pointer = ptrauth_key_process_dependent_data, + + /* The key used to sign block function pointers, including: + invocation functions, + block object copy functions, + block object destroy functions, + __block variable copy functions, and + __block variable destroy functions. + The extra data is always the address at which the function pointer + is stored. + + Note that block object pointers themselves (i.e. the direct + representations of values of block-pointer type) are not signed. */ + ptrauth_key_block_function = ptrauth_key_asia, + + /* The key used to sign C++ v-table pointers. + The extra data is always 0. */ + ptrauth_key_cxx_vtable_pointer = ptrauth_key_asda, + + /* Other pointers signed under the ABI use private ABI rules. */ + +} ptrauth_key; + +/* An integer type of the appropriate size for a discriminator argument. */ +typedef uintptr_t ptrauth_extra_data_t; + +/* An integer type of the appropriate size for a generic signature. */ +typedef uintptr_t ptrauth_generic_signature_t; + +/* A signed pointer value embeds the original pointer together with + a signature that attests to the validity of that pointer. Because + this signature must use only "spare" bits of the pointer, a + signature's validity is probabilistic in practice: it is unlikely + but still plausible that an invalidly-derived signature will + somehow equal the correct signature and therefore successfully + authenticate. Nonetheless, this scheme provides a strong degree + of protection against certain kinds of attacks. */ + +/* Authenticating a pointer that was not signed with the given key + and extra-data value will (likely) fail by trapping. */ + +/* The null function pointer is always the all-zero bit pattern. 
+ Signing an all-zero bit pattern will embed a (likely) non-zero + signature in the result, and so the result will not seem to be + a null function pointer. Authenticating this value will yield + a null function pointer back. However, authenticating an + all-zero bit pattern will probably fail, because the + authentication will expect a (likely) non-zero signature to + be embedded in the value. + + Because of this, if a pointer may validly be null, you should + check for null before attempting to authenticate it with one + of these intrinsics. This is not necessary when using the + __ptrauth qualifier; the compiler will perform this check + automatically. */ + +#ifdef __PTRAUTH_INTRINSICS__ + +/* Strip the signature from a value without authenticating it. + + If the value is a function pointer, the result will not be a + legal function pointer because of the missing signature, and + attempting to call it will result in an authentication failure. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The result will have the same type as the original value. */ +#define ptrauth_strip(__value, __key) \ + __builtin_ptrauth_strip(__value, __key) + +/* Blend a constant discriminator into the given pointer-like value + to form a new discriminator. Not all bits of the inputs are + guaranteed to contribute to the result. + + On arm64e, the integer must fall within the range of a uint16_t; + other bits may be ignored. + + For the purposes of ptrauth_sign_constant, the result of calling + this function is considered a constant expression if the arguments + are constant. Some restrictions may be imposed on the pointer. + + The first argument must be an expression of pointer type. + The second argument must be an expression of integer type. + The result will have type uintptr_t.
*/ +#define ptrauth_blend_discriminator(__pointer, __integer) \ + __builtin_ptrauth_blend_discriminator(__pointer, __integer) + +/* Add a signature to the given pointer value using a specific key, + using the given extra data as a salt to the signing process. + + The value must be a constant expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be a constant expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This is a constant expression if the extra data is an integer or + null pointer constant. */ +#define ptrauth_sign_constant(__value, __key, __data) \ + __builtin_ptrauth_sign_constant(__value, __key, __data) + +/* Add a signature to the given pointer value using a specific key, + using the given extra data as a salt to the signing process. + + This operation does not authenticate the original value and is + therefore potentially insecure if an attacker could possibly + control that value. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. */ +#define ptrauth_sign_unauthenticated(__value, __key, __data) \ + __builtin_ptrauth_sign_unauthenticated(__value, __key, __data) + +/* Authenticate a pointer using one scheme and resign it using another. + + If the result is subsequently authenticated using the new scheme, that + authentication is guaranteed to fail if and only if the initial + authentication failed. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. 
+ The result will have the same type as the original value. + + This operation is guaranteed to not leave the intermediate value + available for attack before it is re-signed. + + Do not pass a null pointer to this function. A null pointer + will not successfully authenticate. */ +#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, __new_data) \ + __builtin_ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, __new_data) + +/* Authenticate a pointer using one scheme and resign it as a C + function pointer. + + If the result is subsequently authenticated using the new scheme, that + authentication is guaranteed to fail if and only if the initial + authentication failed. + + The value must be an expression of function pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This operation is guaranteed to not leave the intermediate value + available for attack before it is re-signed. Additionally, if this + expression is used syntactically as the function expression in a + call, only a single authentication will be performed. */ +#define ptrauth_auth_function(__value, __old_key, __old_data) \ + ptrauth_auth_and_resign(__value, __old_key, __old_data, ptrauth_key_function_pointer, 0) + +/* Authenticate a data pointer. + + The value must be an expression of non-function pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + If the authentication fails, dereferencing the resulting pointer + will fail. 
*/ +#define ptrauth_auth_data(__value, __old_key, __old_data) \ + __builtin_ptrauth_auth(__value, __old_key, __old_data) + +/* Compute a constant discriminator from the given string. + + The result can be used as the second argument to + ptrauth_blend_discriminator or the third argument to the + __ptrauth qualifier. It has type size_t. + + The argument must be a string literal. + A call to this function is an integer constant expression. */ +#define ptrauth_string_discriminator(__string) \ + __builtin_ptrauth_string_discriminator(__string) + +/* Compute a constant discriminator from the given type. + + The result can be used as the second argument to + ptrauth_blend_discriminator or the third argument to the + __ptrauth qualifier. It has type size_t. + + If the type is a C++ member function pointer type, the result is + the discriminator used to sign member function pointers of that + type. This property is currently not true of other types. + + The argument must be a type. + A call to this function is an integer constant expression. */ +#define ptrauth_type_discriminator(__type) \ + __builtin_ptrauth_type_discriminator(__type) + + +/* Compute a signature for the given pair of pointer-sized values. + The order of the arguments is significant. + + Like a pointer signature, the resulting signature depends on + private key data and therefore should not be reliably reproducible + by attackers. That means that this can be used to validate the + integrity of arbitrary data by storing a signature for that data + alongside it, then checking that the signature is still valid later. + Data which exceeds two pointers in size can be signed by either + computing a tree of generic signatures or just signing an ordinary + cryptographic hash of the data. + + The result has type ptrauth_generic_signature_t.
However, it may + not have as many bits of entropy as that type's width would suggest; + some implementations are known to compute a compressed signature as + if the arguments were a pointer and a discriminator. + + The arguments must be either pointers or integers; if integers, they + will be coerced to uintptr_t. */ +#define ptrauth_sign_generic_data(__value, __data) \ + __builtin_ptrauth_sign_generic_data(__value, __data) + +/* Define some standard __ptrauth qualifiers used in the ABI. */ +#define __ptrauth_function_pointer \ + __ptrauth(ptrauth_key_function_pointer,0,0) +#define __ptrauth_return_address \ + __ptrauth(ptrauth_key_return_address,1,0) +#define __ptrauth_block_invocation_pointer \ + __ptrauth(ptrauth_key_function_pointer,1,0) +#define __ptrauth_block_copy_helper \ + __ptrauth(ptrauth_key_function_pointer,1,0) +#define __ptrauth_block_destroy_helper \ + __ptrauth(ptrauth_key_function_pointer,1,0) +#define __ptrauth_block_byref_copy_helper \ + __ptrauth(ptrauth_key_function_pointer,1,0) +#define __ptrauth_block_byref_destroy_helper \ + __ptrauth(ptrauth_key_function_pointer,1,0) +#define __ptrauth_objc_method_list_imp \ + __ptrauth(ptrauth_key_function_pointer,1,0) +#define __ptrauth_cxx_vtable_pointer \ + __ptrauth(ptrauth_key_cxx_vtable_pointer,0,0) +#define __ptrauth_cxx_vtt_vtable_pointer \ + __ptrauth(ptrauth_key_cxx_vtable_pointer,0,0) +#define __ptrauth_swift_heap_object_destructor \ + __ptrauth(ptrauth_key_function_pointer,1,0xbbbf) + +/* Some situations in the C++ and Swift ABIs use declaration-specific + or type-specific extra discriminators.
*/ +#define __ptrauth_cxx_virtual_function_pointer(__declkey) \ + __ptrauth(ptrauth_key_function_pointer,1,__declkey) +#define __ptrauth_swift_function_pointer(__typekey) \ + __ptrauth(ptrauth_key_function_pointer,0,__typekey) +#define __ptrauth_swift_class_method_pointer(__declkey) \ + __ptrauth(ptrauth_key_function_pointer,1,__declkey) +#define __ptrauth_swift_protocol_witness_function_pointer(__declkey) \ + __ptrauth(ptrauth_key_function_pointer,1,__declkey) +#define __ptrauth_swift_value_witness_function_pointer(__key) \ + __ptrauth(ptrauth_key_function_pointer,1,__key) + +#else + +#define ptrauth_strip(__value, __key) __value +#define ptrauth_blend_discriminator(__pointer, __integer) ((uintptr_t)0) +#define ptrauth_sign_constant(__value, __key, __data) __value +#define ptrauth_sign_unauthenticated(__value, __key, __data) __value +#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, __new_data) __value +#define ptrauth_auth_function(__value, __old_key, __old_data) __value +#define ptrauth_auth_data(__value, __old_key, __old_data) __value +#define ptrauth_string_discriminator(__string) ((uintptr_t)0) +#define ptrauth_type_discriminator(__type) ((uintptr_t)0) +#define ptrauth_sign_generic_data(__value, __data) ((ptrauth_generic_signature_t)0) + +#define __ptrauth_function_pointer +#define __ptrauth_return_address +#define __ptrauth_block_invocation_pointer +#define __ptrauth_block_copy_helper +#define __ptrauth_block_destroy_helper +#define __ptrauth_block_byref_copy_helper +#define __ptrauth_block_byref_destroy_helper +#define __ptrauth_objc_method_list_imp +#define __ptrauth_cxx_vtable_pointer +#define __ptrauth_cxx_vtt_vtable_pointer +#define __ptrauth_swift_heap_object_destructor +#define __ptrauth_cxx_virtual_function_pointer(__declkey) +#define __ptrauth_swift_function_pointer(__typekey) +#define __ptrauth_swift_class_method_pointer(__declkey) +#define __ptrauth_swift_protocol_witness_function_pointer(__declkey) +#define 
__ptrauth_swift_value_witness_function_pointer(__key) + +#endif /* __PTRAUTH_INTRINSICS__ */ + +#endif /* __PTRAUTH_H */ diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp index 8483087ec31a86..d86c48edbf005e 100644 --- a/clang/lib/Parse/ParseDecl.cpp +++ b/clang/lib/Parse/ParseDecl.cpp @@ -2907,6 +2907,39 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs, ParsedAttr::AS_Keyword, EllipsisLoc); } +/// type-qualifier: +/// '__ptrauth' '(' constant-expression +/// (',' constant-expression)[opt] +/// (',' constant-expression)[opt] ')' +void Parser::ParsePtrauthQualifier(ParsedAttributes &attrs) { + assert(Tok.is(tok::kw___ptrauth)); + + IdentifierInfo *kwName = Tok.getIdentifierInfo(); + SourceLocation kwLoc = ConsumeToken(); + + BalancedDelimiterTracker T(*this, tok::l_paren); + if (T.expectAndConsume()) + return; + + ArgsVector argExprs; + do { + ExprResult expr = ParseAssignmentExpression(); + if (expr.isInvalid()) { + T.skipToEnd(); + return; + } + argExprs.push_back(expr.get()); + } while (TryConsumeToken(tok::comma)); + + T.consumeClose(); + SourceLocation endLoc = T.getCloseLocation(); + + attrs.addNew(kwName, SourceRange(kwLoc, endLoc), + /*scope*/ nullptr, SourceLocation(), + argExprs.data(), argExprs.size(), + ParsedAttr::AS_Keyword); +} + /// Determine whether we're looking at something that might be a declarator /// in a simple-declaration. If it can't possibly be a declarator, maybe /// diagnose a missing semicolon after a prior tag definition in the decl @@ -3510,6 +3543,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, getLangOpts()); break; + // __ptrauth qualifier. 
+ case tok::kw___ptrauth: + ParsePtrauthQualifier(DS.getAttributes()); + continue; + case tok::kw___sptr: case tok::kw___uptr: case tok::kw___ptr64: @@ -4962,6 +5000,7 @@ bool Parser::isTypeSpecifierQualifier() { case tok::kw___ptr32: case tok::kw___pascal: case tok::kw___unaligned: + case tok::kw___ptrauth: case tok::kw__Nonnull: case tok::kw__Nullable: @@ -5159,6 +5198,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) { case tok::kw___forceinline: case tok::kw___pascal: case tok::kw___unaligned: + case tok::kw___ptrauth: case tok::kw__Nonnull: case tok::kw__Nullable: @@ -5398,6 +5438,12 @@ void Parser::ParseTypeQualifierListOpt( ParseOpenCLQualifiers(DS.getAttributes()); break; + // __ptrauth qualifier. + case tok::kw___ptrauth: + ParsePtrauthQualifier(DS.getAttributes()); + EndLoc = PrevTokLocation; + continue; + case tok::kw___unaligned: isInvalid = DS.SetTypeQual(DeclSpec::TQ_unaligned, Loc, PrevSpec, DiagID, getLangOpts()); diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp index b74a95a3cd4b6d..4c59aa746f6dcd 100644 --- a/clang/lib/Parse/ParseExpr.cpp +++ b/clang/lib/Parse/ParseExpr.cpp @@ -580,6 +580,26 @@ class CastExpressionIdValidator final : public CorrectionCandidateCallback { }; } +ExprResult Parser::ParseBuiltinPtrauthTypeDiscriminator() { + SourceLocation Loc = ConsumeToken(); + + BalancedDelimiterTracker T(*this, tok::l_paren); + if (T.expectAndConsume()) + return ExprError(); + + TypeResult Ty = ParseTypeName(); + if (Ty.isInvalid()) { + SkipUntil(tok::r_paren, StopAtSemi); + return ExprError(); + } + + SourceLocation EndLoc = Tok.getLocation(); + T.consumeClose(); + return Actions.ActOnUnaryExprOrTypeTraitExpr( + Loc, UETT_PtrAuthTypeDiscriminator, + /*isType=*/true, Ty.get().getAsOpaquePtr(), SourceRange(Loc, EndLoc)); +} + /// Parse a cast-expression, or, if \pisUnaryExpression is true, parse /// a unary-expression. 
/// @@ -1439,6 +1459,9 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression, case tok::kw___array_extent: return ParseArrayTypeTrait(); + case tok::kw___builtin_ptrauth_type_discriminator: + return ParseBuiltinPtrauthTypeDiscriminator(); + case tok::kw___is_lvalue_expr: case tok::kw___is_rvalue_expr: return ParseExpressionTrait(); diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp index 2ab0a11e532926..43ee2e18fdfe94 100644 --- a/clang/lib/Sema/SemaCast.cpp +++ b/clang/lib/Sema/SemaCast.cpp @@ -148,6 +148,14 @@ namespace { SrcExpr = src; } + void checkQualifiedDestType() { + // Destination type may not be qualified with __ptrauth. + if (DestType.getPointerAuth()) { + Self.Diag(DestRange.getBegin(), diag::err_ptrauth_qualifier_cast) + << DestType << DestRange; + } + } + /// Check for and handle non-overload placeholder expressions. void checkNonOverloadPlaceholders() { if (!isPlaceholder() || isPlaceholder(BuiltinType::Overload)) @@ -269,6 +277,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, Op.OpRange = SourceRange(OpLoc, Parens.getEnd()); Op.DestRange = AngleBrackets; + Op.checkQualifiedDestType(); + switch (Kind) { default: llvm_unreachable("Unknown C++ cast!"); @@ -2898,6 +2908,8 @@ ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc, // -Wcast-qual DiagnoseCastQual(Op.Self, Op.SrcExpr, Op.DestType); + Op.checkQualifiedDestType(); + return Op.complete(CStyleCastExpr::Create(Context, Op.ResultType, Op.ValueKind, Op.Kind, Op.SrcExpr.get(), &Op.BasePath, CastTypeInfo, LPLoc, RPLoc)); @@ -2917,6 +2929,8 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo, if (Op.SrcExpr.isInvalid()) return ExprError(); + Op.checkQualifiedDestType(); + auto *SubExpr = Op.SrcExpr.get(); if (auto *BindExpr = dyn_cast(SubExpr)) SubExpr = BindExpr->getSubExpr(); diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 8322a9bf14775a..cc4e1062cba10d 100644 --- 
a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -125,6 +125,20 @@ static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { << call->getArg(1)->getSourceRange(); } +static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) { + if (Value->isTypeDependent()) + return false; + + InitializedEntity Entity = + InitializedEntity::InitializeParameter(S.Context, Ty, false); + ExprResult Result = + S.PerformCopyInitialization(Entity, SourceLocation(), Value); + if (Result.isInvalid()) + return true; + Value = Result.get(); + return false; +} + /// Check that the first argument to __builtin_annotation is an integer /// and the second argument is a non-wide string literal. static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { @@ -994,6 +1008,299 @@ static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, return false; } +namespace { + enum PointerAuthOpKind { + PAO_Strip, PAO_Sign, PAO_Auth, PAO_SignGeneric, PAO_Discriminator, + PAO_BlendPointer, PAO_BlendInteger + }; +} + +static bool checkPointerAuthEnabled(Sema &S, Expr *E) { + if (S.getLangOpts().PointerAuthIntrinsics) + return false; + + S.diagnosePointerAuthDisabled(E->getExprLoc(), E->getSourceRange()); + return true; +} + +void Sema::diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range) { + if (!getLangOpts().SoftPointerAuth && + !Context.getTargetInfo().isPointerAuthSupported()) { + Diag(loc, diag::err_ptrauth_disabled_target) << range; + } else { + Diag(loc, diag::err_ptrauth_disabled) << range; + } +} + +static bool checkPointerAuthKey(Sema &S, Expr *&Arg) { + // Convert it to type 'int'. + if (convertArgumentToType(S, Arg, S.Context.IntTy)) + return true; + + // Value-dependent expressions are okay; wait for template instantiation. 
+ if (Arg->isValueDependent()) + return false; + + unsigned KeyValue; + return S.checkConstantPointerAuthKey(Arg, KeyValue); +} + +bool Sema::checkConstantPointerAuthKey(Expr *Arg, unsigned &Result) { + // Attempt to constant-evaluate the expression. + llvm::APSInt KeyValue; + if (!Arg->isIntegerConstantExpr(KeyValue, Context)) { + Diag(Arg->getExprLoc(), diag::err_expr_not_ice) << 0 + << Arg->getSourceRange(); + return true; + } + + // Ask the target to validate the key parameter. + if (!Context.getTargetInfo().validatePointerAuthKey(KeyValue)) { + llvm::SmallString<32> Value; { + llvm::raw_svector_ostream Str(Value); + Str << KeyValue; + } + + Diag(Arg->getExprLoc(), diag::err_ptrauth_invalid_key) + << Value << Arg->getSourceRange(); + return true; + } + + Result = KeyValue.getZExtValue(); + return false; +} + +static std::pair +findConstantBaseAndOffset(Sema &S, Expr *E) { + // Must evaluate as a pointer. + Expr::EvalResult result; + if (!E->EvaluateAsRValue(result, S.Context) || + !result.Val.isLValue()) + return std::make_pair(nullptr, CharUnits()); + + // Base must be a declaration and can't be weakly imported. + auto baseDecl = + result.Val.getLValueBase().dyn_cast(); + if (!baseDecl || baseDecl->hasAttr()) + return std::make_pair(nullptr, CharUnits()); + + return std::make_pair(baseDecl, result.Val.getLValueOffset()); +} + +static bool checkPointerAuthValue(Sema &S, Expr *&Arg, + PointerAuthOpKind OpKind, + bool RequireConstant = false) { + if (Arg->hasPlaceholderType()) { + ExprResult R = S.CheckPlaceholderExpr(Arg); + if (R.isInvalid()) return true; + Arg = R.get(); + } + + auto allowsPointer = [](PointerAuthOpKind OpKind) { + return OpKind != PAO_BlendInteger; + }; + auto allowsInteger = [](PointerAuthOpKind OpKind) { + return OpKind == PAO_Discriminator || + OpKind == PAO_BlendInteger || + OpKind == PAO_SignGeneric; + }; + + // Require the value to have the right range of type. 
+ QualType ExpectedTy; + if (allowsPointer(OpKind) && Arg->getType()->isPointerType()) { + ExpectedTy = Arg->getType().getUnqualifiedType(); + } else if (allowsPointer(OpKind) && Arg->getType()->isNullPtrType()) { + ExpectedTy = S.Context.VoidPtrTy; + } else if (allowsInteger(OpKind) && + Arg->getType()->isIntegralOrUnscopedEnumerationType()) { + ExpectedTy = S.Context.getUIntPtrType(); + + // Diagnose the failures. + } else { + S.Diag(Arg->getExprLoc(), diag::err_ptrauth_value_bad_type) + << unsigned(OpKind == PAO_Discriminator ? 1 : + OpKind == PAO_BlendPointer ? 2 : + OpKind == PAO_BlendInteger ? 3 : 0) + << unsigned(allowsInteger(OpKind) ? + (allowsPointer(OpKind) ? 2 : 1) : 0) + << Arg->getType() + << Arg->getSourceRange(); + return true; + } + + // Convert to that type. This should just be an lvalue-to-rvalue + // conversion. + if (convertArgumentToType(S, Arg, ExpectedTy)) + return true; + + if (!RequireConstant) { + // Warn about null pointers for non-generic sign and auth operations. + if ((OpKind == PAO_Sign || OpKind == PAO_Auth) && + Arg->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNull)) { + S.Diag(Arg->getExprLoc(), + OpKind == PAO_Sign ? diag::warn_ptrauth_sign_null_pointer + : diag::warn_ptrauth_auth_null_pointer) + << Arg->getSourceRange(); + } + + return false; + } + + // Perform special checking on the arguments to ptrauth_sign_constant. + + // The main argument. + if (OpKind == PAO_Sign) { + // Require the value we're signing to have a special form. + auto result = findConstantBaseAndOffset(S, Arg); + bool invalid; + + // Must be rooted in a declaration reference. + if (!result.first) { + invalid = true; + + // If it's a function declaration, we can't have an offset. + } else if (isa(result.first)) { + invalid = !result.second.isZero(); + + // Otherwise we're fine. 
+ } else { + invalid = false; + } + + if (invalid) { + S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_pointer); + } + return invalid; + } + + // The discriminator argument. + assert(OpKind == PAO_Discriminator); + + // Must be a pointer or integer or blend thereof. + Expr *pointer = nullptr; + Expr *integer = nullptr; + if (auto call = dyn_cast(Arg->IgnoreParens())) { + if (call->getBuiltinCallee() == + Builtin::BI__builtin_ptrauth_blend_discriminator) { + pointer = call->getArg(0); + integer = call->getArg(1); + } + } + if (!pointer && !integer) { + if (Arg->getType()->isPointerType()) + pointer = Arg; + else + integer = Arg; + } + + // Check the pointer. + bool invalid = false; + if (pointer) { + assert(pointer->getType()->isPointerType()); + + // TODO: if we're initializing a global, check that the address is + // somehow related to what we're initializing. This probably will + // never really be feasible and we'll have to catch it at link-time. + auto result = findConstantBaseAndOffset(S, pointer); + if (!result.first || !isa(result.first)) { + invalid = true; + } + } + + // Check the integer. 
+ if (integer) { + assert(integer->getType()->isIntegerType()); + if (!integer->isEvaluatable(S.Context)) + invalid = true; + } + + if (invalid) { + S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_discriminator); + } + return invalid; +} + +static ExprResult SemaPointerAuthStrip(Sema &S, CallExpr *Call) { + if (checkArgCount(S, Call, 2)) return ExprError(); + if (checkPointerAuthEnabled(S, Call)) return ExprError(); + if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Strip) | + checkPointerAuthKey(S, Call->getArgs()[1])) + return ExprError(); + + Call->setType(Call->getArgs()[0]->getType()); + return Call; +} + +static ExprResult SemaPointerAuthBlendDiscriminator(Sema &S, CallExpr *Call) { + if (checkArgCount(S, Call, 2)) return ExprError(); + if (checkPointerAuthEnabled(S, Call)) return ExprError(); + if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_BlendPointer) | + checkPointerAuthValue(S, Call->getArgs()[1], PAO_BlendInteger)) + return ExprError(); + + Call->setType(S.Context.getUIntPtrType()); + return Call; +} + +static ExprResult SemaPointerAuthSignGenericData(Sema &S, CallExpr *Call) { + if (checkArgCount(S, Call, 2)) return ExprError(); + if (checkPointerAuthEnabled(S, Call)) return ExprError(); + if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_SignGeneric) | + checkPointerAuthValue(S, Call->getArgs()[1], PAO_Discriminator)) + return ExprError(); + + Call->setType(S.Context.getUIntPtrType()); + return Call; +} + +static ExprResult SemaPointerAuthSignOrAuth(Sema &S, CallExpr *Call, + PointerAuthOpKind OpKind, + bool RequireConstant) { + if (checkArgCount(S, Call, 3)) return ExprError(); + if (checkPointerAuthEnabled(S, Call)) return ExprError(); + if (checkPointerAuthValue(S, Call->getArgs()[0], OpKind, RequireConstant) | + checkPointerAuthKey(S, Call->getArgs()[1]) | + checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator, + RequireConstant)) + return ExprError(); + + Call->setType(Call->getArgs()[0]->getType()); + return 
Call; +} + +static ExprResult SemaPointerAuthAuthAndResign(Sema &S, CallExpr *Call) { + if (checkArgCount(S, Call, 5)) return ExprError(); + if (checkPointerAuthEnabled(S, Call)) return ExprError(); + if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Auth) | + checkPointerAuthKey(S, Call->getArgs()[1]) | + checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator) | + checkPointerAuthKey(S, Call->getArgs()[3]) | + checkPointerAuthValue(S, Call->getArgs()[4], PAO_Discriminator)) + return ExprError(); + + Call->setType(Call->getArgs()[0]->getType()); + return Call; +} + +static ExprResult SemaPointerAuthStringDiscriminator(Sema &S, CallExpr *call) { + if (checkPointerAuthEnabled(S, call)) return ExprError(); + + // We've already performed normal call type-checking. + Expr *arg = call->getArgs()[0]->IgnoreParenImpCasts(); + + // Operand must be an ordinary or UTF-8 string literal. + auto literal = dyn_cast(arg); + if (!literal || literal->getCharByteWidth() != 1) { + S.Diag(arg->getExprLoc(), diag::err_ptrauth_string_not_literal) + << (literal ? 
1 : 0) + << arg->getSourceRange(); + return ExprError(); + } + + return call; +} + + static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { if (checkArgCount(S, TheCall, 1)) return ExprError(); @@ -1457,6 +1764,25 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, TheCall->setType(Context.VoidPtrTy); break; + case Builtin::BI__builtin_ptrauth_strip: + return SemaPointerAuthStrip(*this, TheCall); + case Builtin::BI__builtin_ptrauth_blend_discriminator: + return SemaPointerAuthBlendDiscriminator(*this, TheCall); + case Builtin::BI__builtin_ptrauth_sign_constant: + return SemaPointerAuthSignOrAuth(*this, TheCall, PAO_Sign, + /*constant*/ true); + case Builtin::BI__builtin_ptrauth_sign_unauthenticated: + return SemaPointerAuthSignOrAuth(*this, TheCall, PAO_Sign, + /*constant*/ false); + case Builtin::BI__builtin_ptrauth_auth: + return SemaPointerAuthSignOrAuth(*this, TheCall, PAO_Auth, + /*constant*/ false); + case Builtin::BI__builtin_ptrauth_sign_generic_data: + return SemaPointerAuthSignGenericData(*this, TheCall); + case Builtin::BI__builtin_ptrauth_auth_and_resign: + return SemaPointerAuthAuthAndResign(*this, TheCall); + case Builtin::BI__builtin_ptrauth_string_discriminator: + return SemaPointerAuthStringDiscriminator(*this, TheCall); // OpenCL v2.0, s6.13.16 - Pipe functions case Builtin::BIread_pipe: case Builtin::BIwrite_pipe: @@ -9459,6 +9785,9 @@ struct SearchNonTrivialToCopyField void visitARCWeak(QualType FT, SourceLocation SL) { S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); } + void visitPtrAuth(QualType FT, SourceLocation SL) { + S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); + } void visitStruct(QualType FT, SourceLocation SL) { for (const FieldDecl *FD : FT->castAs()->getDecl()->fields()) visit(FD->getType(), FD->getLocation()); diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index 2aeab6e784edb0..1e3793015011c8 100644 --- 
a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -11533,6 +11533,12 @@ struct DiagNonTrivalCUnionCopyVisitor asDerived().visit(FD->getType(), FD, InNonTrivialUnion); } + void visitPtrAuth(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) { + if (InNonTrivialUnion) + S.Diag(FD->getLocation(), diag::note_non_trivial_c_union) + << 1 << 2 << QT << FD->getName(); + } + void preVisit(QualType::PrimitiveCopyKind PCK, QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {} void visitTrivial(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {} @@ -13245,6 +13251,12 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc, New->setType(T); } + // __ptrauth is forbidden on parameters. + if (T.getPointerAuth()) { + Diag(NameLoc, diag::err_ptrauth_qualifier_param) << T; + New->setInvalidDecl(); + } + // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage // duration shall not be qualified by an address-space qualifier." // Since all parameters have automatic store duration, they can not have @@ -16678,8 +16690,12 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl, if (RT->getDecl()->getArgPassingRestrictions() == RecordDecl::APK_CanNeverPassInRegs) Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs); - } else if (FT.getQualifiers().getObjCLifetime() == Qualifiers::OCL_Weak) + } else if (FT.getQualifiers().getObjCLifetime() == Qualifiers::OCL_Weak) { Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs); + } else if (PointerAuthQualifier Q = FT.getPointerAuth()) { + if (Q.isAddressDiscriminated()) + Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs); + } } if (Record && FD->getType().isVolatileQualified()) diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp index 6c4e127b4fb1b3..2d342cf0851f9c 100644 --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -7318,6 +7318,8 @@ 
struct SpecialMemberDeletionInfo bool shouldDeleteForVariantObjCPtrMember(FieldDecl *FD, QualType FieldType); + bool shouldDeleteForVariantPtrAuthMember(FieldDecl *FD, QualType FieldType); + bool visitBase(CXXBaseSpecifier *Base) { return shouldDeleteForBase(Base); } bool visitField(FieldDecl *Field) { return shouldDeleteForField(Field); } @@ -7466,12 +7468,36 @@ bool SpecialMemberDeletionInfo::shouldDeleteForVariantObjCPtrMember( S.Diag(FD->getLocation(), diag::note_deleted_special_member_class_subobject) << getEffectiveCSM() << ParentClass << /*IsField*/true - << FD << 4 << /*IsDtorCallInCtor*/false << /*IsObjCPtr*/true; + << FD << 4 << /*IsDtorCallInCtor*/false << 1; } return true; } +bool SpecialMemberDeletionInfo::shouldDeleteForVariantPtrAuthMember( + FieldDecl *FD, QualType FieldType) { + // Copy/move constructors/assignment operators are deleted if the field has an + // address-discriminated ptrauth qualifier. + PointerAuthQualifier Q = FieldType.getPointerAuth(); + + if (!Q || !Q.isAddressDiscriminated()) + return false; + + if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor) + return false; + + if (Diagnose) { + auto *ParentClass = cast(FD->getParent()); + S.Diag(FD->getLocation(), + diag::note_deleted_special_member_class_subobject) + << getEffectiveCSM() << ParentClass << /*IsField*/true + << FD << 4 << /*IsDtorCallInCtor*/false << 2; + } + + return true; +} + + /// Check whether we should delete a special member function due to the class /// having a particular direct or virtual base class. 
bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) { @@ -7510,6 +7536,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) { if (inUnion() && shouldDeleteForVariantObjCPtrMember(FD, FieldType)) return true; + if (inUnion() && shouldDeleteForVariantPtrAuthMember(FD, FieldType)) + return true; + if (CSM == Sema::CXXDefaultConstructor) { // For a default constructor, all references must be initialized in-class // and, if a union, it must have a non-const member. @@ -7574,6 +7603,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) { if (shouldDeleteForVariantObjCPtrMember(&*UI, UnionFieldType)) return true; + if (shouldDeleteForVariantPtrAuthMember(&*UI, UnionFieldType)) + return true; + if (!UnionFieldType.isConstQualified()) AllVariantFieldsAreConst = false; @@ -8353,6 +8385,12 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) { return; } + // Ill-formed if the field is an address-discriminated pointer. + if (FT.hasAddressDiscriminatedPointerAuth()) { + PrintDiagAndRemoveAttr(); + return; + } + if (const auto *RT = FT->getBaseElementTypeUnsafe()->getAs()) if (!RT->isDependentType() && !cast(RT->getDecl())->canPassInRegisters()) { diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index 2b5d2bbf76f8ab..8bfb89d9ac63e5 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -3748,6 +3748,17 @@ static bool CheckVecStepTraitOperandType(Sema &S, QualType T, return false; } +static bool CheckPtrAuthTypeDiscriminatorOperandType(Sema &S, QualType T, + SourceLocation Loc, + SourceRange ArgRange) { + if (T->isVariablyModifiedType()) { + S.Diag(Loc, diag::err_ptrauth_type_disc_variably_modified) << T << ArgRange; + return true; + } + + return false; +} + static bool CheckExtensionTraitOperandType(Sema &S, QualType T, SourceLocation Loc, SourceRange ArgRange, @@ -3947,6 +3958,10 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType, if (ExprKind == 
UETT_VecStep) return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange); + if (ExprKind == UETT_PtrAuthTypeDiscriminator) + return CheckPtrAuthTypeDiscriminatorOperandType( + *this, ExprType, OpLoc, ExprRange); + // Whitelist some types as extensions if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange, ExprKind)) @@ -6975,6 +6990,14 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS, lhQual.removeCVRQualifiers(); rhQual.removeCVRQualifiers(); + if (lhQual.getPointerAuth() != rhQual.getPointerAuth()) { + S.Diag(Loc, diag::err_typecheck_cond_incompatible_ptrauth) + << LHSTy << RHSTy + << LHS.get()->getSourceRange() + << RHS.get()->getSourceRange(); + return QualType(); + } + // OpenCL v2.0 specification doesn't extend compatibility of type qualifiers // (C99 6.7.3) for address spaces. We assume that the check should behave in // the same manner as it's defined for CVR qualifiers, so for OpenCL two @@ -7937,6 +7960,10 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) { else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) ConvTy = Sema::IncompatiblePointerDiscardsQualifiers; + // Treat pointer-auth mismatches as fatal. + else if (lhq.getPointerAuth() != rhq.getPointerAuth()) + ConvTy = Sema::IncompatiblePointerDiscardsQualifiers; + // For GCC/MS compatibility, other qualifier mismatches are treated // as still compatible in C. 
else ConvTy = Sema::CompatiblePointerDiscardsQualifiers; @@ -14707,7 +14734,9 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy, if (lhq.getAddressSpace() != rhq.getAddressSpace()) { DiagKind = diag::err_typecheck_incompatible_address_space; break; - + } else if (lhq.getPointerAuth() != rhq.getPointerAuth()) { + DiagKind = diag::err_typecheck_incompatible_ptrauth; + break; } else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) { DiagKind = diag::err_typecheck_incompatible_ownership; break; diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp index a44726fe3008ac..ff66a51b5baf51 100644 --- a/clang/lib/Sema/SemaType.cpp +++ b/clang/lib/Sema/SemaType.cpp @@ -2456,6 +2456,12 @@ bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) { return true; } + + // __ptrauth is illegal on a function return type. + if (T.getPointerAuth()) { + Diag(Loc, diag::err_ptrauth_qualifier_return) << T; + return true; + } + if (T.hasNonTrivialToPrimitiveDestructCUnion() || T.hasNonTrivialToPrimitiveCopyCUnion()) checkNonTrivialCUnion(T, Loc, NTCUC_FunctionReturn, @@ -2544,6 +2550,10 @@ QualType Sema::BuildFunctionType(QualType T, Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 << FixItHint::CreateInsertion(Loc, "*"); Invalid = true; + } else if (ParamType.getPointerAuth()) { + // __ptrauth is illegal on a function parameter type. + Diag(Loc, diag::err_ptrauth_qualifier_param) << T; + Invalid = true; } // C++2a [dcl.fct]p4: @@ -4623,6 +4633,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, } } + + // __ptrauth is illegal on a function return type. + if (T.getPointerAuth()) { + S.Diag(DeclType.Loc, diag::err_ptrauth_qualifier_return) << T; + } + if (LangOpts.OpenCL) { // OpenCL v2.0 s6.12.5 - A block cannot be the return value of a // function. 
@@ -7343,6 +7358,90 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr, CurType = S.Context.getVectorType(CurType, numElts, VecKind); } +/// Handle the __ptrauth qualifier. +static void HandlePtrAuthQualifier(QualType &type, const ParsedAttr &attr, + Sema &S) { + if (attr.getNumArgs() < 1 || attr.getNumArgs() > 3) { + S.Diag(attr.getLoc(), diag::err_ptrauth_qualifier_bad_arg_count); + attr.setInvalid(); + return; + } + + Expr *keyArg = + attr.getArgAsExpr(0); + Expr *isAddressDiscriminatedArg = + attr.getNumArgs() >= 2 ? attr.getArgAsExpr(1) : nullptr; + Expr *extraDiscriminatorArg = + attr.getNumArgs() >= 3 ? attr.getArgAsExpr(2) : nullptr; + + unsigned key; + if (S.checkConstantPointerAuthKey(keyArg, key)) { + attr.setInvalid(); + return; + } + assert(key <= PointerAuthQualifier::MaxKey && "ptrauth key is out of range"); + + bool isInvalid = false; + auto checkArg = [&](Expr *arg, unsigned argIndex) -> unsigned { + if (!arg) return 0; + + llvm::APSInt result; + if (!arg->isIntegerConstantExpr(result, S.Context)) { + isInvalid = true; + S.Diag(arg->getExprLoc(), diag::err_ptrauth_qualifier_arg_not_ice); + return 0; + } + + unsigned max = + (argIndex == 1 ? 
1 : PointerAuthQualifier::MaxDiscriminator); + if (result < 0 || result > max) { + llvm::SmallString<32> value; { + llvm::raw_svector_ostream str(value); + str << result; + } + + if (argIndex == 1) { + S.Diag(arg->getExprLoc(), + diag::err_ptrauth_qualifier_address_discrimination_invalid) + << value; + } else { + S.Diag(arg->getExprLoc(), + diag::err_ptrauth_qualifier_extra_discriminator_invalid) + << value << max; + } + isInvalid = true; + } + return result.getZExtValue(); + }; + bool isAddressDiscriminated = checkArg(isAddressDiscriminatedArg, 1); + unsigned extraDiscriminator = checkArg(extraDiscriminatorArg, 2); + if (isInvalid) { + attr.setInvalid(); + return; + } + + if (!type->isPointerType()) { + S.Diag(attr.getLoc(), diag::err_ptrauth_qualifier_nonpointer) << type; + attr.setInvalid(); + return; + } + + if (type.getPointerAuth()) { + S.Diag(attr.getLoc(), diag::err_ptrauth_qualifier_redundant) << type; + attr.setInvalid(); + return; + } + + if (!S.getLangOpts().PointerAuthIntrinsics) { + S.diagnosePointerAuthDisabled(attr.getLoc(), attr.getRange()); + attr.setInvalid(); + return; + } + + PointerAuthQualifier qual(key, isAddressDiscriminated, extraDiscriminator); + type = S.Context.getPointerAuthType(type, qual); +} + /// Handle OpenCL Access Qualifier Attribute. 
static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr, Sema &S) { @@ -7652,6 +7751,10 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type, HandleOpenCLAccessAttr(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; + case ParsedAttr::AT_PointerAuth: + HandlePtrAuthQualifier(type, attr, state.getSema()); + attr.setUsedAsTypeAttr(); + break; case ParsedAttr::AT_LifetimeBound: if (TAL == TAL_DeclChunk) HandleLifetimeBoundAttr(state, type, attr); diff --git a/clang/test/AST/ast-dump-ptrauth-json.cpp b/clang/test/AST/ast-dump-ptrauth-json.cpp new file mode 100644 index 00000000000000..d4f23fba17c31e --- /dev/null +++ b/clang/test/AST/ast-dump-ptrauth-json.cpp @@ -0,0 +1,5 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -std=c++11 -ast-dump=json %s | FileCheck %s + +// CHECK: "name": "ptrauth_type_discriminator", + +int d = __builtin_ptrauth_type_discriminator(int); diff --git a/clang/test/CodeGen/ptrauth-blocks.c b/clang/test/CodeGen/ptrauth-blocks.c new file mode 100644 index 00000000000000..fc01aa3dbcc532 --- /dev/null +++ b/clang/test/CodeGen/ptrauth-blocks.c @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -fblocks -emit-llvm %s -o - | FileCheck %s + +void (^blockptr)(void); + +// CHECK: [[INVOCATION_1:@.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (i8*)* {{@.*}} to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }, { i8**, i32, i32, i8*, %struct.__block_descriptor* }* [[GLOBAL_BLOCK_1:@.*]], i32 0, i32 3) to i64), i64 0 }, section "llvm.ptrauth" +// CHECK: [[GLOBAL_BLOCK_1]] = internal constant { i8**, i32, i32, i8*, %struct.__block_descriptor* } { i8** @_NSConcreteGlobalBlock, i32 1342177280, i32 0, i8* bitcast ({ i8*, i32, i64, i64 }* [[INVOCATION_1]] to i8*), +void (^globalblock)(void) = ^{}; + +// CHECK-LABEL: define void @test_block_call() 
+void test_block_call() { + // CHECK: [[T0:%.*]] = load void ()*, void ()** @blockptr, + // CHECK-NEXT: [[BLOCK:%.*]] = bitcast void ()* [[T0]] to [[BLOCK_T:%.*]]*{{$}} + // CHECK-NEXT: [[FNADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 3 + // CHECK-NEXT: [[BLOCK_OPAQUE:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to i8* + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[FNADDR]], + // CHECK-NEXT: [[FNPTR:%.*]] = bitcast i8* [[T0]] to void (i8*)* + // CHECK-NEXT: [[DISC:%.*]] = ptrtoint i8** [[FNADDR]] to i64 + // CHECK-NEXT: call void [[FNPTR]](i8* [[BLOCK_OPAQUE]]) [ "ptrauth"(i32 0, i64 [[DISC]]) ] + blockptr(); +} + +void use_block(int (^)(void)); + +// CHECK-LABEL: define void @test_block_literal( +void test_block_literal(int i) { + // CHECK: [[I:%.*]] = alloca i32, + // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:.*]], align + // CHECK: [[FNPTRADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 3 + // CHECK-NEXT: [[DISCRIMINATOR:%.*]] = ptrtoint i8** [[FNPTRADDR]] to i64 + // CHECK-NEXT: [[SIGNED:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i32 (i8*)* {{@.*}} to i64), i32 0, i64 [[DISCRIMINATOR]]) + // CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGNED]] to i8* + // CHECK-NEXT: store i8* [[T0]], i8** [[FNPTRADDR]] + use_block(^{return i;}); +} + +struct A { + int value; +}; +struct A *createA(void); diff --git a/clang/test/CodeGen/ptrauth-debuginfo.c b/clang/test/CodeGen/ptrauth-debuginfo.c new file mode 100644 index 00000000000000..807bf93a6d9f59 --- /dev/null +++ b/clang/test/CodeGen/ptrauth-debuginfo.c @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios \ +// RUN: -fptrauth-calls -fptrauth-intrinsics -emit-llvm -fblocks \ +// RUN: %s -debug-info-kind=limited -o - | FileCheck %s + +// Constant initializers for data pointers. 
+extern int external_int; + +// CHECK: !DIDerivedType(tag: DW_TAG_APPLE_ptrauth_type, +// CHECK-SAME: ptrAuthKey: 1, +// CHECK-SAME: ptrAuthIsAddressDiscriminated: false, +// CHECK-SAME: ptrAuthExtraDiscriminator: 1234) +int * __ptrauth(1,0,1234) g1 = &external_int; + +struct A { + int value; +}; +struct A *createA(void); + +void f() { + __block struct A * __ptrauth(1, 1, 1) ptr = createA(); + ^{ ptr->value; }(); +} +// CHECK: !DIDerivedType(tag: DW_TAG_APPLE_ptrauth_type, +// CHECK-SAME: ptrAuthKey: 1, +// CHECK-SAME: ptrAuthIsAddressDiscriminated: true, +// CHECK-SAME: ptrAuthExtraDiscriminator: 1) diff --git a/clang/test/CodeGen/ptrauth-function-attributes.c b/clang/test/CodeGen/ptrauth-function-attributes.c new file mode 100644 index 00000000000000..03f996d89d848e --- /dev/null +++ b/clang/test/CodeGen/ptrauth-function-attributes.c @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,OFF + +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-returns -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,RETS +// RUN: %clang_cc1 -triple arm64e-apple-ios -fptrauth-returns -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,RETS +// RUN: %clang_cc1 -triple arm64e-apple-ios -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,OFF + +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,CALLS +// RUN: %clang_cc1 -triple arm64e-apple-ios -fptrauth-calls -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,CALLS +// RUN: %clang_cc1 -triple arm64e-apple-ios -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,OFF + +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-indirect-gotos -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,GOTOS +// RUN: %clang_cc1 -triple arm64e-apple-ios -fptrauth-indirect-gotos -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,GOTOS +// RUN: %clang_cc1 -triple arm64e-apple-ios -emit-llvm %s -o - | 
FileCheck %s --check-prefixes=ALL,OFF + +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-auth-traps -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,TRAPS +// RUN: %clang_cc1 -triple arm64e-apple-ios -fptrauth-auth-traps -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,TRAPS +// RUN: %clang_cc1 -triple arm64e-apple-ios -emit-llvm %s -o - | FileCheck %s --check-prefixes=ALL,OFF + +// ALL-LABEL: define void @test() #0 +void test() { +} + +// RETS: attributes #0 = {{{.*}} "ptrauth-returns" {{.*}}} +// CALLS: attributes #0 = {{{.*}} "ptrauth-calls" {{.*}}} +// GOTOS: attributes #0 = {{{.*}} "ptrauth-indirect-gotos" {{.*}}} +// TRAPS: attributes #0 = {{{.*}} "ptrauth-auth-traps" {{.*}}} +// OFF-NOT: attributes {{.*}} "ptrauth- diff --git a/clang/test/CodeGen/ptrauth-in-c-struct.c b/clang/test/CodeGen/ptrauth-in-c-struct.c new file mode 100644 index 00000000000000..c1919f981c62ac --- /dev/null +++ b/clang/test/CodeGen/ptrauth-in-c-struct.c @@ -0,0 +1,129 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fblocks -fptrauth-calls -fptrauth-returns -fptrauth-intrinsics -emit-llvm -o - %s | FileCheck %s + +#define AQ __ptrauth(1,1,50) +#define IQ __ptrauth(1,0,50) + +typedef void (^BlockTy)(void); + +// CHECK: %[[STRUCT_SA:.*]] = type { i32, i32* } +// CHECK: %[[STRUCT_SI:.*]] = type { i32* } + +typedef struct { + int f0; + int * AQ f1; // Signed using address discrimination. +} SA; + +typedef struct { + int * IQ f; // No address discrimination. 
+} SI; + +SA getSA(void); +void calleeSA(SA); + +// CHECK: define void @test_copy_constructor_SA(%[[STRUCT_SA]]* %{{.*}}) +// CHECK: call void @__copy_constructor_8_8_t0w4_pa8( + +// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w4_pa8(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V5:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 8 +// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** +// CHECK: %[[V8:.*]] = bitcast i8** %[[V1]] to i8* +// CHECK: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 8 +// CHECK: %[[V10:.*]] = bitcast i8* %[[V9]] to i8** +// CHECK: %[[V11:.*]] = load i8*, i8** %[[V10]], align 8 +// CHECK: %[[V12:.*]] = ptrtoint i8** %[[V10]] to i64 +// CHECK: %[[V13:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V12]], i64 50) +// CHECK: %[[V14:.*]] = ptrtoint i8** %[[V7]] to i64 +// CHECK: %[[V15:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V14]], i64 50) +// CHECK: %[[V17:.*]] = ptrtoint i8* %[[V11]] to i64 +// CHECK: %[[V18:.*]] = call i64 @llvm.ptrauth.resign.i64(i64 %[[V17]], i32 1, i64 %[[V13]], i32 1, i64 %[[V15]]) + +void test_copy_constructor_SA(SA *s) { + SA t = *s; +} + +// CHECK: define void @test_copy_assignment_SA( +// CHECK: call void @__copy_assignment_8_8_t0w4_pa8( + +// CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_t0w4_pa8( + +void test_copy_assignment_SA(SA *d, SA *s) { + *d = *s; +} + +// CHECK: define void @test_move_constructor_SA( +// CHECK: define internal void @__Block_byref_object_copy_( +// CHECK: define linkonce_odr hidden void @__move_constructor_8_8_t0w4_pa8( + +void 
test_move_constructor_SA(void) { + __block SA t; + BlockTy b = ^{ (void)t; }; +} + +// CHECK: define void @test_move_assignment_SA( +// CHECK: call void @__move_assignment_8_8_t0w4_pa8( +// CHECK: define linkonce_odr hidden void @__move_assignment_8_8_t0w4_pa8( + +void test_move_assignment_SA(SA *p) { + *p = getSA(); +} + +// CHECK: define void @test_parameter_SA(%[[STRUCT_SA]]* %{{.*}}) +// CHECK-NOT: call +// CHECK: ret void + +void test_parameter_SA(SA a) { +} + +// CHECK: define void @test_argument_SA(%[[STRUCT_SA]]* %[[A:.*]]) +// CHECK: %[[A_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: %[[AGG_TMP:.*]] = alloca %[[STRUCT_SA]], align 8 +// CHECK: store %[[STRUCT_SA]]* %[[A]], %[[STRUCT_SA]]** %[[A_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[A_ADDR]], align 8 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_SA]]* %[[AGG_TMP]] to i8** +// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_SA]]* %[[V0]] to i8** +// CHECK: call void @__copy_constructor_8_8_t0w4_pa8(i8** %[[V1]], i8** %[[V2]]) #5 +// CHECK: call void @calleeSA(%[[STRUCT_SA]]* %[[AGG_TMP]]) +// CHECK-NOT: call +// CHECK: ret void + +void test_argument_SA(SA *a) { + calleeSA(*a); +} + +// CHECK: define void @test_return_SA(%[[STRUCT_SA]]* noalias sret %[[AGG_RESULT:.*]], %[[STRUCT_SA]]* %[[A:.*]]) +// CHECK: %[[A_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: store %[[STRUCT_SA]]* %[[A]], %[[STRUCT_SA]]** %[[A_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[A_ADDR]], align 8 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_SA]]* %[[AGG_RESULT]] to i8** +// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_SA]]* %[[V0]] to i8** +// CHECK: call void @__copy_constructor_8_8_t0w4_pa8(i8** %[[V1]], i8** %[[V2]]) #5 +// CHECK-NOT: call +// CHECK: ret void + +SA test_return_SA(SA *a) { + return *a; +} + +// CHECK: define void @test_copy_constructor_SI( +// CHECK-NOT: call +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64( +// CHECK-NOT: call +// CHECK: ret 
void + +void test_copy_constructor_SI(SI *s) { + SI t = *s; +} + +// CHECK: define void @test_parameter_SI(i64 %{{.*}}) +// CHECK-NOT: call +// CHECK: ret void + +void test_parameter_SI(SI a) { +} diff --git a/clang/test/CodeGen/ptrauth-intrinsics.c b/clang/test/CodeGen/ptrauth-intrinsics.c new file mode 100644 index 00000000000000..cf3e70359e7818 --- /dev/null +++ b/clang/test/CodeGen/ptrauth-intrinsics.c @@ -0,0 +1,96 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-intrinsics -emit-llvm %s -o - | FileCheck %s + +void (*fnptr)(void); +long int_discriminator; +void *ptr_discriminator; +long signature; + +// CHECK-LABEL: define void @test_auth() +void test_auth() { + // CHECK: [[PTR:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[DISC0:%.*]] = load i8*, i8** @ptr_discriminator, + // CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()* [[PTR]] to i64 + // CHECK-NEXT: [[DISC:%.*]] = ptrtoint i8* [[DISC0]] to i64 + // CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 0, i64 [[DISC]]) + // CHECK-NEXT: [[RESULT:%.*]] = inttoptr i64 [[T1]] to void ()* + // CHECK-NEXT: store void ()* [[RESULT]], void ()** @fnptr, + fnptr = __builtin_ptrauth_auth(fnptr, 0, ptr_discriminator); +} + +// CHECK-LABEL: define void @test_auth_peephole() +void test_auth_peephole() { + // CHECK: [[PTR:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[DISC0:%.*]] = load i8*, i8** @ptr_discriminator, + // CHECK-NEXT: [[DISC:%.*]] = ptrtoint i8* [[DISC0]] to i64 + // CHECK-NEXT: call void [[PTR]]() [ "ptrauth"(i32 0, i64 [[DISC]]) ] + // CHECK-NEXT: ret void + __builtin_ptrauth_auth(fnptr, 0, ptr_discriminator)(); +} + +// CHECK-LABEL: define void @test_strip() +void test_strip() { + // CHECK: [[PTR:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()* [[PTR]] to i64 + // CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.strip.i64(i64 [[T0]], i32 0) + // CHECK-NEXT: [[RESULT:%.*]] = inttoptr i64 [[T1]] to void ()* + // 
CHECK-NEXT: store void ()* [[RESULT]], void ()** @fnptr, + fnptr = __builtin_ptrauth_strip(fnptr, 0); +} + +// CHECK-LABEL: define void @test_sign_unauthenticated() +void test_sign_unauthenticated() { + // CHECK: [[PTR:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[DISC0:%.*]] = load i8*, i8** @ptr_discriminator, + // CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()* [[PTR]] to i64 + // CHECK-NEXT: [[DISC:%.*]] = ptrtoint i8* [[DISC0]] to i64 + // CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 [[T0]], i32 0, i64 [[DISC]]) + // CHECK-NEXT: [[RESULT:%.*]] = inttoptr i64 [[T1]] to void ()* + // CHECK-NEXT: store void ()* [[RESULT]], void ()** @fnptr, + fnptr = __builtin_ptrauth_sign_unauthenticated(fnptr, 0, ptr_discriminator); +} + +// CHECK-LABEL: define void @test_auth_and_resign() +void test_auth_and_resign() { + // CHECK: [[PTR:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[DISC0:%.*]] = load i8*, i8** @ptr_discriminator, + // CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()* [[PTR]] to i64 + // CHECK-NEXT: [[DISC:%.*]] = ptrtoint i8* [[DISC0]] to i64 + // CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 0, i64 [[DISC]], i32 3, i64 15) + // CHECK-NEXT: [[RESULT:%.*]] = inttoptr i64 [[T1]] to void ()* + // CHECK-NEXT: store void ()* [[RESULT]], void ()** @fnptr, + fnptr = __builtin_ptrauth_auth_and_resign(fnptr, 0, ptr_discriminator, 3, 15); +} + +// CHECK-LABEL: define void @test_blend_discriminator() +void test_blend_discriminator() { + // CHECK: [[PTR:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[DISC:%.*]] = load i64, i64* @int_discriminator, + // CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()* [[PTR]] to i64 + // CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 [[DISC]]) + // CHECK-NEXT: store i64 [[RESULT]], i64* @int_discriminator, + int_discriminator = __builtin_ptrauth_blend_discriminator(fnptr, int_discriminator); +} + +// CHECK-LABEL: define void 
@test_sign_generic_data() +void test_sign_generic_data() { + // CHECK: [[PTR:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[DISC0:%.*]] = load i8*, i8** @ptr_discriminator, + // CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()* [[PTR]] to i64 + // CHECK-NEXT: [[DISC:%.*]] = ptrtoint i8* [[DISC0]] to i64 + // CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.ptrauth.sign.generic.i64(i64 [[T0]], i64 [[DISC]]) + // CHECK-NEXT: store i64 [[RESULT]], i64* @signature, + signature = __builtin_ptrauth_sign_generic_data(fnptr, ptr_discriminator); +} + +// CHECK-LABEL: define void @test_string_discriminator() +void test_string_discriminator() { + // CHECK: [[X:%.*]] = alloca i32 + + // Check a couple of random discriminators used by Swift. + + // CHECK: store i32 58298, i32* [[X]], + int x = __builtin_ptrauth_string_discriminator("InitializeWithCopy"); + + // CHECK: store i32 9112, i32* [[X]], + x = __builtin_ptrauth_string_discriminator("DestroyArray"); +} diff --git a/clang/test/CodeGen/ptrauth-qualifier-loadstore.c b/clang/test/CodeGen/ptrauth-qualifier-loadstore.c new file mode 100644 index 00000000000000..919d19a6ccfebf --- /dev/null +++ b/clang/test/CodeGen/ptrauth-qualifier-loadstore.c @@ -0,0 +1,744 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm %s -o - | FileCheck %s + +#define IQ __ptrauth(1,0,50) +#define AQ __ptrauth(1,1,50) +#define DIFF_IQ __ptrauth(1,0,100) +#define DIFF_AQ __ptrauth(1,1,100) +#define ZERO_IQ __ptrauth(1,0,0) +#define ZERO_AQ __ptrauth(1,1,0) + +extern int external_int; +extern int * global_upi; +extern int * IQ global_iqpi; +extern int * AQ global_aqpi; +extern void use_upi(int *ptr); + +typedef void func_t(void); +extern void external_func(void); +extern func_t *global_upf; +extern func_t * IQ global_iqpf; +extern func_t * AQ global_aqpf; +extern void use_upf(func_t *ptr); + +// Data with address-independent qualifiers. 
+ +// CHECK-LABEL: define void @test_store_data_i_constant() +void test_store_data_i_constant() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[SIGN:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i32* @external_int to i64), i32 1, i64 50) +// CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGN]] to i32* +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * IQ iqpi = &external_int; +// CHECK-NEXT: [[T0:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i32* @external_int to i64), i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T0]] to i32* +// CHECK-NEXT: store i32* [[SIGNED]], i32** [[V]], +// CHECK-NEXT: ret void + iqpi = &external_int; +} + +// CHECK-LABEL: define void @test_store_data_iu() +void test_store_data_iu() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_upi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 [[T0]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * IQ iqpi = global_upi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_upi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 [[T0]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + iqpi = global_upi; +} + +// CHECK-LABEL: define void @test_store_data_ia() +void test_store_data_ia() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load 
i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * IQ iqpi = global_aqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + iqpi = global_aqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[RESULT:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[RESULT]], i32** [[V]], +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* 
[[RESULT]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[RESULT]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 1, i64 50) +// CHECK-NEXT: [[AUTHED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[RESULT:%.*]] = phi i32* [ null, {{.*}} ], [ [[AUTHED]], {{.*}} ] +// CHECK-NEXT: call void @use_upi(i32* [[RESULT]]) + use_upi(iqpi = global_aqpi); +} + +// CHECK-LABEL: define void @test_store_data_ii_same() +void test_store_data_ii_same() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: store i32* [[LOAD]], i32** [[V]], + int * IQ iqpi = global_iqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: store i32* [[LOAD]], i32** [[V]], + iqpi = global_iqpi; +} + +// CHECK-LABEL: define void @test_store_data_ii_different() +void test_store_data_ii_different() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 100) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * DIFF_IQ iqpi = global_iqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 100) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], 
{{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + iqpi = global_iqpi; +} + +// CHECK-LABEL: define void @test_store_data_ii_zero() +void test_store_data_ii_zero() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * ZERO_IQ iqpi = global_iqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[V]] +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 0, i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** @global_iqpi, + global_iqpi = iqpi; +} + +// CHECK-LABEL: define void @test_load_data_i() +void test_load_data_i() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 1, i64 50) +// CHECK-NEXT: [[AUTHED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[AUTHED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int *upi = global_iqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** 
@global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 1, i64 50) +// CHECK-NEXT: [[AUTHED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[AUTHED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + upi = global_iqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 1, i64 50) +// CHECK-NEXT: [[AUTHED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[AUTHED]], {{.*}} ] +// CHECK-NEXT: call void @use_upi(i32* [[T0]]) + use_upi(global_iqpi); +} + +// Data with address-discriminated qualifiers. 
+ +// CHECK-LABEL: define void @test_store_data_a_constant() +void test_store_data_a_constant() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[SIGN:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i32* @external_int to i64), i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGN]] to i32* +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * AQ aqpi = &external_int; +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[SIGN:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i32* @external_int to i64), i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGN]] to i32* +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + aqpi = &external_int; +} + +// CHECK-LABEL: define void @test_store_data_au() +void test_store_data_au() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_upi, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 [[T0]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * AQ aqpi = global_upi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_upi, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* 
[[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 [[T0]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + aqpi = global_upi; +} + +// CHECK-LABEL: define void @test_store_data_ai() +void test_store_data_ai() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * AQ aqpi = global_iqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_iqpi, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + aqpi = global_iqpi; +} + +// CHECK-LABEL: define void @test_store_data_aa_same() 
+void test_store_data_aa_same() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * AQ aqpi = global_aqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + aqpi = global_aqpi; +} + +// CHECK-LABEL: define void @test_store_data_aa_different() +void test_store_data_aa_different() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint 
(i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 100) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * DIFF_AQ aqpi = global_aqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 100) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + aqpi = global_aqpi; +} + +// CHECK-LABEL: define void @test_store_data_aa_zero() +void test_store_data_aa_zero() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[NEWDISC:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint 
i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int * ZERO_AQ aqpi = global_aqpi; +// CHECK: [[LOAD:%.*]] = load i32*, i32** [[V]], +// CHECK-NEXT: [[OLDDISC:%.*]] = ptrtoint i32** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** @global_aqpi, + global_aqpi = aqpi; +} + +// CHECK-LABEL: define void @test_load_data_a() +void test_load_data_a() { +// CHECK: [[V:%.*]] = alloca i32*, +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]]) +// CHECK-NEXT: [[AUTHED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[AUTHED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + int *upi = global_aqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 
@llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]]) +// CHECK-NEXT: [[AUTHED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[AUTHED]], {{.*}} ] +// CHECK-NEXT: store i32* [[T0]], i32** [[V]], + upi = global_aqpi; +// CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** @global_aqpi, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i32** @global_aqpi to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne i32* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint i32* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]]) +// CHECK-NEXT: [[AUTHED:%.*]] = inttoptr i64 [[T1]] to i32* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi i32* [ null, {{.*}} ], [ [[AUTHED]], {{.*}} ] +// CHECK-NEXT: call void @use_upi(i32* [[T0]]) + use_upi(global_aqpi); +} + +// Function with address-independent qualifiers. 
+ +// CHECK-LABEL: define void @test_store_function_i_constant() +void test_store_function_i_constant() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[SIGN:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 ptrtoint ({{.*}} @external_func.ptrauth to i64), i32 0, i64 0, i32 1, i64 50) +// CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGN]] to void ()* +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * IQ iqpf = &external_func; +// CHECK-NEXT: [[SIGN:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 ptrtoint ({{.*}} @external_func.ptrauth to i64), i32 0, i64 0, i32 1, i64 50) +// CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGN]] to void ()* +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + iqpf = &external_func; +} + +// CHECK-LABEL: define void @test_store_function_iu() +void test_store_function_iu() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_upf, +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 0, i64 0, i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * IQ iqpf = global_upf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_upf, +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 0, i64 0, i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + 
iqpf = global_upf; +} + +// CHECK-LABEL: define void @test_store_function_ia() +void test_store_function_ia() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * IQ iqpf = global_aqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + iqpf = global_aqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 
[[OLDDISC]], i32 1, i64 50) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[RESULT:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[RESULT]], void ()** [[V]], +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[RESULT]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[RESULT]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 0, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: call void @use_upf(void ()* [[T0]]) + use_upf(iqpf = global_aqpf); +} + +// CHECK-LABEL: define void @test_store_function_ii_same() +void test_store_function_ii_same() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_iqpf, +// CHECK-NEXT: store void ()* [[LOAD]], void ()** [[V]], + func_t * IQ iqpf = global_iqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_iqpf, +// CHECK-NEXT: store void ()* [[LOAD]], void ()** [[V]], + iqpf = global_iqpf; +} + +// CHECK-LABEL: define void @test_store_function_ii_different() +void test_store_function_ii_different() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_iqpf, +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 100) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * DIFF_IQ iqpf = global_iqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load 
void ()*, void ()** @global_iqpf, +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 100) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + iqpf = global_iqpf; +} + +// CHECK-LABEL: define void @test_load_function_i() +void test_load_function_i() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_iqpf, +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 0, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t *upf = global_iqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_iqpf, +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 0, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + upf = global_iqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_iqpf, +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = 
ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 0, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: call void @use_upf(void ()* [[T0]]) + use_upf(global_iqpf); +} + +// Function with address-discriminated qualifiers. + +// CHECK-LABEL: define void @test_store_function_a_constant() +void test_store_function_a_constant() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[SIGN:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 ptrtoint ({{.*}} @external_func.ptrauth to i64), i32 0, i64 0, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGN]] to void ()* +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * AQ aqpf = &external_func; +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[SIGN:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 ptrtoint ({{.*}} @external_func.ptrauth to i64), i32 0, i64 0, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGN]] to void ()* +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + aqpf = &external_func; +} + +// CHECK-LABEL: define void @test_store_function_au() +void test_store_function_au() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_upf, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// 
CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 0, i64 0, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * AQ aqpf = global_upf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_upf, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 0, i64 0, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + aqpf = global_upf; +} + +// CHECK-LABEL: define void @test_store_function_ai() +void test_store_function_ai() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_iqpf, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * AQ aqpf = global_iqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load 
void ()*, void ()** @global_iqpf, +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 50, i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + aqpf = global_iqpf; +} + +// CHECK-LABEL: define void @test_store_function_aa_same() +void test_store_function_aa_same() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * AQ aqpf = global_aqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 
@llvm.ptrauth.blend.i64(i64 [[T0]], i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + aqpf = global_aqpf; +} + +// CHECK-LABEL: define void @test_store_function_aa_different() +void test_store_function_aa_different() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 100) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t * DIFF_AQ aqpf = global_aqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = ptrtoint void ()** [[V]] to i64 +// CHECK-NEXT: [[NEWDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 100) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// 
CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 1, i64 [[NEWDISC]]) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + aqpf = global_aqpf; +} + +// CHECK-LABEL: define void @test_load_function_a() +void test_load_function_a() { +// CHECK: [[V:%.*]] = alloca void ()*, +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 0, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + func_t *upf = global_aqpf; +// CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 0, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: store void ()* [[T0]], void ()** [[V]], + upf = global_aqpf; +// 
CHECK-NEXT: [[LOAD:%.*]] = load void ()*, void ()** @global_aqpf, +// CHECK-NEXT: [[OLDDISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (void ()** @global_aqpf to i64), i64 50) +// CHECK-NEXT: [[T0:%.*]] = icmp ne void ()* [[LOAD]], null +// CHECK-NEXT: br i1 [[T0]], +// CHECK: [[T0:%.*]] = ptrtoint void ()* [[LOAD]] to i64 +// CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.resign.i64(i64 [[T0]], i32 1, i64 [[OLDDISC]], i32 0, i64 0) +// CHECK-NEXT: [[SIGNED:%.*]] = inttoptr i64 [[T1]] to void ()* +// CHECK-NEXT: br label +// CHECK: [[T0:%.*]] = phi void ()* [ null, {{.*}} ], [ [[SIGNED]], {{.*}} ] +// CHECK-NEXT: call void @use_upf(void ()* [[T0]]) + use_upf(global_aqpf); +} diff --git a/clang/test/CodeGen/ptrauth-qualifier.c b/clang/test/CodeGen/ptrauth-qualifier.c new file mode 100644 index 00000000000000..c123103f06d156 --- /dev/null +++ b/clang/test/CodeGen/ptrauth-qualifier.c @@ -0,0 +1,87 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm %s -o - | FileCheck %s + +// Constant initializers for data pointers. +extern int external_int; + +// CHECK: [[PTRAUTH_G1:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 0, i64 56 }, section "llvm.ptrauth" +// CHECK: @g1 = global i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_G1]] to i32*) +int * __ptrauth(1,0,56) g1 = &external_int; + +// CHECK: [[PTRAUTH_G2:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 ptrtoint (i32** @g2 to i64), i64 1272 }, section "llvm.ptrauth" +// CHECK: @g2 = global i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_G2]] to i32*) +int * __ptrauth(1,1,1272) g2 = &external_int; + +// CHECK: @g3 = global i32* null +int * __ptrauth(1,1,871) g3 = 0; + +// FIXME: should we make a ptrauth constant for this absolute symbol? 
+// CHECK: @g4 = global i32* inttoptr (i64 1230 to i32*) +int * __ptrauth(1,1,1902) g4 = (int*) 1230; + +// CHECK: [[PTRAUTH_GA0:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 ptrtoint ([3 x i32*]* @ga to i64), i64 712 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_GA1:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 ptrtoint (i32** getelementptr inbounds ([3 x i32*], [3 x i32*]* @ga, i32 0, i32 1) to i64), i64 712 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_GA2:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 ptrtoint (i32** getelementptr inbounds ([3 x i32*], [3 x i32*]* @ga, i32 0, i32 2) to i64), i64 712 }, section "llvm.ptrauth" +// CHECK: @ga = global [3 x i32*] [i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GA0]] to i32*), i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GA1]] to i32*), i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GA2]] to i32*)] +int * __ptrauth(1,1,712) ga[3] = { &external_int, &external_int, &external_int }; + +struct A { + int * __ptrauth(1,0,431) f0; + int * __ptrauth(1,0,9182) f1; + int * __ptrauth(1,0,783) f2; +}; +// CHECK: [[PTRAUTH_GS0:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 0, i64 431 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_GS1:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 0, i64 9182 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_GS2:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 0, i64 783 }, section "llvm.ptrauth" +// CHECK: @gs1 = global %struct.A { i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GS0]] to i32*), i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GS1]] to 
i32*), i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GS2]] to i32*) } +struct A gs1 = { &external_int, &external_int, &external_int }; + +struct B { + int * __ptrauth(1,1,1276) f0; + int * __ptrauth(1,1,23674) f1; + int * __ptrauth(1,1,163) f2; +}; +// CHECK: [[PTRAUTH_GS0:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 ptrtoint (%struct.B* @gs2 to i64), i64 1276 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_GS1:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 ptrtoint (i32** getelementptr inbounds (%struct.B, %struct.B* @gs2, i32 0, i32 1) to i64), i64 23674 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_GS2:@external_int.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @external_int to i8*), i32 1, i64 ptrtoint (i32** getelementptr inbounds (%struct.B, %struct.B* @gs2, i32 0, i32 2) to i64), i64 163 }, section "llvm.ptrauth" +// CHECK: @gs2 = global %struct.B { i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GS0]] to i32*), i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GS1]] to i32*), i32* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_GS2]] to i32*) } +struct B gs2 = { &external_int, &external_int, &external_int }; + +// Constant initializers for function pointers. 
+extern void external_function(void); +typedef void (*fpt)(void); + +// CHECK: [[PTRAUTH_F1:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 0, i64 56 }, section "llvm.ptrauth" +// CHECK: @f1 = global void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_F1]] to void ()*) +fpt __ptrauth(1,0,56) f1 = &external_function; + +// CHECK: [[PTRAUTH_F2:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 ptrtoint (void ()** @f2 to i64), i64 1272 }, section "llvm.ptrauth" +// CHECK: @f2 = global void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_F2]] to void ()*) +fpt __ptrauth(1,1,1272) f2 = &external_function; + +// CHECK: [[PTRAUTH_FA0:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 ptrtoint ([3 x void ()*]* @fa to i64), i64 712 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_FA1:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 ptrtoint (void ()** getelementptr inbounds ([3 x void ()*], [3 x void ()*]* @fa, i32 0, i32 1) to i64), i64 712 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_FA2:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 ptrtoint (void ()** getelementptr inbounds ([3 x void ()*], [3 x void ()*]* @fa, i32 0, i32 2) to i64), i64 712 }, section "llvm.ptrauth" +// CHECK: @fa = global [3 x void ()*] [void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FA0]] to void ()*), void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FA1]] to void ()*), void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FA2]] to void ()*)] +fpt __ptrauth(1,1,712) fa[3] = { &external_function, &external_function, &external_function }; + +struct C { + fpt 
__ptrauth(1,0,431) f0; + fpt __ptrauth(1,0,9182) f1; + fpt __ptrauth(1,0,783) f2; +}; +// CHECK: [[PTRAUTH_FS0:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 0, i64 431 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_FS1:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 0, i64 9182 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_FS2:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 0, i64 783 }, section "llvm.ptrauth" +// CHECK: @fs1 = global %struct.C { void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FS0]] to void ()*), void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FS1]] to void ()*), void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FS2]] to void ()*) } +struct C fs1 = { &external_function, &external_function, &external_function }; + +struct D { + fpt __ptrauth(1,1,1276) f0; + fpt __ptrauth(1,1,23674) f1; + fpt __ptrauth(1,1,163) f2; +}; +// CHECK: [[PTRAUTH_FS0:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 ptrtoint (%struct.D* @fs2 to i64), i64 1276 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_FS1:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 ptrtoint (void ()** getelementptr inbounds (%struct.D, %struct.D* @fs2, i32 0, i32 1) to i64), i64 23674 }, section "llvm.ptrauth" +// CHECK: [[PTRAUTH_FS2:@external_function.ptrauth.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 1, i64 ptrtoint (void ()** getelementptr inbounds (%struct.D, %struct.D* @fs2, i32 0, i32 2) to i64), i64 163 }, section "llvm.ptrauth" +// CHECK: @fs2 = global %struct.D { void ()* bitcast ({ i8*, 
i32, i64, i64 }* [[PTRAUTH_FS0]] to void ()*), void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FS1]] to void ()*), void ()* bitcast ({ i8*, i32, i64, i64 }* [[PTRAUTH_FS2]] to void ()*) } +struct D fs2 = { &external_function, &external_function, &external_function }; diff --git a/clang/test/CodeGen/ptrauth-weak_import.c b/clang/test/CodeGen/ptrauth-weak_import.c new file mode 100644 index 00000000000000..71717b78c4cb8c --- /dev/null +++ b/clang/test/CodeGen/ptrauth-weak_import.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm %s -o - | FileCheck %s + +extern void foo() __attribute__((weak_import)); + +// CHECK-LABEL: define void @bar() +// CHECK: br i1 icmp ne (void (...)* bitcast ({ i8*, i32, i64, i64 }* @foo.ptrauth to void (...)*), void (...)* null), label +void bar() { + if (foo) + foo(); +} diff --git a/clang/test/CodeGen/ptrauth.c b/clang/test/CodeGen/ptrauth.c new file mode 100644 index 00000000000000..097e876647fd94 --- /dev/null +++ b/clang/test/CodeGen/ptrauth.c @@ -0,0 +1,97 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm %s -o - | FileCheck %s + +#define FNPTRKEY 0 + +void (*fnptr)(void); +long discriminator; + +extern void external_function(void); +// CHECK: [[EXTERNAL_FUNCTION:@.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 0, i64 0, i64 0 }, section "llvm.ptrauth", align 8 +// CHECK: @fptr1 = global void ()* bitcast ({ i8*, i32, i64, i64 }* [[EXTERNAL_FUNCTION]] to void ()*) +void (*fptr1)(void) = external_function; +// CHECK: @fptr2 = global void ()* bitcast ({ i8*, i32, i64, i64 }* [[EXTERNAL_FUNCTION]] to void ()*) +void (*fptr2)(void) = &external_function; + +// CHECK: [[SIGNED:@.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 2, i64 0, i64 26 }, section "llvm.ptrauth", align 8 +// CHECK: @fptr3 = global void ()* bitcast ({ i8*, 
i32, i64, i64 }* [[SIGNED]] to void ()*) +void (*fptr3)(void) = __builtin_ptrauth_sign_constant(&external_function, 2, 26); + +// CHECK: @fptr4 = global void ()* bitcast ({ i8*, i32, i64, i64 }* [[SIGNED:@.*]] to void ()*) +// CHECK: [[SIGNED]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @external_function to i8*), i32 2, i64 ptrtoint (void ()** @fptr4 to i64), i64 26 }, section "llvm.ptrauth", align 8 +void (*fptr4)(void) = __builtin_ptrauth_sign_constant(&external_function, 2, __builtin_ptrauth_blend_discriminator(&fptr4, 26)); + +// CHECK-LABEL: define void @test_call() +void test_call() { + // CHECK: [[T0:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: call void [[T0]]() [ "ptrauth"(i32 0, i64 0) ] + fnptr(); +} + +// CHECK-LABEL: define void @test_direct_call() +void test_direct_call() { + // CHECK: call void @test_call(){{$}} + test_call(); +} + +void abort(); +// CHECK-LABEL: define void @test_direct_builtin_call() +void test_direct_builtin_call() { + // CHECK: call void @abort() {{#[0-9]+$}} + abort(); +} + +// CHECK-LABEL: define void @test_sign_unauthenticated_peephole() +void test_sign_unauthenticated_peephole() { + // CHECK: [[T0:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: call void [[T0]](){{$}} + // CHECK-NEXT: ret void + __builtin_ptrauth_sign_unauthenticated(fnptr, FNPTRKEY, 0)(); +} + +// This peephole doesn't kick in because it's incorrect when ABI pointer +// authentication is enabled. 
+// CHECK-LABEL: define void @test_auth_peephole() +void test_auth_peephole() { + // CHECK: [[T0:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[T1:%.*]] = load i64, i64* @discriminator, + // CHECK-NEXT: [[T2:%.*]] = ptrtoint void ()* [[T0]] to i64 + // CHECK-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.auth.i64(i64 [[T2]], i32 0, i64 [[T1]]) + // CHECK-NEXT: [[T4:%.*]] = inttoptr i64 [[T3]] to void ()* + // CHECK-NEXT: call void [[T4]]() [ "ptrauth"(i32 0, i64 0) ] + // CHECK-NEXT: ret void + __builtin_ptrauth_auth(fnptr, 0, discriminator)(); +} + +// CHECK-LABEL: define void @test_auth_and_resign_peephole() +void test_auth_and_resign_peephole() { + // CHECK: [[T0:%.*]] = load void ()*, void ()** @fnptr, + // CHECK-NEXT: [[T1:%.*]] = load i64, i64* @discriminator, + // CHECK-NEXT: call void [[T0]]() [ "ptrauth"(i32 2, i64 [[T1]]) ] + // CHECK-NEXT: ret void + __builtin_ptrauth_auth_and_resign(fnptr, 2, discriminator, FNPTRKEY, 0)(); +} + +// CHECK-LABEL: define void ()* @test_function_pointer() +// CHECK: [[EXTERNAL_FUNCTION]] +void (*test_function_pointer())(void) { + return external_function; +} + +// rdar://34562484 - Handle IR types changing in the caching mechanism. 
+struct InitiallyIncomplete; +extern struct InitiallyIncomplete returns_initially_incomplete(void); +// CHECK-LABEL: define void @use_while_incomplete() +void use_while_incomplete() { + // CHECK: [[VAR:%.*]] = alloca {}*, + // CHECK-NEXT: store {}* bitcast ({ i8*, i32, i64, i64 }* @returns_initially_incomplete.ptrauth to {}*), {}** [[VAR]], + // CHECK-NEXT: ret void + struct InitiallyIncomplete (*fnptr)(void) = &returns_initially_incomplete; +} +struct InitiallyIncomplete { int x; }; +// CHECK-LABEL: define void @use_while_complete() +void use_while_complete() { + // CHECK: [[VAR:%.*]] = alloca i64 ()*, + // CHECK-NEXT: store i64 ()* bitcast ({ i8*, i32, i64, i64 }* @returns_initially_incomplete.ptrauth to i64 ()*), i64 ()** [[VAR]], + // CHECK-NEXT: ret void + struct InitiallyIncomplete (*fnptr)(void) = &returns_initially_incomplete; +} diff --git a/clang/test/CodeGenCXX/mangle-fail.cpp b/clang/test/CodeGenCXX/mangle-fail.cpp index b588d57749fa3d..7d842c53896db2 100644 --- a/clang/test/CodeGenCXX/mangle-fail.cpp +++ b/clang/test/CodeGenCXX/mangle-fail.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -emit-llvm-only -x c++ -std=c++11 -triple %itanium_abi_triple -verify %s -DN=1 // RUN: %clang_cc1 -emit-llvm-only -x c++ -std=c++11 -triple %itanium_abi_triple -verify %s -DN=2 +// RUN: %clang_cc1 -emit-llvm-only -x c++ -std=c++11 -triple %itanium_abi_triple -verify %s -DN=3 struct A { int a; }; @@ -13,6 +14,19 @@ template void test(int (&)[sizeof(int)]); template void test(int (&)[sizeof((A){}, T())]) {} // expected-error {{cannot yet mangle}} template void test(int (&)[sizeof(A)]); +#elif N == 3 +// __builtin_ptrauth_type_discriminator +template +struct S1 {}; + +template +void func(S1 s1) { // expected-error {{cannot yet mangle __builtin_ptrauth_type_discriminator expression}} +} + +void testfunc1() { + func(S1()); +} + // FIXME: There are several more cases we can't yet mangle. 
#else diff --git a/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-call-2.cpp b/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-call-2.cpp new file mode 100644 index 00000000000000..17ea16accb6126 --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-call-2.cpp @@ -0,0 +1,105 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fapple-kext -fno-rtti -emit-llvm -o - %s | FileCheck %s + +// CHECK: @_ZTV1A = unnamed_addr constant { [4 x i8*] } { [4 x i8*] [i8* null, i8* null, i8* bitcast ({ i8*, i32, i64, i64 }* @_ZNK1A3abcEv.ptrauth to i8*), i8* null] } +// CHECK: @_ZTV4Base = unnamed_addr constant { [4 x i8*] } { [4 x i8*] [i8* null, i8* null, i8* bitcast ({ i8*, i32, i64, i64 }* @_ZNK4Base3abcEv.ptrauth to i8*), i8* null] } +// CHECK: @_ZTV8Derived2 = unnamed_addr constant { [5 x i8*] } { [5 x i8*] [i8* null, i8* null, i8* null, i8* bitcast ({ i8*, i32, i64, i64 }* @_ZNK8Derived23efgEv.ptrauth to i8*), i8* null] } +// CHECK: @_ZTV2D2 = unnamed_addr constant { [5 x i8*] } { [5 x i8*] [i8* null, i8* null, i8* null, i8* bitcast ({ i8*, i32, i64, i64 }* @_ZNK2D23abcEv.ptrauth to i8*), i8* null] } + +struct A { + virtual const char* abc(void) const; +}; + +const char* A::abc(void) const {return "A"; }; + +struct B : virtual A { + virtual void VF(); +}; + +void B::VF() {} + +void FUNC(B* p) { +// CHECK: [[T1:%.*]] = load i8* (%struct.A*)*, i8* (%struct.A*)** getelementptr inbounds (i8* (%struct.A*)*, i8* (%struct.A*)** bitcast ({ [4 x i8*] }* @_ZTV1A to i8* (%struct.A*)**), i64 2) +// CHECK-NEXT: [[BT1:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i8* (%struct.A*)** getelementptr inbounds (i8* (%struct.A*)*, i8* (%struct.A*)** bitcast ({ [4 x i8*] }* @_ZTV1A to i8* (%struct.A*)**), i64 2) to i64), i64 12401) +// CHECK-NEXT: [[T2:%.*]] = call i8* [[T1]](%struct.A* {{.*}}) [ "ptrauth"(i32 0, i64 [[BT1]]) ] + const char* c = p->A::abc(); +} + + +// Test2 +struct Base { virtual char* abc(void) const; }; + +char* Base::abc() const { 
return 0; } + +struct Derived : public Base { +}; + +void FUNC1(Derived* p) { +// CHECK: [[U1:%.*]] = load i8* (%struct.Base*)*, i8* (%struct.Base*)** getelementptr inbounds (i8* (%struct.Base*)*, i8* (%struct.Base*)** bitcast ({ [4 x i8*] }* @_ZTV4Base to i8* (%struct.Base*)**), i64 2) +// CHECK-NEXT: [[BU1:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i8* (%struct.Base*)** getelementptr inbounds (i8* (%struct.Base*)*, i8* (%struct.Base*)** bitcast ({ [4 x i8*] }* @_ZTV4Base to i8* (%struct.Base*)**), i64 2) to i64), i64 64320) +// CHECK-NEXT: [[U2:%.*]] = call i8* [[U1]](%struct.Base* {{.*}}) [ "ptrauth"(i32 0, i64 [[BU1]]) ] + char* c = p->Base::abc(); +} + + +// Test3 +struct Base2 { }; + +struct Derived2 : virtual Base2 { + virtual char* efg(void) const; +}; + +char* Derived2::efg(void) const { return 0; } + +void FUNC2(Derived2* p) { +// CHECK: [[V1:%.*]] = load i8* (%struct.Derived2*)*, i8* (%struct.Derived2*)** getelementptr inbounds (i8* (%struct.Derived2*)*, i8* (%struct.Derived2*)** bitcast ({ [5 x i8*] }* @_ZTV8Derived2 to i8* (%struct.Derived2*)**), i64 3) +// CHECK-NEXT: [[BV1:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (i8* (%struct.Derived2*)** getelementptr inbounds (i8* (%struct.Derived2*)*, i8* (%struct.Derived2*)** bitcast ({ [5 x i8*] }* @_ZTV8Derived2 to i8* (%struct.Derived2*)**), i64 3) to i64), i64 36603) +// CHECK-NEXT: [[V2:%.*]] = call i8* [[V1]](%struct.Derived2* {{.*}}) [ "ptrauth"(i32 0, i64 [[BV1]]) ] + char* c = p->Derived2::efg(); +} + +// Test4 +struct Base3 { }; + +struct D1 : virtual Base3 { +}; + +struct D2 : virtual Base3 { + virtual char *abc(void) const; +}; + +struct Sub : D1, D2 { +}; + +char* D2::abc(void) const { return 0; } + +void FUNC3(Sub* p) { +// CHECK: [[W1:%.*]] = load i8* (%struct.D2*)*, i8* (%struct.D2*)** getelementptr inbounds (i8* (%struct.D2*)*, i8* (%struct.D2*)** bitcast ({ [5 x i8*] }* @_ZTV2D2 to i8* (%struct.D2*)**), i64 3) +// CHECK-NEXT: [[BW1:%.*]] = call i64 
@llvm.ptrauth.blend.i64(i64 ptrtoint (i8* (%struct.D2*)** getelementptr inbounds (i8* (%struct.D2*)*, i8* (%struct.D2*)** bitcast ({ [5 x i8*] }* @_ZTV2D2 to i8* (%struct.D2*)**), i64 3) to i64), i64 20222) +// CHECK-NEXT: [[W2:%.*]] = call i8* [[W1]](%struct.D2* {{.*}}) [ "ptrauth"(i32 0, i64 [[BW1]]) ] + char* c = p->D2::abc(); +} + + +// Test5 +struct Base4 { virtual void abc(); }; + +void Base4::abc() {} + +struct Derived4 : public Base4 { + void abc() override; +}; + +void Derived4::abc() {} + +void FUNC4(Derived4* p) { +// CHECK: %[[VTABLE:[a-z]+]] = load void (%struct.Derived4*)**, void (%struct.Derived4*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%struct.Derived4*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%struct.Derived4*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%struct.Derived4*)*, void (%struct.Derived4*)** %[[T4]], i64 0 +// CHECK: %[[T5:[0-9]+]] = load void (%struct.Derived4*)*, void (%struct.Derived4*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%struct.Derived4*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 426) +// CHECK: call void %[[T5]](%struct.Derived4* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + p->abc(); +} diff --git a/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-call.cpp b/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-call.cpp new file mode 100644 index 00000000000000..008ba6e3d244ca --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-call.cpp @@ -0,0 +1,42 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fapple-kext -emit-llvm -o - %s | FileCheck %s + +// CHECK: @_ZTV5TemplIiE = internal unnamed_addr constant { [5 x i8*] } { [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI5TemplIiE to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5TemplIiE1fEv.ptrauth to i8*), i8* bitcast ({ 
i8*, i32, i64, i64 }* @_ZN5TemplIiE1gEv.ptrauth to i8*), i8* null] } + +struct Base { + virtual void abc(void) const; +}; + +void Base::abc(void) const {} + +void FUNC(Base* p) { + p->Base::abc(); +} + +// CHECK: getelementptr inbounds (void (%struct.Base*)*, void (%struct.Base*)** bitcast ({ [4 x i8*] }* @_ZTV4Base to void (%struct.Base*)**), i64 2) +// CHECK-NOT: call void @_ZNK4Base3abcEv + +template +struct Templ { + virtual void f() {} + virtual void g() {} +}; +template +struct SubTempl : public Templ { + virtual void f() {} // override + virtual void g() {} // override +}; + +void f(SubTempl* t) { + // Qualified calls go through the (qualified) vtable in apple-kext mode. + // Since t's this pointer points to SubTempl's vtable, the call needs + // to load Templ's vtable. Hence, Templ::g needs to be + // instantiated in this TU, for it's referenced by the vtable. + // (This happens only in apple-kext mode; elsewhere virtual calls can always + // use the vtable pointer off this instead of having to load the vtable + // symbol.) 
+ t->Templ::f(); +} + +// CHECK: getelementptr inbounds (void (%struct.Templ*)*, void (%struct.Templ*)** bitcast ({ [5 x i8*] }* @_ZTV5TemplIiE to void (%struct.Templ*)**), i64 2) +// CHECK: define internal void @_ZN5TemplIiE1fEv(%struct.Templ* %this) +// CHECK: define internal void @_ZN5TemplIiE1gEv(%struct.Templ* %this) diff --git a/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-virtual-dtor-call.cpp b/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-virtual-dtor-call.cpp new file mode 100644 index 00000000000000..3e081e4e5a6eb5 --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-apple-kext-indirect-virtual-dtor-call.cpp @@ -0,0 +1,50 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -std=c++98 -fptrauth-calls -fapple-kext -fno-rtti -disable-O0-optnone -emit-llvm -o - %s | FileCheck %s + +// CHECK: @_ZTV5TemplIiE = internal unnamed_addr constant { [7 x i8*] } { [7 x i8*] [i8* null, i8* null, i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5TemplIiED1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5TemplIiED0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5TemplIiE1fEv.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5TemplIiE1gEv.ptrauth to i8*), i8* null] } + +struct B1 { + virtual ~B1(); +}; + +B1::~B1() {} + +void DELETE(B1 *pb1) { + pb1->B1::~B1(); +} +// CHECK-LABEL: define void @_ZN2B1D0Ev +// CHECK: [[T1:%.*]] = load %struct.B1* (%struct.B1*)*, %struct.B1* (%struct.B1*)** getelementptr inbounds (%struct.B1* (%struct.B1*)*, %struct.B1* (%struct.B1*)** bitcast ({ [5 x i8*] }* @_ZTV2B1 to %struct.B1* (%struct.B1*)**), i64 2) +// CHECK-NEXT: [[B1:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (%struct.B1* (%struct.B1*)** getelementptr inbounds (%struct.B1* (%struct.B1*)*, %struct.B1* (%struct.B1*)** bitcast ({ [5 x i8*] }* @_ZTV2B1 to %struct.B1* (%struct.B1*)**), i64 2) to i64), i64 14635) +// CHECK-NEXT: call %struct.B1* [[T1]](%struct.B1* [[T2:%.*]]) [ "ptrauth"(i32 0, i64 [[B1]]) ] +// CHECK-LABEL: define void 
@_Z6DELETEP2B1 +// CHECK: [[T3:%.*]] = load %struct.B1* (%struct.B1*)*, %struct.B1* (%struct.B1*)** getelementptr inbounds (%struct.B1* (%struct.B1*)*, %struct.B1* (%struct.B1*)** bitcast ({ [5 x i8*] }* @_ZTV2B1 to %struct.B1* (%struct.B1*)**), i64 2) +// CHECK-NEXT: [[B3:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 ptrtoint (%struct.B1* (%struct.B1*)** getelementptr inbounds (%struct.B1* (%struct.B1*)*, %struct.B1* (%struct.B1*)** bitcast ({ [5 x i8*] }* @_ZTV2B1 to %struct.B1* (%struct.B1*)**), i64 2) to i64), i64 14635) +// CHECK-NEXT: call %struct.B1* [[T3]](%struct.B1* [[T4:%.*]]) [ "ptrauth"(i32 0, i64 [[B3]]) + +template +struct Templ { + virtual ~Templ(); // Out-of-line so that the destructor doesn't cause a vtable + virtual void f() {} + virtual void g() {} +}; +template +struct SubTempl : public Templ { + virtual ~SubTempl() {} // override + virtual void f() {} // override + virtual void g() {} // override +}; + +void f(SubTempl* t) { + // Qualified calls go through the (qualified) vtable in apple-kext mode. + // Since t's this pointer points to SubTempl's vtable, the call needs + // to load Templ's vtable. Hence, Templ::g needs to be + // instantiated in this TU, for it's referenced by the vtable. + // (This happens only in apple-kext mode; elsewhere virtual calls can always + // use the vtable pointer off this instead of having to load the vtable + // symbol.) 
+ t->Templ::~Templ(); +} + +// CHECK: getelementptr inbounds (%struct.Templ* (%struct.Templ*)*, %struct.Templ* (%struct.Templ*)** bitcast ({ [7 x i8*] }* @_ZTV5TemplIiE to %struct.Templ* (%struct.Templ*)**), i64 2) +// CHECK: declare void @_ZN5TemplIiED0Ev(%struct.Templ*) +// CHECK: define internal void @_ZN5TemplIiE1fEv(%struct.Templ* %this) +// CHECK: define internal void @_ZN5TemplIiE1gEv(%struct.Templ* %this) diff --git a/clang/test/CodeGenCXX/ptrauth-member-function-pointer.cpp b/clang/test/CodeGenCXX/ptrauth-member-function-pointer.cpp new file mode 100644 index 00000000000000..9e2c10d8a3337f --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-member-function-pointer.cpp @@ -0,0 +1,388 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -o - %s | FileCheck -check-prefixes=CHECK,NODEBUG %s +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -debug-info-kind=limited -o - %s | FileCheck -check-prefixes=CHECK %s + +// CHECK: %[[STRUCT_BASE0:.*]] = type { i32 (...)** } +// CHECK: %[[STRUCT_DERIVED0:.*]] = type { %[[STRUCT_BASE0]] } +// CHECK: %[[STRUCT_A0:.*]] = type { [4 x i32] } +// CHECK: %[[STRUCT_A1:.*]] = type { [8 x i32] } +// CHECK: %[[STRUCT_TRIVIALS:.*]] = type { [4 x i32] } +// CHECK: %[[STRUCT_BASE1:.*]] = type { i32 (...)** } +// CHECK: %[[STRUCT_DERIVED1:.*]] = type { %[[STRUCT_BASE0]], %[[STRUCT_BASE1]] } + +// CHECK: @_ZN5Base011nonvirtual0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base011nonvirtual0Ev to i8*), i32 0, i64 0, i64 [[TYPEDISC0:22163]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base08virtual1Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 [[TYPEDISC0]] }, section "llvm.ptrauth", align 8 +// CHECK: 
@_ZN5Base08virtual3Ev_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base08virtual3Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 [[TYPEDISC0]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base016virtual_variadicEiz_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*, i32, ...)* @_ZN5Base016virtual_variadicEiz_vfpthunk_ to i8*), i32 0, i64 0, i64 34368 }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base011nonvirtual0Ev.ptrauth.1 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base011nonvirtual0Ev to i8*), i32 0, i64 0, i64 [[TYPEDISC1:35591]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth.2 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base08virtual1Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 [[TYPEDISC1]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base08virtual3Ev_vfpthunk_.ptrauth.3 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base08virtual3Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 [[TYPEDISC1]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN8Derived011nonvirtual5Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_DERIVED0]]*)* @_ZN8Derived011nonvirtual5Ev to i8*), i32 0, i64 0, i64 [[TYPEDISC1]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN8Derived08virtual6Ev_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_DERIVED0]]*)* @_ZN8Derived08virtual6Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 [[TYPEDISC1]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN8Derived010return_aggEv_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast ([2 x i64] (%[[STRUCT_DERIVED0]]*)* @_ZN8Derived010return_aggEv_vfpthunk_ to i8*), i32 0, i64 0, i64 64418 }, section "llvm.ptrauth", align 8 +// CHECK: 
@_ZN8Derived04sretEv_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_A1]]*, %[[STRUCT_DERIVED0]]*)* @_ZN8Derived04sretEv_vfpthunk_ to i8*), i32 0, i64 0, i64 28187 }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_DERIVED0]]*, [2 x i64])* @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_ to i8*), i32 0, i64 0, i64 8992 }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base18virtual7Ev_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE1]]*)* @_ZN5Base18virtual7Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 [[TYPEDISC2:61596]] }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN8Derived18virtual7Ev_vfpthunk_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_DERIVED1]]*)* @_ZN8Derived18virtual7Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 25206 }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth.4 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base08virtual1Ev_vfpthunk_ to i8*), i32 0, i64 0, i64 25206 }, section "llvm.ptrauth", align 8 + +// CHECK: @gmethod0 = global { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base011nonvirtual0Ev.ptrauth.1 to i64), i64 0 }, align 8 +// CHECK: @_ZN8Derived011nonvirtual5Ev.ptrauth.6 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%struct.Derived0*)* @_ZN8Derived011nonvirtual5Ev to i8*), i32 0, i64 0, i64 [[TYPEDISC0]] }, section "llvm.ptrauth", align 8 +// CHECK: @gmethod1 = global { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN8Derived011nonvirtual5Ev.ptrauth.6 to i64), i64 0 }, align 8 +// CHECK: @gmethod2 = global { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth to i64), i64 0 }, align 8 + +// CHECK: @_ZTV5Base0 = unnamed_addr constant { [5 x i8*] } { [5 
x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI5Base0 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual3Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN5Base016virtual_variadicEiz.ptrauth to i8*)] }, align 8 +// CHECK: @_ZN5Base08virtual1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base08virtual1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [5 x i8*] }, { [5 x i8*] }* @_ZTV5Base0, i32 0, i32 0, i32 2) to i64), i64 55600 }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base08virtual3Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*)* @_ZN5Base08virtual3Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [5 x i8*] }, { [5 x i8*] }* @_ZTV5Base0, i32 0, i32 0, i32 3) to i64), i64 53007 }, section "llvm.ptrauth", align 8 +// CHECK: @_ZN5Base016virtual_variadicEiz.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%[[STRUCT_BASE0]]*, i32, ...)* @_ZN5Base016virtual_variadicEiz to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [5 x i8*] }, { [5 x i8*] }* @_ZTV5Base0, i32 0, i32 0, i32 4) to i64), i64 7464 }, section "llvm.ptrauth", align 8 + +struct Base0 { + void nonvirtual0(); + virtual void virtual1(); + virtual void virtual3(); + virtual void virtual_variadic(int, ...); +}; + +struct A0 { + int d[4]; +}; + +struct A1 { + int d[8]; +}; + +struct __attribute__((trivial_abi)) TrivialS { + TrivialS(const TrivialS &); + ~TrivialS(); + int p[4]; +}; + +struct Derived0 : Base0 { + void virtual1() override; + void nonvirtual5(); + virtual void virtual6(); + virtual A0 return_agg(); + virtual A1 sret(); + virtual void trivial_abi(TrivialS); +}; + +struct Base1 { + virtual void virtual7(); +}; + +struct Derived1 : Base0, Base1 { + void virtual1() override; + void virtual7() override; +}; + +typedef void 
(Base0::*MethodTy0)(); +typedef void (Base0::*VariadicMethodTy0)(int, ...); +typedef void (Derived0::*MethodTy1)(); + +// CHECK: define void @_ZN5Base08virtual1Ev( + +// CHECK: define void @_Z5test0v() +// CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[VARMETHOD1:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD2:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD3:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD4:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD5:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD6:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD7:.*]] = alloca { i64, i64 }, align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base011nonvirtual0Ev.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD0]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD0]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual3Ev_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD0]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base016virtual_variadicEiz_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[VARMETHOD1]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base011nonvirtual0Ev.ptrauth.1 to i64), i64 0 }, { i64, i64 }* %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth.2 to i64), i64 0 }, { i64, i64 }* %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual3Ev_vfpthunk_.ptrauth.3 to i64), i64 0 }, { i64, i64 }* %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN8Derived011nonvirtual5Ev.ptrauth to i64), 
i64 0 }, { i64, i64 }* %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN8Derived08virtual6Ev_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD2]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN8Derived010return_aggEv_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD3]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN8Derived04sretEv_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD4]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD5]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base18virtual7Ev_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD6]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN8Derived18virtual7Ev_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD7]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth.4 to i64), i64 0 }, { i64, i64 }* %[[METHOD7]], align 8 +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @_ZN5Base08virtual1Ev_vfpthunk_(%[[STRUCT_BASE0]]* %[[THIS:.*]]) +// CHECK: %[[THIS_ADDR:.*]] = alloca %[[STRUCT_BASE0]]*, align 8 +// CHECK: store %[[STRUCT_BASE0]]* %[[THIS]], %[[STRUCT_BASE0]]** %[[THIS_ADDR]], align 8 +// CHECK: %[[THIS1:.*]] = load %[[STRUCT_BASE0]]*, %[[STRUCT_BASE0]]** %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[V0:.*]] = load %[[STRUCT_BASE0]]*, %[[STRUCT_BASE0]]** %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[V1:.*]] = bitcast %[[STRUCT_BASE0]]* %[[THIS1]] to void (%[[STRUCT_BASE0]]*)*** +// CHECK-NEXT: %[[VTABLE:.*]] = load void (%[[STRUCT_BASE0]]*)**, void (%[[STRUCT_BASE0]]*)*** %[[V1]], align 8 +// CHECK-NEXT: %[[V2:.*]] = ptrtoint void (%[[STRUCT_BASE0]]*)** 
%[[VTABLE]] to i64 +// CHECK-NEXT: %[[V3:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[V2]], i32 2, i64 0) +// CHECK-NEXT: %[[V4:.*]] = inttoptr i64 %[[V3]] to void (%[[STRUCT_BASE0]]*)** +// CHECK-NEXT: %[[VFN:.*]] = getelementptr inbounds void (%[[STRUCT_BASE0]]*)*, void (%[[STRUCT_BASE0]]*)** %[[V4]], i64 0 +// CHECK-NEXT: %[[V5:.*]] = load void (%[[STRUCT_BASE0]]*)*, void (%[[STRUCT_BASE0]]*)** %[[VFN]], align 8 +// CHECK-NEXT: %[[V6:.*]] = ptrtoint void (%[[STRUCT_BASE0]]*)** %[[VFN]] to i64 +// CHECK-NEXT: %[[V7:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V6]], i64 55600) +// CHECK-NEXT: musttail call void %[[V5]](%[[STRUCT_BASE0]]* %[[V0]]) [ "ptrauth"(i32 0, i64 %[[V7]]) ] +// CHECK-NEXT: ret void + +// CHECK: define linkonce_odr hidden void @_ZN5Base08virtual3Ev_vfpthunk_(%[[STRUCT_BASE0]]* %{{.*}}) +// CHECK: %[[VTABLE:.*]] = load void (%[[STRUCT_BASE0]]*)**, void (%[[STRUCT_BASE0]]*)*** %{{.*}}, align 8 +// CHECK: %[[V2:.*]] = ptrtoint void (%[[STRUCT_BASE0]]*)** %[[VTABLE]] to i64 +// CHECK: %[[V3:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[V2]], i32 2, i64 0) +// CHECK: %[[V4:.*]] = inttoptr i64 %[[V3]] to void (%[[STRUCT_BASE0]]*)** +// CHECK: getelementptr inbounds void (%[[STRUCT_BASE0]]*)*, void (%[[STRUCT_BASE0]]*)** %[[V4]], i64 1 +// CHECK: call i64 @llvm.ptrauth.blend.i64(i64 %{{.*}}, i64 53007) + +// CHECK: define linkonce_odr hidden void @_ZN5Base016virtual_variadicEiz_vfpthunk_(%[[STRUCT_BASE0]]* %[[THIS:.*]], i32 %0, ...) 
+// CHECK: %[[THIS_ADDR:.*]] = alloca %[[STRUCT_BASE0]]*, align 8 +// CHECK-NEXT: %[[_ADDR:.*]] = alloca i32, align 4 +// CHECK-NEXT: store %[[STRUCT_BASE0]]* %[[THIS]], %[[STRUCT_BASE0]]** %[[THIS_ADDR]], align 8 +// CHECK: store i32 %0, i32* %[[_ADDR]], align 4 +// CHECK: %[[THIS1:.*]] = load %[[STRUCT_BASE0]]*, %[[STRUCT_BASE0]]** %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[V1:.*]] = load %[[STRUCT_BASE0]]*, %[[STRUCT_BASE0]]** %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[V2:.*]] = load i32, i32* %[[_ADDR]], align 4 +// CHECK-NEXT: %[[V3:.*]] = bitcast %[[STRUCT_BASE0]]* %[[THIS1]] to void (%[[STRUCT_BASE0]]*, i32, ...)*** +// CHECK-NEXT: %[[VTABLE:.*]] = load void (%[[STRUCT_BASE0]]*, i32, ...)**, void (%[[STRUCT_BASE0]]*, i32, ...)*** %[[V3]], align 8 +// CHECK-NEXT: %[[V4:.*]] = ptrtoint void (%[[STRUCT_BASE0]]*, i32, ...)** %[[VTABLE]] to i64 +// CHECK-NEXT: %[[V5:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[V4]], i32 2, i64 0) +// CHECK-NEXT: %[[V6:.*]] = inttoptr i64 %[[V5]] to void (%[[STRUCT_BASE0]]*, i32, ...)** +// CHECK-NEXT: %[[VFN:.*]] = getelementptr inbounds void (%[[STRUCT_BASE0]]*, i32, ...)*, void (%[[STRUCT_BASE0]]*, i32, ...)** %[[V6]], i64 2 +// CHECK-NEXT: %[[V7:.*]] = load void (%[[STRUCT_BASE0]]*, i32, ...)*, void (%[[STRUCT_BASE0]]*, i32, ...)** %[[VFN]], align 8 +// CHECK-NEXT: %[[V8:.*]] = ptrtoint void (%[[STRUCT_BASE0]]*, i32, ...)** %[[VFN]] to i64 +// CHECK-NEXT: %[[V9:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V8]], i64 7464) +// CHECK-NEXT: musttail call void (%[[STRUCT_BASE0]]*, i32, ...) %[[V7]](%[[STRUCT_BASE0]]* %[[V1]], i32 %[[V2]], ...) 
[ "ptrauth"(i32 0, i64 %[[V9]]) ] +// CHECK-NEXT: ret void + +// CHECK: define linkonce_odr hidden void @_ZN8Derived08virtual6Ev_vfpthunk_(%[[STRUCT_DERIVED0]]* %{{.*}}) +// CHECK: %[[VTABLE:.*]] = load void (%[[STRUCT_DERIVED0]]*)**, void (%[[STRUCT_DERIVED0]]*)*** %[[V1]], align 8 +// CHECK: %[[V2:.*]] = ptrtoint void (%[[STRUCT_DERIVED0]]*)** %[[VTABLE]] to i64 +// CHECK: %[[V3:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[V2]], i32 2, i64 0) +// CHECK: %[[V4:.*]] = inttoptr i64 %[[V3]] to void (%[[STRUCT_DERIVED0]]*)** +// CHECK: getelementptr inbounds void (%[[STRUCT_DERIVED0]]*)*, void (%[[STRUCT_DERIVED0]]*)** %[[V4]], i64 3 +// CHECK: call i64 @llvm.ptrauth.blend.i64(i64 %{{.*}}, i64 55535) + +// Check that the return value of the musttail call isn't copied to a temporary. + +// CHECK: define linkonce_odr hidden [2 x i64] @_ZN8Derived010return_aggEv_vfpthunk_(%[[STRUCT_DERIVED0]]* %{{.*}}) +// CHECK: %[[CALL:.*]] = musttail call [2 x i64] %{{.*}}(%[[STRUCT_DERIVED0]]* %{{.*}}) [ "ptrauth"(i32 0, i64 %{{.*}}) ] +// CHECK-NEXT: ret [2 x i64] %[[CALL]] + +// Check that the sret pointer passed to the caller is forwarded to the musttail +// call. + +// CHECK: define linkonce_odr hidden void @_ZN8Derived04sretEv_vfpthunk_(%[[STRUCT_A1]]* noalias sret %[[AGG_RESULT:.*]], %[[STRUCT_DERIVED0]]* %{{.*}}) +// CHECK: musttail call void %{{.*}}(%[[STRUCT_A1]]* sret %[[AGG_RESULT]], %[[STRUCT_DERIVED0]]* %{{.*}}) [ "ptrauth"(i32 0, i64 %{{.*}}) ] +// CHECK-NEXT: ret void + +// Check that the thunk function doesn't destruct the trivial_abi argument. 
+ +// CHECK: define linkonce_odr hidden void @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_(%[[STRUCT_DERIVED0]]* %{{.*}}, [2 x i64] %{{.*}}) +// NODEBUG-NOT: call +// CHECK: call i64 @llvm.ptrauth.auth.i64( +// NODEBUG-NOT: call +// CHECK: call i64 @llvm.ptrauth.blend.i64( +// NODEBUG-NOT: call +// CHECK: musttail call void +// CHECK-NEXT: ret void + +// CHECK: define linkonce_odr hidden void @_ZN5Base18virtual7Ev_vfpthunk_(%[[STRUCT_BASE1]]* %{{.*}}) +// CHECK: %[[VTABLE:.*]] = load void (%[[STRUCT_BASE1]]*)**, void (%[[STRUCT_BASE1]]*)*** %{{.*}}, align 8 +// CHECK: %[[V2:.*]] = ptrtoint void (%[[STRUCT_BASE1]]*)** %[[VTABLE]] to i64 +// CHECK: %[[V3:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[V2]], i32 2, i64 0) +// CHECK: %[[V4:.*]] = inttoptr i64 %[[V3]] to void (%[[STRUCT_BASE1]]*)** +// CHECK: getelementptr inbounds void (%[[STRUCT_BASE1]]*)*, void (%[[STRUCT_BASE1]]*)** %[[V4]], i64 0 + +// CHECK: define linkonce_odr hidden void @_ZN8Derived18virtual7Ev_vfpthunk_(%[[STRUCT_DERIVED1]]* %{{.*}}) +// CHECK: %[[VTABLE:.*]] = load void (%[[STRUCT_DERIVED1]]*)**, void (%[[STRUCT_DERIVED1]]*)*** %[[V1]], align 8 +// CHECK: %[[V2:.*]] = ptrtoint void (%[[STRUCT_DERIVED1]]*)** %[[VTABLE]] to i64 +// CHECK: %[[V3:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[V2]], i32 2, i64 0) +// CHECK: %[[V4:.*]] = inttoptr i64 %[[V3]] to void (%[[STRUCT_DERIVED1]]*)** +// CHECK: getelementptr inbounds void (%[[STRUCT_DERIVED1]]*)*, void (%[[STRUCT_DERIVED1]]*)** %[[V4]], i64 3 + +void Base0::virtual1() {} + +void test0() { + MethodTy0 method0; + method0 = &Base0::nonvirtual0; + method0 = &Base0::virtual1; + method0 = &Base0::virtual3; + + VariadicMethodTy0 varmethod1; + varmethod1 = &Base0::virtual_variadic; + + MethodTy1 method2; + method2 = &Derived0::nonvirtual0; + method2 = &Derived0::virtual1; + method2 = &Derived0::virtual3; + method2 = &Derived0::nonvirtual5; + method2 = &Derived0::virtual6; + + A0 (Derived0::*method3)(); + method3 = &Derived0::return_agg; + + A1 
(Derived0::*method4)(); + method4 = &Derived0::sret; + + void (Derived0::*method5)(TrivialS); + method5 = &Derived0::trivial_abi; + + void (Base1::*method6)(); + method6 = &Base1::virtual7; + + void (Derived1::*method7)(); + method7 = &Derived1::virtual7; + method7 = &Derived1::virtual1; +} + +// CHECK: define void @_Z5test1P5Base0MS_FvvE(%[[STRUCT_BASE0]]* %[[A0:.*]], [2 x i64] %[[A1_COERCE:.*]]) +// CHECK: %[[A1:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[A0_ADDR:.*]] = alloca %[[STRUCT_BASE0]]*, align 8 +// CHECK: %[[A1_ADDR:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[V0:.*]] = bitcast { i64, i64 }* %[[A1]] to [2 x i64]* +// CHECK: store [2 x i64] %[[A1_COERCE]], [2 x i64]* %[[V0]], align 8 +// CHECK: %[[A11:.*]] = load { i64, i64 }, { i64, i64 }* %[[A1]], align 8 +// CHECK: store %[[STRUCT_BASE0]]* %[[A0]], %[[STRUCT_BASE0]]** %[[A0_ADDR]], align 8 +// CHECK: store { i64, i64 } %[[A11]], { i64, i64 }* %[[A1_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load %[[STRUCT_BASE0]]*, %[[STRUCT_BASE0]]** %[[A0_ADDR]], align 8 +// CHECK: %[[V2:.*]] = load { i64, i64 }, { i64, i64 }* %[[A1_ADDR]], align 8 +// CHECK: %[[MEMPTR_ADJ:.*]] = extractvalue { i64, i64 } %[[V2]], 1 +// CHECK: %[[MEMPTR_ADJ_SHIFTED:.*]] = ashr i64 %[[MEMPTR_ADJ]], 1 +// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_BASE0]]* %[[V1]] to i8* +// CHECK: %[[V4:.*]] = getelementptr inbounds i8, i8* %[[V3]], i64 %[[MEMPTR_ADJ_SHIFTED]] +// CHECK: %[[THIS_ADJUSTED:.*]] = bitcast i8* %[[V4]] to %[[STRUCT_BASE0]]* +// CHECK: %[[MEMPTR_PTR:.*]] = extractvalue { i64, i64 } %[[V2]], 0 +// CHECK: %[[V5:.*]] = and i64 %[[MEMPTR_ADJ]], 1 +// CHECK: %[[MEMPTR_ISVIRTUAL:.*]] = icmp ne i64 %[[V5]], 0 +// CHECK: br i1 %[[MEMPTR_ISVIRTUAL]] + +// CHECK: %[[V6:.*]] = bitcast %[[STRUCT_BASE0]]* %[[THIS_ADJUSTED]] to i8** +// CHECK: %[[VTABLE:.*]] = load i8*, i8** %[[V6]], align 8 +// CHECK: %[[V7:.*]] = ptrtoint i8* %[[VTABLE]] to i64 +// CHECK: %[[V8:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[V7]], i32 2, i64 0) +// 
CHECK: %[[V9:.*]] = inttoptr i64 %[[V8]] to i8* +// CHECK: %[[V10:.*]] = trunc i64 %[[MEMPTR_PTR]] to i32 +// CHECK: %[[V11:.*]] = zext i32 %[[V10]] to i64 +// CHECK: %[[V12:.*]] = getelementptr i8, i8* %[[V9]], i64 %[[V11]] +// CHECK: %[[V13:.*]] = bitcast i8* %[[V12]] to void (%[[STRUCT_BASE0]]*)** +// CHECK: %[[MEMPTR_VIRTUALFN:.*]] = load void (%[[STRUCT_BASE0]]*)*, void (%[[STRUCT_BASE0]]*)** %[[V13]], align 8 +// CHECK: br + +// CHECK: %[[MEMPTR_NONVIRTUALFN:.*]] = inttoptr i64 %[[MEMPTR_PTR]] to void (%[[STRUCT_BASE0]]*)* +// CHECK: br + +// CHECK: %[[V14:.*]] = phi void (%[[STRUCT_BASE0]]*)* [ %[[MEMPTR_VIRTUALFN]], {{.*}} ], [ %[[MEMPTR_NONVIRTUALFN]], {{.*}} ] +// CHECK: %[[V15:.*]] = phi i64 [ 0, {{.*}} ], [ [[TYPEDISC0]], {{.*}} ] +// CHECK: call void %[[V14]](%[[STRUCT_BASE0]]* %[[THIS_ADJUSTED]]) [ "ptrauth"(i32 0, i64 %[[V15]]) ] +// CHECK: ret void + +void test1(Base0 *a0, MethodTy0 a1) { + (a0->*a1)(); +} + +// CHECK: define void @_Z15testConversion0M5Base0FvvEM8Derived0FvvE([2 x i64] %[[METHOD0_COERCE:.*]], [2 x i64] %[[METHOD1_COERCE:.*]]) +// CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[METHOD1:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[METHOD0_ADDR:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[METHOD1_ADDR:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[V0:.*]] = bitcast { i64, i64 }* %[[METHOD0]] to [2 x i64]* +// CHECK: store [2 x i64] %[[METHOD0_COERCE]], [2 x i64]* %[[V0]], align 8 +// CHECK: %[[METHOD01:.*]] = load { i64, i64 }, { i64, i64 }* %[[METHOD0]], align 8 +// CHECK: %[[V1:.*]] = bitcast { i64, i64 }* %[[METHOD1]] to [2 x i64]* +// CHECK: store [2 x i64] %[[METHOD1_COERCE]], [2 x i64]* %[[V1]], align 8 +// CHECK: %[[METHOD12:.*]] = load { i64, i64 }, { i64, i64 }* %[[METHOD1]], align 8 +// CHECK: store { i64, i64 } %[[METHOD01]], { i64, i64 }* %[[METHOD0_ADDR]], align 8 +// CHECK: store { i64, i64 } %[[METHOD12]], { i64, i64 }* %[[METHOD1_ADDR]], align 8 +// CHECK: %[[V2:.*]] = load { i64, i64 }, 
{ i64, i64 }* %[[METHOD0_ADDR]], align 8 +// CHECK: %[[MEMPTR_PTR:.*]] = extractvalue { i64, i64 } %[[V2]], 0 +// CHECK: %[[MEMPTR_ADJ:.*]] = extractvalue { i64, i64 } %[[V2]], 1 +// CHECK: %[[V3:.*]] = and i64 %[[MEMPTR_ADJ]], 1 +// CHECK: %[[IS_VIRTUAL_OFFSET:.*]] = icmp ne i64 %[[V3]], 0 +// CHECK: br i1 %[[IS_VIRTUAL_OFFSET]] + +// CHECK: %[[V4:.*]] = inttoptr i64 %[[MEMPTR_PTR]] to i8* +// CHECK: %[[V5:.*]] = icmp ne i8* %[[V4]], null +// CHECK: br i1 %[[V5]] + +// CHECK: %[[V6:.*]] = ptrtoint i8* %[[V4]] to i64 +// CHECK: %[[V7:.*]] = call i64 @llvm.ptrauth.resign.i64(i64 %[[V6]], i32 0, i64 [[TYPEDISC0]], i32 0, i64 [[TYPEDISC1]]) +// CHECK: %[[V8:.*]] = inttoptr i64 %[[V7]] to i8* +// CHECK: br + +// CHECK: %[[V9:.*]] = phi i8* [ null, {{.*}} ], [ %[[V8]], {{.*}} ] +// CHECK: %[[V10:.*]] = ptrtoint i8* %[[V9]] to i64 +// CHECK: %[[V11:.*]] = insertvalue { i64, i64 } %[[V2]], i64 %[[V10]], 0 +// CHECK: br + +// CHECK: %[[V12:.*]] = phi { i64, i64 } [ %[[V2]], {{.*}} ], [ %[[V11]], {{.*}} ] +// CHECK: store { i64, i64 } %[[V12]], { i64, i64 }* %[[METHOD1_ADDR]], align 8 +// CHECK: ret void + +void testConversion0(MethodTy0 method0, MethodTy1 method1) { + method1 = method0; +} + +// CHECK: define void @_Z15testConversion1M5Base0FvvE( +// CHECK: call i64 @llvm.ptrauth.resign.i64(i64 %{{.*}}, i32 0, i64 [[TYPEDISC0]], i32 0, i64 [[TYPEDISC1]]) + +void testConversion1(MethodTy0 method0) { + MethodTy1 method1 = reinterpret_cast<MethodTy1>(method0); +} + +// CHECK: define void @_Z15testConversion2M8Derived0FvvE( +// CHECK: call i64 @llvm.ptrauth.resign.i64(i64 %{{.*}}, i32 0, i64 [[TYPEDISC1]], i32 0, i64 [[TYPEDISC0]]) + +void testConversion2(MethodTy1 method1) { + MethodTy0 method0 = static_cast<MethodTy0>(method1); +} + +// CHECK: define void @_Z15testConversion3M8Derived0FvvE( +// CHECK: call i64 @llvm.ptrauth.resign.i64(i64 %{{.*}}, i32 0, i64 [[TYPEDISC1]], i32 0, i64 [[TYPEDISC0]]) + +void testConversion3(MethodTy1 method1) { + MethodTy0 method0 = reinterpret_cast<MethodTy0>(method1); +} + 
+// No need to call @llvm.ptrauth.resign.i64 if the source member function +// pointer is a constant. + +// CHECK: define void @_Z15testConversion4v( +// CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint ({ i8*, i32, i64, i64 }* @_ZN5Base08virtual1Ev_vfpthunk_.ptrauth to i64), i64 0 }, { i64, i64 }* %[[METHOD0]], align 8 +// CHECK: ret void + +void testConversion4() { + MethodTy0 method0 = reinterpret_cast<MethodTy0>(&Derived0::virtual1); +} + +// This code used to crash. +namespace testNonVirtualThunk { + struct R {}; + + struct B0 { + virtual void bar(); + }; + + struct B1 { + virtual R foo(); + }; + + struct D : B0, B1 { + virtual R foo(); + }; + + D d; +} + +// CHECK: define void @_Z39test_builtin_ptrauth_type_discriminatorv() +// CHECK: store i32 [[TYPEDISC0]], i32* % +// CHECK: store i32 [[TYPEDISC1]], i32* % +// CHECK: store i32 [[TYPEDISC2]], i32* % + +void test_builtin_ptrauth_type_discriminator() { + unsigned d; + d = __builtin_ptrauth_type_discriminator(decltype(&Base0::virtual1)); + d = __builtin_ptrauth_type_discriminator(decltype(&Derived0::virtual6)); + d = __builtin_ptrauth_type_discriminator(decltype(&Base1::virtual7)); +} + +MethodTy1 gmethod0 = reinterpret_cast<MethodTy1>(&Base0::nonvirtual0); +MethodTy0 gmethod1 = reinterpret_cast<MethodTy0>(&Derived0::nonvirtual5); +MethodTy0 gmethod2 = reinterpret_cast<MethodTy0>(&Derived0::virtual1); diff --git a/clang/test/CodeGenCXX/ptrauth-qualifier-struct.cpp b/clang/test/CodeGenCXX/ptrauth-qualifier-struct.cpp new file mode 100644 index 00000000000000..d91e898da7ca53 --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-qualifier-struct.cpp @@ -0,0 +1,168 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -std=c++11 -emit-llvm %s -o - | FileCheck %s + +#define AQ __ptrauth(1,1,50) +#define IQ __ptrauth(1,0,50) + +// CHECK: %[[STRUCT_TRIVIALSA:.*]] = type { i32*, i32* } +// CHECK: %[[STRUCT_SA:.*]] = type { i32*, i32* } +// CHECK: %[[STRUCT_SI:.*]] = type { i32* } + 
+struct SA { + int * AQ m0; // Signed using address discrimination. + int * AQ m1; // Signed using address discrimination. +}; + +struct SI { + int * IQ m; // No address discrimination. +}; + +struct __attribute__((trivial_abi)) TrivialSA { + int * AQ m0; // Signed using address discrimination. + int * AQ m1; // Signed using address discrimination. +}; + +// Check that TrivialSA is passed indirectly despite being annotated with +// 'trivial_abi'. + +// CHECK: define void @_Z18testParamTrivialSA9TrivialSA(%[[STRUCT_TRIVIALSA]]* %{{.*}}) + +void testParamTrivialSA(TrivialSA a) { +} + +// CHECK: define void @_Z19testCopyConstructor2SA(%[[STRUCT_SA]]* +// CHECK: call %[[STRUCT_SA]]* @_ZN2SAC1ERKS_( + +// CHECK: define linkonce_odr %[[STRUCT_SA]]* @_ZN2SAC1ERKS_( +// CHECK: call %[[STRUCT_SA]]* @_ZN2SAC2ERKS_( + +void testCopyConstructor(SA a) { + SA t = a; +} + +// CHECK: define void @_Z19testMoveConstructor2SA(%[[STRUCT_SA]]* +// CHECK: call %[[STRUCT_SA]]* @_ZN2SAC1EOS_( + +// CHECK: define linkonce_odr %[[STRUCT_SA]]* @_ZN2SAC1EOS_( +// CHECK: call %[[STRUCT_SA]]* @_ZN2SAC2EOS_( + +void testMoveConstructor(SA a) { + SA t = static_cast<SA &&>(a); +} + +// CHECK: define void @_Z18testCopyAssignment2SA(%[[STRUCT_SA]]* +// CHECK: call dereferenceable(16) %[[STRUCT_SA]]* @_ZN2SAaSERKS_( + +// CHECK: define linkonce_odr dereferenceable(16) %[[STRUCT_SA:.*]]* @_ZN2SAaSERKS_(%[[STRUCT_SA]]* %[[THIS:.*]], %[[STRUCT_SA]]* dereferenceable(16) %0) +// CHECK: %[[THIS_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: %[[_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: store %[[STRUCT_SA]]* %[[THIS]], %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: store %[[STRUCT_SA]]* %[[V0:.*]], %[[STRUCT_SA]]** %[[_ADDR]], align 8 +// CHECK: %[[THISI:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: %[[M0:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[THISI]], i32 0, i32 0 +// CHECK: %[[V1:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** 
%[[_ADDR]], align 8 +// CHECK: %[[M02:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[V1]], i32 0, i32 0 +// CHECK: %[[V2:.*]] = load i32*, i32** %[[M02]], align 8 +// CHECK: %[[V3:.*]] = ptrtoint i32** %[[M02]] to i64 +// CHECK: %[[V4:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V3]], i64 50) +// CHECK: %[[V5:.*]] = ptrtoint i32** %[[M0]] to i64 +// CHECK: %[[V6:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V5]], i64 50) +// CHECK: %[[V8:.*]] = ptrtoint i32* %[[V2]] to i64 +// CHECK: %[[V9:.*]] = call i64 @llvm.ptrauth.resign.i64(i64 %[[V8]], i32 1, i64 %[[V4]], i32 1, i64 %[[V6]]) + +void testCopyAssignment(SA a) { + SA t; + t = a; +} + +// CHECK: define void @_Z18testMoveAssignment2SA(%[[STRUCT_SA]]* +// CHECK: call dereferenceable(16) %[[STRUCT_SA]]* @_ZN2SAaSEOS_( + +// CHECK: define linkonce_odr dereferenceable(16) %[[STRUCT_SA:.*]]* @_ZN2SAaSEOS_(%[[STRUCT_SA]]* %[[THIS:.*]], %[[STRUCT_SA]]* dereferenceable(16) %0) +// CHECK: %[[THIS_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: %[[_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: store %[[STRUCT_SA]]* %[[THIS]], %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: store %[[STRUCT_SA]]* %[[V0:.*]], %[[STRUCT_SA]]** %[[_ADDR]], align 8 +// CHECK: %[[THISI:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: %[[M0:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[THISI]], i32 0, i32 0 +// CHECK: %[[V1:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[_ADDR]], align 8 +// CHECK: %[[M02:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[V1]], i32 0, i32 0 +// CHECK: %[[V2:.*]] = load i32*, i32** %[[M02]], align 8 +// CHECK: %[[V3:.*]] = ptrtoint i32** %[[M02]] to i64 +// CHECK: %[[V4:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V3]], i64 50) +// CHECK: %[[V5:.*]] = ptrtoint i32** %[[M0]] to i64 +// CHECK: %[[V6:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V5]], i64 50) +// CHECK: %[[V8:.*]] = ptrtoint i32* %[[V2]] to 
i64 +// CHECK: %[[V9:.*]] = call i64 @llvm.ptrauth.resign.i64(i64 %[[V8]], i32 1, i64 %[[V4]], i32 1, i64 %[[V6]]) + +void testMoveAssignment(SA a) { + SA t; + t = static_cast<SA &&>(a); +} + +// CHECK: define void @_Z19testCopyConstructor2SI(i +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64( + +void testCopyConstructor(SI a) { + SI t = a; +} + +// CHECK: define void @_Z19testMoveConstructor2SI( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64( + +void testMoveConstructor(SI a) { + SI t = static_cast<SI &&>(a); +} + +// CHECK: define void @_Z18testCopyAssignment2SI( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64( + +void testCopyAssignment(SI a) { + SI t; + t = a; +} + +// CHECK: define void @_Z18testMoveAssignment2SI( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64( + +void testMoveAssignment(SI a) { + SI t; + t = static_cast<SI &&>(a); +} + +// CHECK: define linkonce_odr %[[STRUCT_SA:.*]]* @_ZN2SAC2ERKS_(%[[STRUCT_SA]]* %[[THIS:.*]], %[[STRUCT_SA]]* dereferenceable(16) %0) +// CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: %[[THIS_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: %[[_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: store %[[STRUCT_SA]]* %[[THIS]], %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: store %[[STRUCT_SA]]* %[[V0:.*]], %[[STRUCT_SA]]** %[[_ADDR]], align 8 +// CHECK: %[[THIS1:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: store %[[STRUCT_SA]]* %[[THIS1]], %[[STRUCT_SA]]** %[[RETVAL]], align 8 +// CHECK: %[[M0:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[THIS1]], i32 0, i32 0 +// CHECK: %[[V1:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[_ADDR]], align 8 +// CHECK: %[[M02:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[V1]], i32 0, i32 0 +// CHECK: %[[V2:.*]] = load i32*, i32** %[[M02]], align 8 +// CHECK: %[[V3:.*]] = ptrtoint i32** %[[M02]] to i64 +// CHECK: %[[V4:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V3]], i64 50) +// CHECK: %[[V5:.*]] = 
ptrtoint i32** %[[M0]] to i64 +// CHECK: %[[V6:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V5]], i64 50) +// CHECK: %[[V8:.*]] = ptrtoint i32* %[[V2]] to i64 +// CHECK: %[[V9:.*]] = call i64 @llvm.ptrauth.resign.i64(i64 %[[V8]], i32 1, i64 %[[V4]], i32 1, i64 %[[V6]]) + +// CHECK: define linkonce_odr %[[STRUCT_SA:.*]]* @_ZN2SAC2EOS_(%[[STRUCT_SA]]* %[[THIS:.*]], %[[STRUCT_SA]]* dereferenceable(16) %0) +// CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: %[[THIS_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: %[[_ADDR:.*]] = alloca %[[STRUCT_SA]]*, align 8 +// CHECK: store %[[STRUCT_SA]]* %[[THIS]], %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: store %[[STRUCT_SA]]* %[[V0:.*]], %[[STRUCT_SA]]** %[[_ADDR]], align 8 +// CHECK: %[[THIS1:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[THIS_ADDR]], align 8 +// CHECK: store %[[STRUCT_SA]]* %[[THIS1]], %[[STRUCT_SA]]** %[[RETVAL]], align 8 +// CHECK: %[[M0:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[THIS1]], i32 0, i32 0 +// CHECK: %[[V1:.*]] = load %[[STRUCT_SA]]*, %[[STRUCT_SA]]** %[[_ADDR]], align 8 +// CHECK: %[[M02:.*]] = getelementptr inbounds %[[STRUCT_SA]], %[[STRUCT_SA]]* %[[V1]], i32 0, i32 0 +// CHECK: %[[V2:.*]] = load i32*, i32** %[[M02]], align 8 +// CHECK: %[[V3:.*]] = ptrtoint i32** %[[M02]] to i64 +// CHECK: %[[V4:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V3]], i64 50) +// CHECK: %[[V5:.*]] = ptrtoint i32** %[[M0]] to i64 +// CHECK: %[[V6:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[V5]], i64 50) +// CHECK: %[[V8:.*]] = ptrtoint i32* %[[V2]] to i64 +// CHECK: %[[V9:.*]] = call i64 @llvm.ptrauth.resign.i64(i64 %[[V8]], i32 1, i64 %[[V4]], i32 1, i64 %[[V6]]) diff --git a/clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp b/clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp new file mode 100644 index 00000000000000..03fd677a9641c1 --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-rtti-layout.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 %s -I%S 
-triple=arm64-apple-ios -fptrauth-calls -std=c++11 -emit-llvm -o - | FileCheck %s +#include <typeinfo> + +struct A { int a; }; + +// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE = external global i8* +// CHECK: @_ZTVN10__cxxabiv117__class_type_infoE.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i32 2, i64 0, i64 0 }, section "llvm.ptrauth", align 8 +// CHECK: @_ZTS1A = linkonce_odr hidden constant [3 x i8] c"1A\00" +// CHECK: @_ZTI1A = linkonce_odr hidden constant { i8*, i8* } { i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTVN10__cxxabiv117__class_type_infoE.ptrauth to i8*), i8* inttoptr (i64 add (i64 ptrtoint ([3 x i8]* @_ZTS1A to i64), i64 -9223372036854775808) to i8*) } + +auto ATI = typeid(A); diff --git a/clang/test/CodeGenCXX/ptrauth-static-destructors.cpp b/clang/test/CodeGenCXX/ptrauth-static-destructors.cpp new file mode 100644 index 00000000000000..6c8d0c560681a8 --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-static-destructors.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -emit-llvm -std=c++11 %s -o - \ +// RUN: | FileCheck %s --check-prefix=CXAATEXIT + +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -emit-llvm -std=c++11 %s -o - \ +// RUN: -fno-use-cxa-atexit \ +// RUN: | FileCheck %s --check-prefix=ATEXIT + +class Foo { + public: + ~Foo() { + } +}; + +Foo global; + +// CXAATEXIT: @_ZN3FooD1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.Foo* (%class.Foo*)* @_ZN3FooD1Ev to i8*), i32 0, i64 0, i64 0 }, section "llvm.ptrauth", align 8 +// CXAATEXIT: define internal void @__cxx_global_var_init() +// CXAATEXIT: call i32 @__cxa_atexit(void (i8*)* bitcast ({ i8*, i32, i64, i64 }* @_ZN3FooD1Ev.ptrauth to void (i8*)*), i8* getelementptr inbounds (%class.Foo, %class.Foo* @global, i32 0, i32 0), i8* @__dso_handle) + + +// ATEXIT: @__dtor_global.ptrauth = private constant { i8*, i32, i64, 
i64 } { i8* bitcast (void ()* @__dtor_global to i8*), i32 0, i64 0, i64 0 }, section "llvm.ptrauth", align 8 +// ATEXIT: define internal void @__cxx_global_var_init() +// ATEXIT: %{{.*}} = call i32 @atexit(void ()* bitcast ({ i8*, i32, i64, i64 }* @__dtor_global.ptrauth to void ()*)) + +// ATEXIT: define internal void @__dtor_global() {{.*}} section "__TEXT,__StaticInit,regular,pure_instructions" { +// ATEXIT: %{{.*}} = call %class.Foo* @_ZN3FooD1Ev(%class.Foo* @global) diff --git a/clang/test/CodeGenCXX/ptrauth-throw.cpp b/clang/test/CodeGenCXX/ptrauth-throw.cpp new file mode 100644 index 00000000000000..8aebe970342737 --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-throw.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fcxx-exceptions -emit-llvm %s -o - | FileCheck %s + +class Foo { + public: + ~Foo() { + } +}; + +void f() { + throw Foo(); +} + +// CHECK: @_ZN3FooD1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.Foo* (%class.Foo*)* @_ZN3FooD1Ev to i8*), i32 0, i64 0, i64 0 }, section "llvm.ptrauth", align 8 + +// CHECK: define void @_Z1fv() +// CHECK: call void @__cxa_throw(i8* %{{.*}}, i8* bitcast ({ i8*, i8* }* @_ZTI3Foo to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN3FooD1Ev.ptrauth to i8*)) diff --git a/clang/test/CodeGenCXX/ptrauth-thunks.cpp b/clang/test/CodeGenCXX/ptrauth-thunks.cpp new file mode 100644 index 00000000000000..8bc0ba8fb6065d --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-thunks.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -emit-llvm -std=c++11 %s -o - -O1 | FileCheck %s + +namespace Test1 { + struct B1 { + virtual void* foo1() { + return 0; + } + }; + struct Pad1 { + virtual ~Pad1() {} + }; + struct Proxy1 : Pad1, B1 { + virtual ~Proxy1() {} + }; + struct D : virtual Proxy1 { + virtual ~D() {} + virtual void* foo1(); + }; + void* D::foo1() { + return (void*)this; + } +} + +// CHECK-LABEL: define linkonce_odr void 
@_ZTv0_n24_N5Test11DD0Ev(%"struct.Test1::D"* %this) +// CHECK: %[[BitcastThis:.*]] = bitcast %"struct.Test1::D"* %this to i64* +// CHECK: %[[SignedVTable:.*]] = load i64, i64* %[[BitcastThis]], align 8 +// CHECK: %[[VTable:.*]] = tail call i64 @llvm.ptrauth.auth.i64(i64 %[[SignedVTable]], i32 2, i64 0) \ No newline at end of file diff --git a/clang/test/CodeGenCXX/ptrauth-virtual-function.cpp b/clang/test/CodeGenCXX/ptrauth-virtual-function.cpp new file mode 100644 index 00000000000000..22b96377d8aa3a --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-virtual-function.cpp @@ -0,0 +1,597 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -emit-llvm -std=c++11 %s -o - | FileCheck %s + +// Check virtual function pointers in vtables are signed and their relocation +// structures are emitted. + +// CHECK: @_ZTV2B1 = unnamed_addr constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI2B1 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B12m0Ev.ptrauth to i8*)] }, align 8 +// CHECK: @_ZTV2B1.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (i8** getelementptr inbounds ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV2B1, i32 0, inrange i32 0, i32 2) to i8*), i32 2, i64 0, i64 0 }, section "llvm.ptrauth" +// CHECK: @g_B1 = global { i8** } { i8** getelementptr inbounds ({ i8*, i32, i64, i64 }, { i8*, i32, i64, i64 }* @_ZTV2B1.ptrauth, i32 0, i32 0) } + +// CHECK: @_ZTV2B0 = unnamed_addr constant { [7 x i8*] } { [7 x i8*] [{{.*}} i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B0D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B0D0Ev.ptrauth to i8*)] } +// CHECK: @_ZN2B02m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ 
[7 x i8*] }, { [7 x i8*] }* @_ZTV2B0, i32 0, i32 0, i32 2) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S1* (%class.B0*)* @_ZN2B02m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*] }, { [7 x i8*] }* @_ZTV2B0, i32 0, i32 0, i32 3) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*] }, { [7 x i8*] }* @_ZTV2B0, i32 0, i32 0, i32 4) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZN2B0D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.B0* (%class.B0*)* @_ZN2B0D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*] }, { [7 x i8*] }* @_ZTV2B0, i32 0, i32 0, i32 5) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZN2B0D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B0D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*] }, { [7 x i8*] }* @_ZTV2B0, i32 0, i32 0, i32 6) to i64), i64 21295 }, section "llvm.ptrauth" + +// CHECK: @_ZTV2D0 = unnamed_addr constant { [9 x i8*] } { [9 x i8*] [{{.*}} i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D02m0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTch0_h4_N2D02m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth.1 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D0D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D0D0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D02m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D02m3Ev.ptrauth to i8*)] } +// CHECK: @_ZN2D02m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D0*)* @_ZN2D02m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ 
[9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, i32 0, i32 2) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTch0_h4_N2D02m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D0*)* @_ZTch0_h4_N2D02m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, i32 0, i32 3) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.1 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, i32 0, i32 4) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZN2D0D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.D0* (%class.D0*)* @_ZN2D0D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, i32 0, i32 5) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZN2D0D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D0*)* @_ZN2D0D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, i32 0, i32 6) to i64), i64 21295 }, section "llvm.ptrauth" +// CHECK: @_ZN2D02m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D0*)* @_ZN2D02m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, i32 0, i32 7) to i64), i64 35045 }, section "llvm.ptrauth" +// CHECK: @_ZN2D02m3Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D0*)* @_ZN2D02m3Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, i32 0, i32 8) to i64), i64 10565 }, section "llvm.ptrauth" + +// CHECK: @_ZTV2D1 = unnamed_addr constant { [8 x i8*] } { [8 x i8*] [{{.*}} i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D12m0Ev.ptrauth to 
i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTch0_h4_N2D12m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth.2 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D1D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D1D0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D12m1Ev.ptrauth to i8*)] } +// CHECK: @_ZN2D12m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D1*)* @_ZN2D12m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [8 x i8*] }, { [8 x i8*] }* @_ZTV2D1, i32 0, i32 0, i32 2) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTch0_h4_N2D12m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D1*)* @_ZTch0_h4_N2D12m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [8 x i8*] }, { [8 x i8*] }* @_ZTV2D1, i32 0, i32 0, i32 3) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.2 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [8 x i8*] }, { [8 x i8*] }* @_ZTV2D1, i32 0, i32 0, i32 4) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZN2D1D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.D1* (%class.D1*)* @_ZN2D1D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [8 x i8*] }, { [8 x i8*] }* @_ZTV2D1, i32 0, i32 0, i32 5) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZN2D1D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D1*)* @_ZN2D1D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [8 x i8*] }, { [8 x i8*] }* @_ZTV2D1, i32 0, i32 0, i32 6) to i64), i64 21295 }, section "llvm.ptrauth" +// CHECK: @_ZN2D12m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D1*)* @_ZN2D12m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [8 
x i8*] }, { [8 x i8*] }* @_ZTV2D1, i32 0, i32 0, i32 7) to i64), i64 52864 }, section "llvm.ptrauth" + +// CHECK: @_ZTV2D2 = unnamed_addr constant { [9 x i8*], [8 x i8*] } { [9 x i8*] [{{.*}} i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D22m0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTch0_h4_N2D22m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth.3 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D2D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D2D0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D22m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D22m3Ev.ptrauth to i8*)], [8 x i8*] [{{.*}} i8* bitcast ({ i8*, i32, i64, i64 }* @_ZThn16_N2D22m0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTchn16_h4_N2D22m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth.4 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZThn16_N2D2D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZThn16_N2D2D0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZThn16_N2D22m1Ev.ptrauth to i8*)] } +// CHECK: @_ZN2D22m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D2*)* @_ZN2D22m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 0, i32 2) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTch0_h4_N2D22m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D2*)* @_ZTch0_h4_N2D22m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 0, i32 3) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.3 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* 
@_ZTV2D2, i32 0, i32 0, i32 4) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZN2D2D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.D2* (%class.D2*)* @_ZN2D2D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 0, i32 5) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZN2D2D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D2*)* @_ZN2D2D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 0, i32 6) to i64), i64 21295 }, section "llvm.ptrauth" +// CHECK: @_ZN2D22m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D2*)* @_ZN2D22m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 0, i32 7) to i64), i64 35045 }, section "llvm.ptrauth" +// CHECK: @_ZN2D22m3Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D2*)* @_ZN2D22m3Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 0, i32 8) to i64), i64 10565 }, section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D22m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D2*)* @_ZThn16_N2D22m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 1, i32 2) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTchn16_h4_N2D22m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D2*)* @_ZTchn16_h4_N2D22m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 1, i32 3) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: 
@_ZN2B02m2Ev.ptrauth.4 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 1, i32 4) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D2D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.D2* (%class.D2*)* @_ZThn16_N2D2D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 1, i32 5) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D2D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D2*)* @_ZThn16_N2D2D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 1, i32 6) to i64), i64 21295 }, section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D22m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D2*)* @_ZThn16_N2D22m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, i32 1, i32 7) to i64), i64 52864 }, section "llvm.ptrauth" + +// CHECK: @_ZTV2D3 = unnamed_addr constant { [7 x i8*], [7 x i8*], [11 x i8*] } { [7 x i8*] [i8* inttoptr (i64 32 to i8*), i8* null, i8* bitcast ({ i8*, i8*, i32, i32, i8*, i64, i8*, i64 }* @_ZTI2D3 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D32m0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D32m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D3D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2D3D0Ev.ptrauth to i8*)], [7 x i8*] [i8* inttoptr (i64 16 to i8*), i8* inttoptr (i64 -16 to i8*), i8* bitcast ({ i8*, i8*, i32, i32, i8*, i64, i8*, i64 }* @_ZTI2D3 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZThn16_N2D32m0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 
}* @_ZThn16_N2D32m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZThn16_N2D3D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZThn16_N2D3D0Ev.ptrauth to i8*)], [11 x i8*] [i8* inttoptr (i64 -32 to i8*), i8* null, i8* inttoptr (i64 -32 to i8*), i8* inttoptr (i64 -32 to i8*), i8* inttoptr (i64 -32 to i8*), i8* bitcast ({ i8*, i8*, i32, i32, i8*, i64, i8*, i64 }* @_ZTI2D3 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n24_N2D32m0Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTcv0_n32_h4_N2D32m1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth.11 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n48_N2D3D1Ev.ptrauth to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n48_N2D3D0Ev.ptrauth to i8*)] } + +// CHECK: @_ZTC2D30_2V0 = unnamed_addr constant { [7 x i8*], [11 x i8*] } { [7 x i8*] [i8* inttoptr (i64 32 to i8*), i8* null, i8* bitcast (i8** @_ZTI2V0 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V02m0Ev.ptrauth.12 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V02m1Ev.ptrauth.13 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V0D1Ev.ptrauth.14 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V0D0Ev.ptrauth.15 to i8*)], [11 x i8*] [i8* inttoptr (i64 -32 to i8*), i8* null, i8* inttoptr (i64 -32 to i8*), i8* inttoptr (i64 -32 to i8*), i8* inttoptr (i64 -32 to i8*), i8* bitcast (i8** @_ZTI2V0 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n24_N2V02m0Ev.ptrauth.16 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTcv0_n32_h4_N2V02m1Ev.ptrauth.17 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth.18 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n48_N2V0D1Ev.ptrauth.19 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n48_N2V0D0Ev.ptrauth.20 to i8*)] } + +// CHECK: @_ZN2V02m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZN2V02m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] 
}, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 3) to i64), i64 44578 }, section "llvm.ptrauth" +// CHECK: @_ZN2V02m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V0*)* @_ZN2V02m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 4) to i64), i64 30766 }, section "llvm.ptrauth" +// CHECK: @_ZN2V0D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V0* (%class.V0*)* @_ZN2V0D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 5) to i64), i64 57279 }, section "llvm.ptrauth" +// CHECK: @_ZN2V0D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZN2V0D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 6) to i64), i64 62452 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n24_N2V02m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZTv0_n24_N2V02m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 6) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTcv0_n32_h4_N2V02m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V0*)* @_ZTcv0_n32_h4_N2V02m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 7) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.5 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, 
i32 1, i32 8) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V0D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V0* (%class.V0*)* @_ZTv0_n48_N2V0D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 9) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V0D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZTv0_n48_N2V0D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 10) to i64), i64 21295 }, section "llvm.ptrauth" + +// CHECK: @_ZTC2D316_2V1 = unnamed_addr constant { [7 x i8*], [11 x i8*] } { [7 x i8*] [i8* inttoptr (i64 16 to i8*), i8* null, i8* bitcast (i8** @_ZTI2V1 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V12m0Ev.ptrauth.21 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V12m1Ev.ptrauth.22 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V1D1Ev.ptrauth.23 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2V1D0Ev.ptrauth.24 to i8*)], [11 x i8*] [i8* inttoptr (i64 -16 to i8*), i8* null, i8* inttoptr (i64 -16 to i8*), i8* inttoptr (i64 -16 to i8*), i8* inttoptr (i64 -16 to i8*), i8* bitcast (i8** @_ZTI2V1 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n24_N2V12m0Ev.ptrauth.25 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTcv0_n32_h4_N2V12m1Ev.ptrauth.26 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZN2B02m2Ev.ptrauth.27 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n48_N2V1D1Ev.ptrauth.28 to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* @_ZTv0_n48_N2V1D0Ev.ptrauth.29 to i8*)] } +// CHECK: @_ZN2V12m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZN2V12m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 
0, i32 3) to i64), i64 49430 }, section "llvm.ptrauth" +// CHECK: @_ZN2V12m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V1*)* @_ZN2V12m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 0, i32 4) to i64), i64 57119 }, section "llvm.ptrauth" +// CHECK: @_ZN2V1D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V1* (%class.V1*)* @_ZN2V1D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 0, i32 5) to i64), i64 60799 }, section "llvm.ptrauth" +// CHECK: @_ZN2V1D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZN2V1D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 0, i32 6) to i64), i64 52565 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n24_N2V12m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZTv0_n24_N2V12m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 6) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTcv0_n32_h4_N2V12m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V1*)* @_ZTcv0_n32_h4_N2V12m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 7) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.6 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 8) to i64), i64 43073 }, section 
"llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V1D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V1* (%class.V1*)* @_ZTv0_n48_N2V1D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 9) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V1D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZTv0_n48_N2V1D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 10) to i64), i64 21295 }, section "llvm.ptrauth" +// CHECK: @_ZN2D32m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D3*)* @_ZN2D32m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 0, i32 3) to i64), i64 44578 }, section "llvm.ptrauth" +// CHECK: @_ZN2D32m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D3*)* @_ZN2D32m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 0, i32 4) to i64), i64 30766 }, section "llvm.ptrauth" +// CHECK: @_ZN2D3D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.D3* (%class.D3*)* @_ZN2D3D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 0, i32 5) to i64), i64 57279 }, section "llvm.ptrauth" +// CHECK: @_ZN2D3D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D3*)* @_ZN2D3D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 0, i32 6) to i64), i64 62452 }, 
section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D32m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D3*)* @_ZThn16_N2D32m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 1, i32 3) to i64), i64 49430 }, section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D32m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D3*)* @_ZThn16_N2D32m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 1, i32 4) to i64), i64 57119 }, section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D3D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.D3* (%class.D3*)* @_ZThn16_N2D3D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 1, i32 5) to i64), i64 60799 }, section "llvm.ptrauth" +// CHECK: @_ZThn16_N2D3D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D3*)* @_ZThn16_N2D3D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 1, i32 6) to i64), i64 52565 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n24_N2D32m0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D3*)* @_ZTv0_n24_N2D32m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 2, i32 6) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTcv0_n32_h4_N2D32m1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.D3*)* @_ZTcv0_n32_h4_N2D32m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x 
i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 2, i32 7) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.11 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 2, i32 8) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2D3D1Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.D3* (%class.D3*)* @_ZTv0_n48_N2D3D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 2, i32 9) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2D3D0Ev.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.D3*)* @_ZTv0_n48_N2D3D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [7 x i8*], [11 x i8*] }, { [7 x i8*], [7 x i8*], [11 x i8*] }* @_ZTV2D3, i32 0, i32 2, i32 10) to i64), i64 21295 }, section "llvm.ptrauth" +// CHECK: @_ZN2V02m0Ev.ptrauth.12 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZN2V02m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 3) to i64), i64 44578 }, section "llvm.ptrauth" +// CHECK: @_ZN2V02m1Ev.ptrauth.13 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V0*)* @_ZN2V02m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 4) to i64), i64 30766 }, section "llvm.ptrauth" +// CHECK: @_ZN2V0D1Ev.ptrauth.14 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V0* (%class.V0*)* @_ZN2V0D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], 
[11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 5) to i64), i64 57279 }, section "llvm.ptrauth" +// CHECK: @_ZN2V0D0Ev.ptrauth.15 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZN2V0D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 0, i32 6) to i64), i64 62452 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n24_N2V02m0Ev.ptrauth.16 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZTv0_n24_N2V02m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 6) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTcv0_n32_h4_N2V02m1Ev.ptrauth.17 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V0*)* @_ZTcv0_n32_h4_N2V02m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 7) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.18 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 8) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V0D1Ev.ptrauth.19 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V0* (%class.V0*)* @_ZTv0_n48_N2V0D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 9) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V0D0Ev.ptrauth.20 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V0*)* @_ZTv0_n48_N2V0D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] 
}, { [7 x i8*], [11 x i8*] }* @_ZTC2D30_2V0, i32 0, i32 1, i32 10) to i64), i64 21295 }, section "llvm.ptrauth" +// CHECK: @_ZN2V12m0Ev.ptrauth.21 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZN2V12m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 0, i32 3) to i64), i64 49430 }, section "llvm.ptrauth" +// CHECK: @_ZN2V12m1Ev.ptrauth.22 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V1*)* @_ZN2V12m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 0, i32 4) to i64), i64 57119 }, section "llvm.ptrauth" +// CHECK: @_ZN2V1D1Ev.ptrauth.23 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V1* (%class.V1*)* @_ZN2V1D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 0, i32 5) to i64), i64 60799 }, section "llvm.ptrauth" +// CHECK: @_ZN2V1D0Ev.ptrauth.24 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZN2V1D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 0, i32 6) to i64), i64 52565 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n24_N2V12m0Ev.ptrauth.25 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZTv0_n24_N2V12m0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 6) to i64), i64 53119 }, section "llvm.ptrauth" +// CHECK: @_ZTcv0_n32_h4_N2V12m1Ev.ptrauth.26 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%struct.S2* (%class.V1*)* @_ZTcv0_n32_h4_N2V12m1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* 
@_ZTC2D316_2V1, i32 0, i32 1, i32 7) to i64), i64 15165 }, section "llvm.ptrauth" +// CHECK: @_ZN2B02m2Ev.ptrauth.27 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.B0*)* @_ZN2B02m2Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 8) to i64), i64 43073 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V1D1Ev.ptrauth.28 = private constant { i8*, i32, i64, i64 } { i8* bitcast (%class.V1* (%class.V1*)* @_ZTv0_n48_N2V1D1Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 9) to i64), i64 25525 }, section "llvm.ptrauth" +// CHECK: @_ZTv0_n48_N2V1D0Ev.ptrauth.29 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%class.V1*)* @_ZTv0_n48_N2V1D0Ev to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*], [11 x i8*] }, { [7 x i8*], [11 x i8*] }* @_ZTC2D316_2V1, i32 0, i32 1, i32 10) to i64), i64 21295 }, section "llvm.ptrauth" + + +// C++ hierarchy driving the CHECK lines above: single inheritance (D0, D1), +// non-virtual multiple inheritance (D2), and virtual inheritance (V0/V1/D3), +// with covariant return overrides (B0::m1 returns S1*, overriders return S2*). +// Do not change any declaration here: mangled names, vtable slot indices, and +// the signing discriminator constants in the CHECK text all depend on them. +struct S0 { + int f; +}; + +struct S1 { + int f; +}; + +struct S2 : S0, S1 { + int f; +}; + +class B0 { +public: + virtual void m0(); + virtual S1 *m1(); + virtual void m2(); + virtual ~B0(); + int f; +}; + +class B1 { +public: + virtual void m0(); +}; + +class D0 : public B0 { +public: + void m0() override; + S2 *m1() override; + virtual void m3(); + int f; +}; + +class D1 : public B0 { +public: + void m0() override; + S2 *m1() override; + int f; +}; + +class D2 : public D0, public D1 { +public: + void m0() override; + S2 *m1() override; + void m3() override; + int f; +}; + +class V0 : public virtual B0 { +public: + void m0() override; + S2 *m1() override; + int f; +}; + +class V1 : public virtual B0 { +public: + void m0() override; + S2 *m1() override; + ~V1(); + int f; +}; + +class D3 : public V0, public V1 { +public: + void m0() override; + S2 *m1() override; + int f; +}; + +// NOTE(review): global presumably forces emission of B1's vtable — confirm. +B1 g_B1; + +void B0::m0() {}
+ +void B1::m0() {} + +void D0::m0() {} + +void D1::m0() {} + +void D2::m0() {} + +void D3::m0() {} + +V1::~V1() { + m1(); +} + +// Check sign/authentication of vtable pointers and authentication of virtual +// functions. + +// CHECK-LABEL: define %class.V1* @_ZN2V1D2Ev( +// CHECK: %[[T0:[0-9]+]] = load i8*, i8** %{{.*}} +// CHECK: %[[T1:[0-9]+]] = ptrtoint i8* %[[T0]] to i64 +// CHECK: %[[T2:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T1]], i32 2, i64 0) +// CHECK: %[[T3:[0-9]+]] = inttoptr i64 %[[T2]] to i8* +// CHECK: %[[SLOT:[0-9]+]] = bitcast %class.V1* %{{.*}} to i32 (...)*** +// CHECK: %[[T5:[0-9]+]] = bitcast i8* %[[T3]] to i32 (...)** +// CHECK: %[[T6:[0-9]+]] = ptrtoint i32 (...)** %[[T5]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 %[[T6]], i32 2, i64 0) +// CHECK: %[[SIGNED_VTADDR:[0-9]+]] = inttoptr i64 %[[T7]] to i32 (...)** +// CHECK: store i32 (...)** %[[SIGNED_VTADDR]], i32 (...)*** %[[SLOT]] + +// CHECK-LABEL: define void @_Z8testB0m0P2B0( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.B0*)**, void (%class.B0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.B0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.B0*)*, void (%class.B0*)** %[[T4]], i64 0 +// CHECK: %[[T5:[0-9]+]] = load void (%class.B0*)*, void (%class.B0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 53119) +// CHECK: call void %[[T5]](%class.B0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testB0m0(B0 *a) { + a->m0(); +} + +// CHECK-LABEL: define void @_Z8testB0m1P2B0( +// CHECK: %[[VTABLE:[a-z]+]] = load %struct.S1* (%class.B0*)**, %struct.S1* (%class.B0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint %struct.S1* 
(%class.B0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to %struct.S1* (%class.B0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds %struct.S1* (%class.B0*)*, %struct.S1* (%class.B0*)** %[[T4]], i64 1 +// CHECK: %[[T5:[0-9]+]] = load %struct.S1* (%class.B0*)*, %struct.S1* (%class.B0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint %struct.S1* (%class.B0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 15165) +// CHECK: call %struct.S1* %[[T5]](%class.B0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testB0m1(B0 *a) { + a->m1(); +} + +// CHECK-LABEL: define void @_Z8testB0m2P2B0( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.B0*)**, void (%class.B0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.B0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.B0*)*, void (%class.B0*)** %[[T4]], i64 2 +// CHECK: %[[T5:[0-9]+]] = load void (%class.B0*)*, void (%class.B0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 43073) +// CHECK: call void %[[T5]](%class.B0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testB0m2(B0 *a) { + a->m2(); +} + +// CHECK-LABEL: define void @_Z8testD0m0P2D0( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.D0*)**, void (%class.D0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.D0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.D0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.D0*)*, 
void (%class.D0*)** %[[T4]], i64 0 +// CHECK: %[[T5:[0-9]+]] = load void (%class.D0*)*, void (%class.D0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.D0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 53119) +// CHECK: call void %[[T5]](%class.D0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD0m0(D0 *a) { + a->m0(); +} + +// CHECK-LABEL: define void @_Z8testD0m1P2D0( +// CHECK: %[[VTABLE:[a-z]+]] = load %struct.S2* (%class.D0*)**, %struct.S2* (%class.D0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint %struct.S2* (%class.D0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to %struct.S2* (%class.D0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds %struct.S2* (%class.D0*)*, %struct.S2* (%class.D0*)** %[[T4]], i64 5 +// CHECK: %[[T5:[0-9]+]] = load %struct.S2* (%class.D0*)*, %struct.S2* (%class.D0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint %struct.S2* (%class.D0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 35045) +// CHECK: call %struct.S2* %[[T5]](%class.D0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD0m1(D0 *a) { + a->m1(); +} + +// CHECK-LABEL: define void @_Z8testD0m2P2D0( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.B0*)**, void (%class.B0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.B0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.B0*)*, void (%class.B0*)** %[[T4]], i64 2 +// CHECK: %[[T5:[0-9]+]] = load void (%class.B0*)*, void (%class.B0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 
@llvm.ptrauth.blend.i64(i64 %[[T6]], i64 43073) +// CHECK: call void %[[T5]](%class.B0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD0m2(D0 *a) { + a->m2(); +} + +// CHECK-LABEL: define void @_Z8testD0m3P2D0( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.D0*)**, void (%class.D0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.D0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.D0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.D0*)*, void (%class.D0*)** %[[T4]], i64 6 +// CHECK: %[[T5:[0-9]+]] = load void (%class.D0*)*, void (%class.D0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.D0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 10565) +// CHECK: call void %[[T5]](%class.D0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD0m3(D0 *a) { + a->m3(); +} + + +// CHECK-LABEL: define void @_Z8testD1m0P2D1( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.D1*)**, void (%class.D1*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.D1*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.D1*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.D1*)*, void (%class.D1*)** %[[T4]], i64 0 +// CHECK: %[[T5:[0-9]+]] = load void (%class.D1*)*, void (%class.D1*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.D1*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 53119) +// CHECK: call void %[[T5]](%class.D1* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD1m0(D1 *a) { + a->m0(); +} + +// CHECK-LABEL: define void @_Z8testD1m1P2D1( +// CHECK: %[[VTABLE:[a-z]+]] = load %struct.S2* (%class.D1*)**, %struct.S2* (%class.D1*)*** 
%{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint %struct.S2* (%class.D1*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to %struct.S2* (%class.D1*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds %struct.S2* (%class.D1*)*, %struct.S2* (%class.D1*)** %[[T4]], i64 5 +// CHECK: %[[T5:[0-9]+]] = load %struct.S2* (%class.D1*)*, %struct.S2* (%class.D1*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint %struct.S2* (%class.D1*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 52864) +// CHECK: call %struct.S2* %[[T5]](%class.D1* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD1m1(D1 *a) { + a->m1(); +} + +// CHECK-LABEL: define void @_Z8testD1m2P2D1( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.B0*)**, void (%class.B0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.B0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.B0*)*, void (%class.B0*)** %[[T4]], i64 2 +// CHECK: %[[T5:[0-9]+]] = load void (%class.B0*)*, void (%class.B0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 43073) +// CHECK: call void %[[T5]](%class.B0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD1m2(D1 *a) { + a->m2(); +} + + +// CHECK-LABEL: define void @_Z8testD2m0P2D2( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.D2*)**, void (%class.D2*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.D2*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.D2*)** +// CHECK: 
%[[VFN:[a-z]+]] = getelementptr inbounds void (%class.D2*)*, void (%class.D2*)** %[[T4]], i64 0 +// CHECK: %[[T5:[0-9]+]] = load void (%class.D2*)*, void (%class.D2*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.D2*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 53119) +// CHECK: call void %[[T5]](%class.D2* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD2m0(D2 *a) { + a->m0(); +} + +// CHECK-LABEL: define void @_Z8testD2m1P2D2( +// CHECK: %[[VTABLE:[a-z]+]] = load %struct.S2* (%class.D2*)**, %struct.S2* (%class.D2*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint %struct.S2* (%class.D2*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to %struct.S2* (%class.D2*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds %struct.S2* (%class.D2*)*, %struct.S2* (%class.D2*)** %[[T4]], i64 5 +// CHECK: %[[T5:[0-9]+]] = load %struct.S2* (%class.D2*)*, %struct.S2* (%class.D2*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint %struct.S2* (%class.D2*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 35045) +// CHECK: call %struct.S2* %[[T5]](%class.D2* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD2m1(D2 *a) { + a->m1(); +} + +// CHECK-LABEL: define void @_Z10testD2m2D0P2D2( +// CHECK: call void @_ZN2B02m2Ev(%class.B0* %{{.*}}){{$}} + +void testD2m2D0(D2 *a) { + a->D0::m2(); +} + +// CHECK-LABEL: define void @_Z10testD2m2D1P2D2( +// CHECK: call void @_ZN2B02m2Ev(%class.B0* %{{.*}}){{$}} + +void testD2m2D1(D2 *a) { + a->D1::m2(); +} + +// CHECK-LABEL: define void @_Z8testD2m3P2D2( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.D2*)**, void (%class.D2*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.D2*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: 
%[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.D2*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.D2*)*, void (%class.D2*)** %[[T4]], i64 6 +// CHECK: %[[T5:[0-9]+]] = load void (%class.D2*)*, void (%class.D2*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.D2*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 10565) +// CHECK: call void %[[T5]](%class.D2* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD2m3(D2 *a) { + a->m3(); +} + +// CHECK-LABEL: define void @_Z8testD3m0P2D3( +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.D3*)**, void (%class.D3*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.D3*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.D3*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.D3*)*, void (%class.D3*)** %[[T4]], i64 0 +// CHECK: %[[T5:[0-9]+]] = load void (%class.D3*)*, void (%class.D3*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.D3*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 44578) +// CHECK: call void %[[T5]](%class.D3* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD3m0(D3 *a) { + a->m0(); +} + +// CHECK-LABEL: define void @_Z8testD3m1P2D3( +// CHECK: %[[VTABLE:[a-z]+]] = load %struct.S2* (%class.D3*)**, %struct.S2* (%class.D3*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint %struct.S2* (%class.D3*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to %struct.S2* (%class.D3*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds %struct.S2* (%class.D3*)*, %struct.S2* (%class.D3*)** %[[T4]], i64 1 +// CHECK: %[[T5:[0-9]+]] = load %struct.S2* (%class.D3*)*, %struct.S2* (%class.D3*)** %[[VFN]] +// CHECK: 
%[[T6:[0-9]+]] = ptrtoint %struct.S2* (%class.D3*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 30766) +// CHECK: call %struct.S2* %[[T5]](%class.D3* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD3m1(D3 *a) { + a->m1(); +} + +// CHECK-LABEL: define void @_Z8testD3m2P2D3( +// CHECK: %[[VTABLE:[a-z0-9]+]] = load void (%class.B0*)**, void (%class.B0*)*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T0]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.B0*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.B0*)*, void (%class.B0*)** %[[T4]], i64 2 +// CHECK: %[[T5:[0-9]+]] = load void (%class.B0*)*, void (%class.B0*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.B0*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 43073) +// CHECK: call void %[[T5]](%class.B0* %{{.*}}) [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD3m2(D3 *a) { + a->m2(); +} + +// CHECK-LABEL: define void @_Z17testD3Destructor0P2D3( +// CHECK: %[[T1:[0-9]+]] = bitcast %class.D3* %{{.*}} to void (%class.D3*)*** +// CHECK: %[[VTABLE:[a-z]+]] = load void (%class.D3*)**, void (%class.D3*)*** %[[T1]] +// CHECK: %[[T2:[0-9]+]] = ptrtoint void (%class.D3*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T2]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to void (%class.D3*)** +// CHECK: %[[VFN:[a-z]+]] = getelementptr inbounds void (%class.D3*)*, void (%class.D3*)** %[[T4]], i64 3 +// CHECK: %[[T5:[0-9]+]] = load void (%class.D3*)*, void (%class.D3*)** %[[VFN]] +// CHECK: %[[T6:[0-9]+]] = ptrtoint void (%class.D3*)** %[[VFN]] to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 62452) +// CHECK: call void %[[T5]](%class.D3* %{{.*}}) #{{.*}} [ 
"ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD3Destructor0(D3 *a) { + delete a; +} + +// CHECK-LABEL: define void @_Z17testD3Destructor1P2D3( +// CHECK: %[[T1:[0-9]+]] = bitcast %class.D3* %{{.*}} to i64** +// CHECK: %[[VTABLE0:[a-z0-9]+]] = load i64*, i64** %[[T1]] +// CHECK: %[[T2:[0-9]+]] = ptrtoint i64* %[[VTABLE0]] to i64 +// CHECK: %[[T3:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T2]], i32 2, i64 0) +// CHECK: %[[T4:[0-9]+]] = inttoptr i64 %[[T3]] to i64* +// CHECK: %[[COMPLETE_OFFSET_PTR:.*]] = getelementptr inbounds i64, i64* %[[T4]], i64 -2 +// CHECK: %[[T5:[0-9]+]] = load i64, i64* %[[COMPLETE_OFFSET_PTR]] +// CHECK: %[[T6:[0-9]+]] = bitcast %class.D3* %{{.*}} to i8* +// CHECK: %[[T7:[0-9]+]] = getelementptr inbounds i8, i8* %[[T6]], i64 %[[T5]] +// CHECK: %[[T8:[0-9]+]] = bitcast %class.D3* %{{.*}} to %class.D3* (%class.D3*)*** +// CHECK: %[[VTABLE1:[a-z0-9]+]] = load %class.D3* (%class.D3*)**, %class.D3* (%class.D3*)*** %[[T8]] +// CHECK: %[[T9:[0-9]+]] = ptrtoint %class.D3* (%class.D3*)** %[[VTABLE1]] to i64 +// CHECK: %[[T10:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T9]], i32 2, i64 0) +// CHECK: %[[T11:[0-9]+]] = inttoptr i64 %[[T10]] to %class.D3* (%class.D3*)** +// CHECK: %[[VFN:[a-z0-9]+]] = getelementptr inbounds %class.D3* (%class.D3*)*, %class.D3* (%class.D3*)** %[[T11]], i64 2 +// CHECK: %[[T12:[0-9]+]] = load %class.D3* (%class.D3*)*, %class.D3* (%class.D3*)** %[[VFN]] +// CHECK: %[[T13:[0-9]+]] = ptrtoint %class.D3* (%class.D3*)** %[[VFN]] to i64 +// CHECK: %[[T14:[0-9]+]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T13]], i64 57279) +// CHECK: %call = call %class.D3* %[[T12]](%class.D3* %{{.*}}) #{{.*}} [ "ptrauth"(i32 0, i64 %[[T14]]) ] +// CHECK: call void @_ZdlPv(i8* %[[T7]]) + +void testD3Destructor1(D3 *a) { + ::delete a; +} + +// CHECK-LABEL: define void @_Z17testD3Destructor2P2D3( +// CHECK: %[[T1:.*]] = bitcast %class.D3* %{{.*}} to %class.D3* (%class.D3*)*** +// CHECK: %[[VTABLE:.*]] = load %class.D3* (%class.D3*)**, 
%class.D3* (%class.D3*)*** %[[T1]] +// CHECK: %[[T2:.*]] = ptrtoint %class.D3* (%class.D3*)** %[[VTABLE]] to i64 +// CHECK: %[[T3:.*]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T2]], i32 2, i64 0) +// CHECK: %[[T4:.*]] = inttoptr i64 %[[T3]] to %class.D3* (%class.D3*)** +// CHECK: %[[VFN:.*]] = getelementptr inbounds %class.D3* (%class.D3*)*, %class.D3* (%class.D3*)** %[[T4]], i64 2 +// CHECK: %[[T5:.*]] = load %class.D3* (%class.D3*)*, %class.D3* (%class.D3*)** %[[VFN]] +// CHECK: %[[T6:.*]] = ptrtoint %class.D3* (%class.D3*)** %[[VFN]] to i64 +// CHECK: %[[T7:.*]] = call i64 @llvm.ptrauth.blend.i64(i64 %[[T6]], i64 57279) +// CHECK: %call = call %class.D3* %[[T5]](%class.D3* %{{.*}}) #{{.*}} [ "ptrauth"(i32 0, i64 %[[T7]]) ] + +void testD3Destructor2(D3 *a) { + a->~D3(); +} + +void materializeConstructors() { + B0 B0; + B1 B1; + D0 D0; + D1 D1; + D2 D2; + D3 D3; + V0 V0; + V1 V1; +} + +// CHECK-LABEL: define linkonce_odr %class.B0* @_ZN2B0C2Ev( +// CHECK: %[[THIS:[0-9]+]] = bitcast %class.B0* %{{.*}} to i32 (...)*** +// CHECK: %[[T0:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i8** getelementptr inbounds ({ [7 x i8*] }, { [7 x i8*] }* @_ZTV2B0, i32 0, inrange i32 0, i32 2) to i64), i32 2, i64 0) +// CHECK: %[[SIGNED_VTADDR:[0-9]+]] = inttoptr i64 %[[T0]] to i32 (...)** +// CHECK: store i32 (...)** %[[SIGNED_VTADDR]], i32 (...)*** %[[THIS]] + +// CHECK-LABEL: define linkonce_odr %class.D0* @_ZN2D0C2Ev( +// CHECK: %[[THIS:[0-9]+]] = bitcast %class.D0* %{{.*}} to i32 (...)*** +// CHECK: %[[T0:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*] }, { [9 x i8*] }* @_ZTV2D0, i32 0, inrange i32 0, i32 2) to i64), i32 2, i64 0) +// CHECK: %[[SIGNED_VTADDR:[0-9]+]] = inttoptr i64 %[[T0]] to i32 (...)** +// CHECK: store i32 (...)** %[[SIGNED_VTADDR]], i32 (...)*** %[[THIS]] + +// CHECK-LABEL: define linkonce_odr %class.D1* @_ZN2D1C2Ev( +// CHECK: %[[THIS:[0-9]+]] = bitcast %class.D1* %{{.*}} to i32 (...)*** +// CHECK: 
%[[T0:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i8** getelementptr inbounds ({ [8 x i8*] }, { [8 x i8*] }* @_ZTV2D1, i32 0, inrange i32 0, i32 2) to i64), i32 2, i64 0) +// CHECK: %[[SIGNED_VTADDR:[0-9]+]] = inttoptr i64 %[[T0]] to i32 (...)** +// CHECK: store i32 (...)** %[[SIGNED_VTADDR]], i32 (...)*** %[[THIS]] + +// CHECK-LABEL: define linkonce_odr %class.D2* @_ZN2D2C2Ev( +// CHECK: %[[SLOT0:[0-9+]]] = bitcast %class.D2* %[[THIS:[a-z0-9]+]] to i32 (...)*** +// CHECK: %[[SIGN_VTADDR0:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, inrange i32 0, i32 2) to i64), i32 2, i64 0) +// CHECK: %[[T1:[0-9]+]] = inttoptr i64 %[[SIGN_VTADDR0]] to i32 (...)** +// CHECK: store i32 (...)** %[[T1]], i32 (...)*** %[[SLOT0]] +// CHECK: %[[T2:[0-9]+]] = bitcast %class.D2* %[[THIS]] to i8* +// CHECK: %[[T3:[a-z0-9.]+]] = getelementptr inbounds i8, i8* %[[T2]], i64 16 +// CHECK: %[[SLOT1:[0-9]+]] = bitcast i8* %[[T3]] to i32 (...)*** +// CHECK: %[[SIGN_VTADDR1:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @_ZTV2D2, i32 0, inrange i32 1, i32 2) to i64), i32 2, i64 0) +// CHECK: %[[T5:[0-9]+]] = inttoptr i64 %[[SIGN_VTADDR1]] to i32 (...)** +// CHECK: store i32 (...)** %[[T5]], i32 (...)*** %[[SLOT1]] + +// CHECK-LABEL: define linkonce_odr %class.V0* @_ZN2V0C2Ev( +// CHECK: %[[VTT:[a-z0-9]+]] = load i8**, i8*** %{{.*}} +// CHECK: %[[T0:[0-9]+]] = load i8*, i8** %[[VTT]] +// CHECK: %[[T1:[0-9]+]] = ptrtoint i8* %[[T0]] to i64 +// CHECK: %[[T2:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T1]], i32 2, i64 0) +// CHECK: %[[T3:[0-9]+]] = inttoptr i64 %[[T2]] to i8* +// CHECK: %[[SLOT0:[0-9]+]] = bitcast %class.V0* %[[THIS:[a-z0-9]+]] to i32 (...)*** +// CHECK: %[[T5:[0-9]+]] = bitcast i8* %[[T3]] to i32 (...)** +// CHECK: %[[VTADDR0:[0-9]+]] = ptrtoint i32 (...)** %[[T5]] 
to i64 +// CHECK: %[[T7:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 %[[VTADDR0]], i32 2, i64 0) +// CHECK: %[[SIGN_VTADDR0:[0-9]+]] = inttoptr i64 %[[T7]] to i32 (...)** +// CHECK: store i32 (...)** %[[SIGN_VTADDR0]], i32 (...)*** %[[SLOT0]] +// CHECK: %[[T9:[0-9]+]] = getelementptr inbounds i8*, i8** %[[VTT]], i64 1 +// CHECK: %[[T10:[0-9]+]] = load i8*, i8** %[[T9]] +// CHECK: %[[T11:[0-9]+]] = ptrtoint i8* %[[T10]] to i64 +// CHECK: %[[T12:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T11]], i32 2, i64 0) +// CHECK: %[[T13:[0-9]+]] = inttoptr i64 %[[T12]] to i8* +// CHECK: %[[T14:[0-9]+]] = bitcast %class.V0* %[[THIS]] to i8** +// CHECK: %[[VTABLE:[a-z]+]] = load i8*, i8** %[[T14]] +// CHECK: %[[T15:[0-9]+]] = ptrtoint i8* %[[VTABLE]] to i64 +// CHECK: %[[T16:[0-9]+]] = call i64 @llvm.ptrauth.auth.i64(i64 %[[T15]], i32 2, i64 0) +// CHECK: %[[T17:[0-9]+]] = inttoptr i64 %[[T16]] to i8* +// CHECK: %[[VBASE_OFFSET_PTR:[a-z.]+]] = getelementptr i8, i8* %[[T17]], i64 -24 +// CHECK: %[[T18:[0-9]+]] = bitcast i8* %[[VBASE_OFFSET_PTR]] to i64* +// CHECK: %[[VBASE_OFFSET:[a-z.]+]] = load i64, i64* %[[T18]] +// CHECK: %[[T19:[0-9]+]] = bitcast %class.V0* %[[THIS]] to i8* +// CHECK: %[[T20:[a-z.]+]] = getelementptr inbounds i8, i8* %[[T19]], i64 %[[VBASE_OFFSET]] +// CHECK: %[[SLOT1:[0-9]+]] = bitcast i8* %[[T20]] to i32 (...)*** +// CHECK: %[[T21:[0-9]+]] = bitcast i8* %[[T13]] to i32 (...)** +// CHECK: %[[VTADDR1:[0-9]+]] = ptrtoint i32 (...)** %[[T21]] to i64 +// CHECK: %[[T23:[0-9]+]] = call i64 @llvm.ptrauth.sign.i64(i64 %[[VTADDR1]], i32 2, i64 0) +// CHECK: %[[SIGN_VTADDR1:[0-9]+]] = inttoptr i64 %[[T23]] to i32 (...)** +// CHECK: store i32 (...)** %[[SIGN_VTADDR1]], i32 (...)*** %[[SLOT1]] diff --git a/clang/test/CodeGenObjC/ptrauth-attr-exception.m b/clang/test/CodeGenObjC/ptrauth-attr-exception.m new file mode 100644 index 00000000000000..c90f747e4741d5 --- /dev/null +++ b/clang/test/CodeGenObjC/ptrauth-attr-exception.m @@ -0,0 +1,17 @@ +// RUN: 
%clang_cc1 -triple arm64-apple-ios -fptrauth-calls -emit-llvm -fexceptions -fobjc-exceptions -o - %s | FileCheck %s + +__attribute__((objc_root_class)) +@interface Root { + Class isa; +} +@end + +__attribute__((objc_exception)) +@interface A : Root +@end + +@implementation A +@end + +// CHECK: @objc_ehtype_vtable.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @objc_ehtype_vtable, i32 2) to i8*), i32 2, i64 0, i64 0 }, section "llvm.ptrauth", align 8 +// CHECK: @"OBJC_EHTYPE_$_A" = global {{%.*}} { i8** getelementptr inbounds ({ i8*, i32, i64, i64 }, { i8*, i32, i64, i64 }* @objc_ehtype_vtable.ptrauth, i32 0, i32 0) diff --git a/clang/test/CodeGenObjC/ptrauth-blocks.m b/clang/test/CodeGenObjC/ptrauth-blocks.m new file mode 100644 index 00000000000000..2e3614b7820c0e --- /dev/null +++ b/clang/test/CodeGenObjC/ptrauth-blocks.m @@ -0,0 +1,64 @@ +// RUN: %clang_cc1 -fptrauth-calls -fobjc-arc -fblocks -fobjc-runtime=ios-7 -triple arm64-apple-ios -emit-llvm %s -o - | FileCheck %s + +void (^blockptr)(void); + +// CHECK: [[INVOCATION_1:@.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (i8*)* {{@.*}} to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }, { i8**, i32, i32, i8*, %struct.__block_descriptor* }* [[GLOBAL_BLOCK_1:@.*]], i32 0, i32 3) to i64), i64 0 }, section "llvm.ptrauth" +// CHECK: [[GLOBAL_BLOCK_1]] = internal constant { i8**, i32, i32, i8*, %struct.__block_descriptor* } { i8** @_NSConcreteGlobalBlock, i32 1342177280, i32 0, i8* bitcast ({ i8*, i32, i64, i64 }* [[INVOCATION_1]] to i8*), +void (^globalblock)(void) = ^{}; + +// CHECK: [[COPYDISPOSE_COPY:@.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (i8*, i8*)* {{@.*}} to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i64, i64, i8*, i8*, i8*, i64 }, { i64, i64, i8*, i8*, i8*, i64 }* [[COPYDISPOSE_DESCRIPTOR:@.*]], i32 0, i32 2) to i64), i64 0 }, 
section "llvm.ptrauth" +// CHECK: [[COPYDISPOSE_DISPOSE:@.*]] = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (i8*)* {{@.*}} to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i64, i64, i8*, i8*, i8*, i64 }, { i64, i64, i8*, i8*, i8*, i64 }* [[COPYDISPOSE_DESCRIPTOR]], i32 0, i32 3) to i64), i64 0 }, section "llvm.ptrauth" +// CHECK: [[COPYDISPOSE_DESCRIPTOR:@.*]] = linkonce_odr hidden unnamed_addr constant { i64, i64, i8*, i8*, i8*, i64 } { i64 0, i64 40, i8* bitcast ({ i8*, i32, i64, i64 }* [[COPYDISPOSE_COPY]] to i8*), i8* bitcast ({ i8*, i32, i64, i64 }* [[COPYDISPOSE_DISPOSE]] to i8*), + +@interface A +- (int) count; +@end + +// CHECK-LABEL: define void @test_block_call() +void test_block_call() { + // CHECK: [[T0:%.*]] = load void ()*, void ()** @blockptr, + // CHECK-NEXT: [[BLOCK:%.*]] = bitcast void ()* [[T0]] to [[BLOCK_T:%.*]]*{{$}} + // CHECK-NEXT: [[FNADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 3 + // CHECK-NEXT: [[BLOCK_OPAQUE:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to i8* + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[FNADDR]], + // CHECK-NEXT: [[FNPTR:%.*]] = bitcast i8* [[T0]] to void (i8*)* + // CHECK-NEXT: [[DISC:%.*]] = ptrtoint i8** [[FNADDR]] to i64 + // CHECK-NEXT: call void [[FNPTR]](i8* [[BLOCK_OPAQUE]]) [ "ptrauth"(i32 0, i64 [[DISC]]) ] + blockptr(); +} + +void use_block(int (^)(void)); + +// CHECK-LABEL: define void @test_block_literal( +void test_block_literal(int i) { + // CHECK: [[I:%.*]] = alloca i32, + // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:.*]], align + // CHECK: [[FNPTRADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 3 + // CHECK-NEXT: [[DISCRIMINATOR:%.*]] = ptrtoint i8** [[FNPTRADDR]] to i64 + // CHECK-NEXT: [[SIGNED:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (i32 (i8*)* {{@.*}} to i64), i32 0, i64 [[DISCRIMINATOR]]) + // CHECK-NEXT: [[T0:%.*]] = inttoptr i64 [[SIGNED]] to i8* + // CHECK-NEXT: store i8* [[T0]], 
i8** [[FNPTRADDR]] + use_block(^{return i;}); +} + +// CHECK-LABEL: define void @test_copy_destroy +void test_copy_destroy(A *a) { + // CHECK: [[COPYDISPOSE_DESCRIPTOR]] + use_block(^{return [a count];}); +} + +// CHECK-LABEL: define void @test_byref_copy_destroy +void test_byref_copy_destroy(A *a) { + // CHECK: [[COPY_FIELD:%.*]] = getelementptr inbounds [[BYREF_T:%.*]], {{%.*}}* [[BYREF:%.*]], i32 0, i32 4 + // CHECK-NEXT: [[T0:%.*]] = ptrtoint i8** [[COPY_FIELD]] to i64 + // CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (void (i8*, i8*)* {{@.*}} to i64), i32 0, i64 [[T0]]) + // CHECK-NEXT: [[T2:%.*]] = inttoptr i64 [[T1]] to i8* + // CHECK-NEXT: store i8* [[T2]], i8** [[COPY_FIELD]], align 8 + // CHECK: [[DISPOSE_FIELD:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[BYREF]], i32 0, i32 5 + // CHECK-NEXT: [[T0:%.*]] = ptrtoint i8** [[DISPOSE_FIELD]] to i64 + // CHECK-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign.i64(i64 ptrtoint (void (i8*)* {{@.*}} to i64), i32 0, i64 [[T0]]) + // CHECK-NEXT: [[T2:%.*]] = inttoptr i64 [[T1]] to i8* + // CHECK-NEXT: store i8* [[T2]], i8** [[DISPOSE_FIELD]], align 8 + __block A *aweak = a; + use_block(^{return [aweak count];}); +} diff --git a/clang/test/CodeGenObjC/ptrauth-method-list.m b/clang/test/CodeGenObjC/ptrauth-method-list.m new file mode 100644 index 00000000000000..8f8f4be626edff --- /dev/null +++ b/clang/test/CodeGenObjC/ptrauth-method-list.m @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -fptrauth-calls -fobjc-arc -fblocks -fobjc-runtime=ios-7 -triple arm64-apple-ios -emit-llvm -o - %s | FileCheck %s + +// CHECK: @"\01+[C pm1].ptrauth" = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (i8*, i8*)* @"\01+[C pm1]" to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i32, i32, [2 x %struct._objc_method] }, { i32, i32, [2 x %struct._objc_method] }* @"_OBJC_$_CLASS_METHODS_C", i32 0, i32 2, i32 0, i32 2) to i64), i64 0 }, section "llvm.ptrauth" +// CHECK: @"\01+[C m1].ptrauth" = 
private constant { i8*, i32, i64, i64 } { i8* bitcast (void (i8*, i8*)* @"\01+[C m1]" to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i32, i32, [2 x %struct._objc_method] }, { i32, i32, [2 x %struct._objc_method] }* @"_OBJC_$_CLASS_METHODS_C", i32 0, i32 2, i32 1, i32 2) to i64), i64 0 }, section "llvm.ptrauth" +// CHECK: @"_OBJC_$_CLASS_METHODS_C" = internal global { i32, i32, [2 x %struct._objc_method] } { i32 24, i32 2, [2 x %struct._objc_method] [%struct._objc_method { i8* getelementptr inbounds ([4 x i8], [4 x i8]* @OBJC_METH_VAR_NAME{{.*}}, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @OBJC_METH_VAR_TYPE_, i32 0, i32 0), i8* bitcast ({ i8*, i32, i64, i64 }* @"\01+[C pm1].ptrauth" to i8*) }, %struct._objc_method { i8* getelementptr inbounds ([3 x i8], [3 x i8]* @OBJC_METH_VAR_NAME{{.*}}, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @OBJC_METH_VAR_TYPE_, i32 0, i32 0), i8* bitcast ({ i8*, i32, i64, i64 }* @"\01+[C m1].ptrauth" to i8*) }] }, section "__DATA, __objc_const" +// CHECK: "\01-[C pm0].ptrauth" = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%0*, i8*)* @"\01-[C pm0]" to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i32, i32, [2 x %struct._objc_method] }, { i32, i32, [2 x %struct._objc_method] }* @"_OBJC_$_INSTANCE_METHODS_C", i32 0, i32 2, i32 0, i32 2) to i64), i64 0 }, section "llvm.ptrauth" +// CHECK: "\01-[C m0].ptrauth" = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%0*, i8*)* @"\01-[C m0]" to i8*), i32 0, i64 ptrtoint (i8** getelementptr inbounds ({ i32, i32, [2 x %struct._objc_method] }, { i32, i32, [2 x %struct._objc_method] }* @"_OBJC_$_INSTANCE_METHODS_C", i32 0, i32 2, i32 1, i32 2) to i64), i64 0 }, section "llvm.ptrauth" +// CHECK: @"_OBJC_$_INSTANCE_METHODS_C" = internal global { i32, i32, [2 x %struct._objc_method] } { i32 24, i32 2, [2 x %struct._objc_method] [%struct._objc_method { i8* getelementptr inbounds ([4 x i8], [4 x i8]* 
@OBJC_METH_VAR_NAME{{.*}}, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @OBJC_METH_VAR_TYPE_, i32 0, i32 0), i8* bitcast ({ i8*, i32, i64, i64 }* @"\01-[C pm0].ptrauth" to i8*) }, %struct._objc_method { i8* getelementptr inbounds ([3 x i8], [3 x i8]* @OBJC_METH_VAR_NAME{{.*}}, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @OBJC_METH_VAR_TYPE_, i32 0, i32 0), i8* bitcast ({ i8*, i32, i64, i64 }* @"\01-[C m0].ptrauth" to i8*) }] }, section "__DATA, __objc_const" + +@protocol P +- (void) pm0; ++ (void) pm1; +@end + +@interface C

+- (void) m0; ++ (void) m1; +@end + +@implementation C +- (void) pm0 {} ++ (void) pm1 {} +- (void) m0 {} ++ (void) m1 {} +@end + +void test_method_list(C *c) { + [c m0]; + [C m1]; +} diff --git a/clang/test/CodeGenObjCXX/ptrauth-property-object-reference.mm b/clang/test/CodeGenObjCXX/ptrauth-property-object-reference.mm new file mode 100644 index 00000000000000..55707202d01c71 --- /dev/null +++ b/clang/test/CodeGenObjCXX/ptrauth-property-object-reference.mm @@ -0,0 +1,59 @@ +// RUN: %clang_cc1 %s -triple arm64-apple-ios11.0 -fobjc-runtime=ios-11.0 -fptrauth-calls -emit-llvm -o - | FileCheck %s + +extern int DEFAULT(); + +struct TCPPObject +{ + TCPPObject(); + ~TCPPObject(); + TCPPObject(const TCPPObject& inObj, int i = DEFAULT()); + TCPPObject& operator=(const TCPPObject& inObj); + int filler[64]; +}; + + +@interface MyDocument +{ +@private + TCPPObject _cppObject; + TCPPObject _cppObject1; +} +@property (assign, readwrite, atomic) const TCPPObject MyProperty; +@property (assign, readwrite, atomic) const TCPPObject MyProperty1; +@end + +@implementation MyDocument + @synthesize MyProperty = _cppObject; + @synthesize MyProperty1 = _cppObject1; +@end + +// CHECK-LABEL: @__copy_helper_atomic_property_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%struct.TCPPObject*, %struct.TCPPObject*)* @__copy_helper_atomic_property_ to i8*), i32 0, i64 0, i64 0 }, section "llvm.ptrauth", align 8 + +// CHECK-LABEL: @__assign_helper_atomic_property_.ptrauth = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (%struct.TCPPObject*, %struct.TCPPObject*)* @__assign_helper_atomic_property_ to i8*), i32 0, i64 0, i64 0 }, section "llvm.ptrauth", align 8 + +// CHECK-LABEL: define internal void @__copy_helper_atomic_property_(%struct.TCPPObject* %0, %struct.TCPPObject* %1) # +// CHECK: [[TWO:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR:%.*]], align 8 +// CHECK: [[THREE:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR1:%.*]], 
align 8 +// CHECK: [[CALL:%.*]] = call i32 @_Z7DEFAULTv() +// CHECK: call %struct.TCPPObject* @_ZN10TCPPObjectC1ERKS_i(%struct.TCPPObject* [[TWO]], %struct.TCPPObject* dereferenceable({{[0-9]+}}) [[THREE]], i32 [[CALL]]) +// CHECK: ret void + +// CHECK: define internal void @"\01-[MyDocument MyProperty]"( +// CHECK: [[ONE:%.*]] = bitcast i8* [[ADDPTR:%.*]] to %struct.TCPPObject* +// CHECK: [[TWO:%.*]] = bitcast %struct.TCPPObject* [[ONE]] to i8* +// CHECK: [[THREE:%.*]] = bitcast %struct.TCPPObject* [[AGGRESULT:%.*]] to i8* +// CHECK: call void @objc_copyCppObjectAtomic(i8* [[THREE]], i8* [[TWO]], i8* bitcast ({ i8*, i32, i64, i64 }* @__copy_helper_atomic_property_.ptrauth to i8*)) +// CHECK: ret void + +// CHECK-LABEL: define internal void @__assign_helper_atomic_property_(%struct.TCPPObject* %0, %struct.TCPPObject* %1) # +// CHECK: [[THREE:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR1:%.*]], align 8 +// CHECK: [[TWO:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR:%.*]], align 8 +// CHECK: [[CALL:%.*]] = call dereferenceable({{[0-9]+}}) %struct.TCPPObject* @_ZN10TCPPObjectaSERKS_(%struct.TCPPObject* [[TWO]], %struct.TCPPObject* dereferenceable({{[0-9]+}}) [[THREE]]) +// CHECK: ret void + +// CHECK: define internal void @"\01-[MyDocument setMyProperty:]"( +// CHECK: [[ONE:%.*]] = bitcast i8* [[ADDRPTR:%.*]] to %struct.TCPPObject* +// CHECK: [[TWO:%.*]] = bitcast %struct.TCPPObject* [[ONE]] to i8* +// CHECK: [[THREE:%.*]] = bitcast %struct.TCPPObject* [[MYPROPERTY:%.*]] to i8* +// CHECK: call void @objc_copyCppObjectAtomic(i8* [[TWO]], i8* [[THREE]], i8* bitcast ({ i8*, i32, i64, i64 }* @__assign_helper_atomic_property_.ptrauth to i8*)) +// CHECK: ret void diff --git a/clang/test/CodeGenObjCXX/ptrauth-struct-cxx-abi.mm b/clang/test/CodeGenObjCXX/ptrauth-struct-cxx-abi.mm new file mode 100644 index 00000000000000..f259a56f93c851 --- /dev/null +++ b/clang/test/CodeGenObjCXX/ptrauth-struct-cxx-abi.mm @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 
-triple arm64-apple-ios11 -fptrauth-calls -fptrauth-intrinsics -std=c++11 -fobjc-arc -emit-llvm -o - %s | FileCheck %s + +// CHECK: %[[STRUCT_ADDRDISCSTRONG0:.*]] = type { i32*, i8* } +// CHECK: %[[STRUCT_ADDRDISCSTRONG1:.*]] = type { i32*, i8* } + +#define AQ __ptrauth(1,1,50) + +struct AddrDiscStrong0 { + int * AQ f0; // Signed using address discrimination. + __strong id f1; +}; + +struct AddrDiscStrong1 { + AddrDiscStrong1(const AddrDiscStrong1 &); + int * AQ f0; // Signed using address discrimination. + __strong id f1; +}; + +// Check that AddrDiscStrong0 is destructed in the callee. + +// CHECK: define void @_Z24testParamAddrDiscStrong015AddrDiscStrong0(%[[STRUCT_ADDRDISCSTRONG0]]* %[[A:.*]]) +// CHECK: call %[[STRUCT_ADDRDISCSTRONG0]]* @_ZN15AddrDiscStrong0D1Ev(%[[STRUCT_ADDRDISCSTRONG0]]* %[[A]]) +// CHECK: ret void + +// CHECK: define linkonce_odr %[[STRUCT_ADDRDISCSTRONG0]]* @_ZN15AddrDiscStrong0D1Ev( + +void testParamAddrDiscStrong0(AddrDiscStrong0 a) { +} + +// Check that AddrDiscStrong1 is not destructed in the callee because it has a +// non-trivial copy constructor. 
+ +// CHECK: define void @_Z24testParamAddrDiscStrong115AddrDiscStrong1(%[[STRUCT_ADDRDISCSTRONG1]]* %{{.*}}) +// CHECK-NOT: call +// CHECK: ret void + +void testParamAddrDiscStrong1(AddrDiscStrong1 a) { +} diff --git a/clang/test/Driver/aarch64-cpus.c b/clang/test/Driver/aarch64-cpus.c index 32920ea2edd4ad..93250e169849f5 100644 --- a/clang/test/Driver/aarch64-cpus.c +++ b/clang/test/Driver/aarch64-cpus.c @@ -26,6 +26,9 @@ // ARM64-DARWIN: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "cyclone" // ARM64-DARWIN-SAME: "-target-feature" "+aes" +// RUN: %clang -target x86_64-apple-darwin -arch arm64e -### -c %s 2>&1 | FileCheck -check-prefix=ARM64E-DARWIN %s +// ARM64E-DARWIN: "-cc1"{{.*}} "-triple" "arm64e{{.*}}" "-target-cpu" "vortex" + // RUN: %clang -target aarch64 -mcpu=cortex-a35 -### -c %s 2>&1 | FileCheck -check-prefix=CA35 %s // RUN: %clang -target aarch64 -mlittle-endian -mcpu=cortex-a35 -### -c %s 2>&1 | FileCheck -check-prefix=CA35 %s // RUN: %clang -target aarch64_be -mlittle-endian -mcpu=cortex-a35 -### -c %s 2>&1 | FileCheck -check-prefix=CA35 %s @@ -282,6 +285,16 @@ // ARM64-THUNDERX2T99-TUNE: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "generic" // ARM64-THUNDERX2T99-TUNE-NOT: +v8.1a +// RUN: %clang -target arm64-apple-darwin -arch arm64 -mcpu=vortex -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-VORTEX %s +// ARM64-VORTEX: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "vortex" "-target-feature" "+v8.3a" "-target-feature" "+fp-armv8" "-target-feature" "+neon" "-target-feature" "+crc" "-target-feature" "+crypto" "-target-feature" "+fullfp16" "-target-feature" "+ras" "-target-feature" "+lse" "-target-feature" "+rdm" "-target-feature" "+rcpc" "-target-feature" "+zcm" "-target-feature" "+zcz" "-target-feature" "+sha2" "-target-feature" "+aes" + +// Check that we also support -march, which overrides -mcpu (same for e.g. -march=v8.1a -mcpu=cyclone not enabling crc). 
+// RUN: %clang -target arm64-apple-darwin -arch arm64 -march=armv8.3a -mcpu=vortex -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-VORTEX-V83 %s +// ARM64-VORTEX-V83: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "vortex" "-target-feature" "+neon" "-target-feature" "+v8.3a" "-target-feature" "+zcm" "-target-feature" "+zcz" + +// RUN: %clang -target arm64-apple-darwin -arch arm64 -mcpu=lightning -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-LIGHTNING %s +// ARM64-LIGHTNING: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "lightning" "-target-feature" "+v8.4a" "-target-feature" "+fp-armv8" "-target-feature" "+neon" "-target-feature" "+crc" "-target-feature" "+crypto" "-target-feature" "+dotprod" "-target-feature" "+fullfp16" "-target-feature" "+ras" "-target-feature" "+lse" "-target-feature" "+rdm" "-target-feature" "+rcpc" "-target-feature" "+zcm" "-target-feature" "+zcz" "-target-feature" "+fp16fml" "-target-feature" "+sm4" "-target-feature" "+sha3" "-target-feature" "+sha2" "-target-feature" "+aes" + // RUN: %clang -target aarch64_be -### -c %s 2>&1 | FileCheck -check-prefix=GENERIC-BE %s // RUN: %clang -target aarch64 -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=GENERIC-BE %s // RUN: %clang -target aarch64_be -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=GENERIC-BE %s diff --git a/clang/test/Driver/arch-arm64e.c b/clang/test/Driver/arch-arm64e.c new file mode 100644 index 00000000000000..40b471c8f130da --- /dev/null +++ b/clang/test/Driver/arch-arm64e.c @@ -0,0 +1,69 @@ +// Check that we can manually enable specific ptrauth features. 
+ +// RUN: %clang -arch arm64 -c %s -### 2>&1 | FileCheck %s --check-prefix NONE +// NONE: "-cc1" +// NONE-NOT: "-fptrauth-intrinsics" +// NONE-NOT: "-fptrauth-calls" +// NONE-NOT: "-fptrauth-returns" +// NONE-NOT: "-fptrauth-indirect-gotos" +// NONE-NOT: "-fptrauth-auth-traps" +// NONE-NOT: "-fptrauth-soft" + +// RUN: %clang -arch arm64 -fptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix CALL +// CALL: "-cc1"{{.*}} {{.*}} "-fptrauth-calls" + +// RUN: %clang -arch arm64 -fptrauth-intrinsics -c %s -### 2>&1 | FileCheck %s --check-prefix INTRIN +// INTRIN: "-cc1"{{.*}} {{.*}} "-fptrauth-intrinsics" + +// RUN: %clang -arch arm64 -fptrauth-returns -c %s -### 2>&1 | FileCheck %s --check-prefix RETURN +// RETURN: "-cc1"{{.*}} {{.*}} "-fptrauth-returns" + +// RUN: %clang -arch arm64 -fptrauth-indirect-gotos -c %s -### 2>&1 | FileCheck %s --check-prefix INDGOTO +// INDGOTO: "-cc1"{{.*}} {{.*}} "-fptrauth-indirect-gotos" + +// RUN: %clang -arch arm64 -fptrauth-auth-traps -c %s -### 2>&1 | FileCheck %s --check-prefix TRAPS +// TRAPS: "-cc1"{{.*}} {{.*}} "-fptrauth-auth-traps" + +// RUN: %clang -arch arm64 -fptrauth-soft -c %s -### 2>&1 | FileCheck %s --check-prefix SOFT +// SOFT: "-cc1"{{.*}} {{.*}} "-fptrauth-soft" + + +// Check the arm64e defaults. 
+ +// RUN: %clang -arch arm64e -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT +// RUN: %clang -mkernel -arch arm64e -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT +// RUN: %clang -fapple-kext -arch arm64e -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT +// DEFAULT: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-target-cpu" "vortex"{{.*}} + + +// RUN: %clang -arch arm64e -fno-ptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT-NOCALL +// RUN: %clang -mkernel -arch arm64e -fno-ptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT-NOCALL +// RUN: %clang -fapple-kext -arch arm64e -fno-ptrauth-calls -c %s -### 2>&1 | FileCheck %s --check-prefix DEFAULT-NOCALL +// DEFAULT-NOCALL-NOT: "-fptrauth-calls" +// DEFAULT-NOCALL: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-target-cpu" "vortex" + + +// RUN: %clang -arch arm64e -fno-ptrauth-returns -c %s -### 2>&1 | FileCheck %s --check-prefix NORET + +// NORET-NOT: "-fptrauth-returns" +// NORET: "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-target-cpu" "vortex" + +// RUN: %clang -arch arm64e -fno-ptrauth-intrinsics -c %s -### 2>&1 | FileCheck %s --check-prefix NOINTRIN + +// NOINTRIN: "-fptrauth-returns" +// NOINTRIN-NOT: "-fptrauth-intrinsics" +// NOINTRIN: "-fptrauth-calls" "-fptrauth-indirect-gotos" "-fptrauth-auth-traps" "-target-cpu" "vortex"{{.*}} + + +// RUN: %clang -arch arm64e -fno-ptrauth-auth-traps -c %s -### 2>&1 | FileCheck %s --check-prefix NOTRAP +// NOTRAP: "-fptrauth-returns" "-fptrauth-intrinsics" "-fptrauth-calls" "-fptrauth-indirect-gotos" "-target-cpu" "vortex" + + +// Check the CPU defaults and overrides. 
+ +// RUN: %clang -arch arm64e -c %s -### 2>&1 | FileCheck %s --check-prefix VORTEX +// RUN: %clang -arch arm64e -mcpu=vortex -c %s -### 2>&1 | FileCheck %s --check-prefix VORTEX +// RUN: %clang -arch arm64e -mcpu=cyclone -c %s -### 2>&1 | FileCheck %s --check-prefix VORTEX +// RUN: %clang -arch arm64e -mcpu=lightning -c %s -### 2>&1 | FileCheck %s --check-prefix LIGHTNING +// VORTEX: "-cc1"{{.*}} "-target-cpu" "vortex" +// LIGHTNING: "-cc1"{{.*}} "-target-cpu" "lightning" diff --git a/clang/test/Frontend/diagnostics-order.c b/clang/test/Frontend/diagnostics-order.c index 37c0cd90d15cca..40ca377b80db53 100644 --- a/clang/test/Frontend/diagnostics-order.c +++ b/clang/test/Frontend/diagnostics-order.c @@ -7,6 +7,6 @@ // // CHECK: error: invalid value '-foo' in '-verify=' // CHECK-NEXT: note: -verify prefixes must start with a letter and contain only alphanumeric characters, hyphens, and underscores -// CHECK-NEXT: warning: optimization level '-O999' is not supported // CHECK-NEXT: error: invalid value 'bogus' in '-std=bogus' // CHECK-NEXT: note: use {{.*}} for {{.*}} standard +// CHECK: warning: optimization level '-O999' is not supported diff --git a/clang/test/Preprocessor/arm64e.c b/clang/test/Preprocessor/arm64e.c new file mode 100644 index 00000000000000..26653ace5852b1 --- /dev/null +++ b/clang/test/Preprocessor/arm64e.c @@ -0,0 +1,5 @@ +// RUN: %clang_cc1 -E -dM -ffreestanding -triple=arm64e-apple-ios < /dev/null | FileCheck %s + +// CHECK: #define __ARM64_ARCH_8__ 1 +// CHECK: #define __arm64__ 1 +// CHECK: #define __arm64e__ 1 diff --git a/clang/test/Preprocessor/ptrauth_feature.c b/clang/test/Preprocessor/ptrauth_feature.c new file mode 100644 index 00000000000000..c9ae6bb3f64ae3 --- /dev/null +++ b/clang/test/Preprocessor/ptrauth_feature.c @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 %s -E -triple=arm64-- | FileCheck %s --check-prefixes=NOCALLS,NOINTRIN,NORETS,NOQUAL +// RUN: %clang_cc1 %s -E -triple=arm64-- -fptrauth-calls | FileCheck %s 
--check-prefixes=CALLS,NOINTRIN,NORETS,NOQUAL +// RUN: %clang_cc1 %s -E -triple=arm64-- -fptrauth-returns | FileCheck %s --check-prefixes=NOCALLS,NOINTRIN,RETS,NOQUAL +// RUN: %clang_cc1 %s -E -triple=arm64-- -fptrauth-intrinsics | FileCheck %s --check-prefixes=NOCALLS,INTRIN,NORETS,QUAL + +#if __has_feature(ptrauth_calls) +// CALLS: has_ptrauth_calls +void has_ptrauth_calls() {} +#else +// NOCALLS: no_ptrauth_calls +void no_ptrauth_calls() {} +#endif + +#if __has_feature(ptrauth_intrinsics) +// INTRIN: has_ptrauth_intrinsics +void has_ptrauth_intrinsics() {} +#else +// NOINTRIN: no_ptrauth_intrinsics +void no_ptrauth_intrinsics() {} +#endif + +#if __has_feature(ptrauth_returns) +// RETS: has_ptrauth_returns +void has_ptrauth_returns() {} +#else +// NORETS: no_ptrauth_returns +void no_ptrauth_returns() {} +#endif + +#if __has_feature(ptrauth_qualifier) +// QUAL: has_ptrauth_qualifier +void has_ptrauth_qualifier() {} +#else +// NOQUAL: no_ptrauth_qualifier +void no_ptrauth_qualifier() {} +#endif diff --git a/clang/test/Sema/ptrauth-intrinsics-macro.c b/clang/test/Sema/ptrauth-intrinsics-macro.c new file mode 100644 index 00000000000000..6e26933ab6be62 --- /dev/null +++ b/clang/test/Sema/ptrauth-intrinsics-macro.c @@ -0,0 +1,40 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fsyntax-only -verify -fptrauth-intrinsics %s +// RUN: %clang_cc1 -triple arm64-apple-ios -fsyntax-only -verify %s + +// expected-no-diagnostics + +#include + +#define VALID_CODE_KEY 0 +#define VALID_DATA_KEY 2 + +extern int dv; + +void test(int *dp, int (*fp)(int), int value) { + dp = ptrauth_strip(dp, VALID_DATA_KEY); + uintptr_t t0 = ptrauth_blend_discriminator(dp, value); + t0 = ptrauth_type_discriminator(int (*)(int)); + dp = ptrauth_sign_constant(&dv, VALID_DATA_KEY, 0); + dp = ptrauth_sign_unauthenticated(dp, VALID_DATA_KEY, 0); + dp = ptrauth_auth_and_resign(dp, VALID_DATA_KEY, dp, VALID_DATA_KEY, dp); + fp = ptrauth_auth_function(fp, VALID_CODE_KEY, 0); + dp = ptrauth_auth_data(dp, 
VALID_DATA_KEY, 0); + int t1 = ptrauth_string_discriminator("string"); + int t2 = ptrauth_sign_generic_data(dp, 0); + + void * __ptrauth_function_pointer p0; + void * __ptrauth_return_address p1; + void * __ptrauth_block_invocation_pointer p2; + void * __ptrauth_block_copy_helper p3; + void * __ptrauth_block_destroy_helper p4; + void * __ptrauth_block_byref_copy_helper p5; + void * __ptrauth_block_byref_destroy_helper p6; + void * __ptrauth_objc_method_list_imp p7; + void * __ptrauth_cxx_vtable_pointer p8; + void * __ptrauth_cxx_vtt_vtable_pointer p9; + void * __ptrauth_swift_heap_object_destructor p10; + void * __ptrauth_swift_function_pointer(VALID_CODE_KEY) p11; + void * __ptrauth_swift_class_method_pointer(VALID_CODE_KEY) p12; + void * __ptrauth_swift_protocol_witness_function_pointer(VALID_CODE_KEY) p13; + void * __ptrauth_swift_value_witness_function_pointer(VALID_CODE_KEY) p14; +} diff --git a/clang/test/Sema/ptrauth-qualifier.c b/clang/test/Sema/ptrauth-qualifier.c new file mode 100644 index 00000000000000..458bab554fb5fb --- /dev/null +++ b/clang/test/Sema/ptrauth-qualifier.c @@ -0,0 +1,127 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fsyntax-only -verify -fptrauth-intrinsics %s + +#if __has_feature(ptrauth_qualifier) +#warning __ptrauth qualifier enabled! 
+// expected-warning@-1 {{__ptrauth qualifier enabled!}} +#endif + +#if __aarch64__ +#define VALID_CODE_KEY 0 +#define VALID_DATA_KEY 2 +#define INVALID_KEY 200 +#else +#error Provide these constants if you port this test +#endif + +int * __ptrauth(VALID_DATA_KEY) valid0; + +typedef int *intp; + +int nonConstantGlobal = 5; + +__ptrauth int invalid0; // expected-error{{expected '('}} +__ptrauth() int invalid1; // expected-error{{expected expression}} +__ptrauth(INVALID_KEY) int invalid2; // expected-error{{200 does not identify a valid pointer authentication key for the current target}} +__ptrauth(VALID_DATA_KEY) int invalid3; // expected-error{{__ptrauth qualifier may only be applied to pointer types}} +__ptrauth(VALID_DATA_KEY) int *invalid4; // expected-error{{__ptrauth qualifier may only be applied to pointer types}} +int * (__ptrauth(VALID_DATA_KEY) invalid5); // expected-error{{expected identifier or '('}} expected-error{{expected ')'}} expected-note {{to match this '('}} +int * __ptrauth(VALID_DATA_KEY) __ptrauth(VALID_DATA_KEY) invalid6; // expected-error{{type 'int *__ptrauth(2,0,0)' is already __ptrauth-qualified}} +int * __ptrauth(VALID_DATA_KEY, 2) invalid7; // expected-error{{address discrimination flag for __ptrauth must be 0 or 1; value is 2}} +int * __ptrauth(VALID_DATA_KEY, -1) invalid8; // expected-error{{address discrimination flag for __ptrauth must be 0 or 1; value is -1}} +int * __ptrauth(VALID_DATA_KEY, 1, -1) invalid9; // expected-error{{extra discriminator for __ptrauth must between 0 and 65535; value is -1}} +int * __ptrauth(VALID_DATA_KEY, 1, 100000) invalid10; // expected-error{{extra discriminator for __ptrauth must between 0 and 65535; value is 100000}} +int * __ptrauth(VALID_DATA_KEY, 1, 65535, 41) invalid11; // expected-error{{__ptrauth qualifier must take between 1 and 3 arguments}} +int * __ptrauth(VALID_DATA_KEY, 1, nonConstantGlobal) invalid12; // expected-error{{argument to __ptrauth must be an integer constant expression}} + 
+int * __ptrauth(VALID_DATA_KEY) valid0; +int * __ptrauth(VALID_DATA_KEY) *valid1; +__ptrauth(VALID_DATA_KEY) intp valid2; +__ptrauth(VALID_DATA_KEY) intp *valid3; +intp __ptrauth(VALID_DATA_KEY) valid4; +intp __ptrauth(VALID_DATA_KEY) *valid5; +int * __ptrauth(VALID_DATA_KEY, 0) valid6; +int * __ptrauth(VALID_DATA_KEY, 1) valid7; +int * __ptrauth(VALID_DATA_KEY, (_Bool) 1) valid8; +int * __ptrauth(VALID_DATA_KEY, 1, 0) valid9; +int * __ptrauth(VALID_DATA_KEY, 1, 65535) valid10; + +extern intp redeclaration0; // expected-note {{previous declaration}} +extern intp __ptrauth(VALID_DATA_KEY) redeclaration0; // expected-error{{redeclaration of 'redeclaration0' with a different type: '__ptrauth(2,0,0) intp' (aka 'int *__ptrauth(2,0,0)') vs 'intp' (aka 'int *')}} + +extern intp redeclaration1; // expected-note {{previous declaration}} +extern intp __ptrauth(VALID_DATA_KEY) redeclaration1; // expected-error{{redeclaration of 'redeclaration1' with a different type: '__ptrauth(2,0,0) intp' (aka 'int *__ptrauth(2,0,0)') vs 'intp' (aka 'int *')}} + +intp __ptrauth(VALID_DATA_KEY) redeclaration2; // expected-note {{previous definition}} +intp redeclaration2 = 0; // expected-error{{redefinition of 'redeclaration2' with a different type: 'intp' (aka 'int *') vs '__ptrauth(2,0,0) intp' (aka 'int *__ptrauth(2,0,0)')}} + +intp __ptrauth(VALID_DATA_KEY) redeclaration3; // expected-note {{previous definition}} +intp redeclaration3 = 0; // expected-error{{redefinition of 'redeclaration3' with a different type: 'intp' (aka 'int *') vs '__ptrauth(2,0,0) intp' (aka 'int *__ptrauth(2,0,0)')}} + +void illegal0(intp __ptrauth(VALID_DATA_KEY)); // expected-error{{parameter types may not be qualified with __ptrauth}} +intp __ptrauth(VALID_DATA_KEY) illegal1(void); // expected-error{{return types may not be qualified with __ptrauth}} + +void test_code(intp p) { + p = (intp __ptrauth(VALID_DATA_KEY)) 0; // expected-error{{cast types may not be qualified with __ptrauth}} + + 
__ptrauth(VALID_DATA_KEY) intp pSpecial = p; + pSpecial = p; + intp pNormal = pSpecial; + pNormal = pSpecial; + + intp __ptrauth(VALID_DATA_KEY) *ppSpecial0 = &pSpecial; + intp __ptrauth(VALID_DATA_KEY) *ppSpecial1 = &pNormal; // expected-error {{initializing '__ptrauth(2,0,0) intp *' (aka 'int *__ptrauth(2,0,0) *') with an expression of type 'intp *' (aka 'int **') changes pointer-authentication of pointee type}} + intp *ppNormal0 = &pSpecial; // expected-error {{initializing 'intp *' (aka 'int **') with an expression of type '__ptrauth(2,0,0) intp *' (aka 'int *__ptrauth(2,0,0) *') changes pointer-authentication of pointee type}} + intp *ppNormal1 = &pNormal; + + intp *pp5 = (p ? &pSpecial : &pNormal); // expected-error {{__ptrauth qualification mismatch ('__ptrauth(2,0,0) intp *' (aka 'int *__ptrauth(2,0,0) *') and 'intp *' (aka 'int **'))}} +} + +void test_array(void) { + intp __ptrauth(VALID_DATA_KEY) pSpecialArray[10]; + intp __ptrauth(VALID_DATA_KEY) *ppSpecial0 = pSpecialArray; + intp __ptrauth(VALID_DATA_KEY) *ppSpecial1 = &pSpecialArray[0]; +} + +struct S0 { // expected-note 4 {{struct S0' has subobjects that are non-trivial to copy}} + intp __ptrauth(1, 1, 50) f0; // expected-note 4 {{f0 has type '__ptrauth(1,1,50) intp' (aka 'int *__ptrauth(1,1,50)') that is non-trivial to copy}} +}; + +union U0 { // expected-note 4 {{union U0' has subobjects that are non-trivial to copy}} + struct S0 s0; +}; + +struct S1 { + intp __ptrauth(1, 0, 50) f0; +}; + +union U1 { + struct S1 s1; +}; + +union U2 { // expected-note 2 {{union U2' has subobjects that are non-trivial to copy}} + intp __ptrauth(1, 1, 50) f0; // expected-note 2 {{f0 has type '__ptrauth(1,1,50) intp' (aka 'int *__ptrauth(1,1,50)') that is non-trivial to copy}} + intp __ptrauth(1, 0, 50) f1; +}; + +// Test for r353556. 
+struct S2 { // expected-note 2 {{struct S2' has subobjects that are non-trivial to copy}} + intp __ptrauth(1, 1, 50) f0[4]; // expected-note 2 {{f0 has type '__ptrauth(1,1,50) intp' (aka 'int *__ptrauth(1,1,50)') that is non-trivial to copy}} +}; + +union U3 { // expected-note 2 {{union U3' has subobjects that are non-trivial to copy}} + struct S2 s2; +}; + +struct S4 { + union U0 u0; +}; + +union U0 foo0(union U0); // expected-error {{cannot use type 'union U0' for function/method return since it is a union that is non-trivial to copy}} expected-error {{cannot use type 'union U0' for a function/method parameter since it is a union that is non-trivial to copy}} + +union U1 foo1(union U1); + +union U2 foo2(union U2); // expected-error {{cannot use type 'union U2' for function/method return since it is a union that is non-trivial to copy}} expected-error {{cannot use type 'union U2' for a function/method parameter since it is a union that is non-trivial to copy}} + +union U3 foo3(union U3); // expected-error {{cannot use type 'union U3' for function/method return since it is a union that is non-trivial to copy}} expected-error {{cannot use type 'union U3' for a function/method parameter since it is a union that is non-trivial to copy}} + +struct S4 foo4(struct S4); // expected-error {{cannot use type 'struct S4' for function/method return since it contains a union that is non-trivial to copy}} expected-error {{cannot use type 'struct S4' for a function/method parameter since it contains a union that is non-trivial to copy}} diff --git a/clang/test/Sema/ptrauth.c b/clang/test/Sema/ptrauth.c new file mode 100644 index 00000000000000..e5d67992a7f169 --- /dev/null +++ b/clang/test/Sema/ptrauth.c @@ -0,0 +1,155 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fsyntax-only -verify -fptrauth-intrinsics %s + +#if __has_feature(ptrauth_intrinsics) +#warning Pointer authentication enabled! 
+// expected-warning@-1 {{Pointer authentication enabled!}} +#endif + +#if __aarch64__ +#define VALID_CODE_KEY 0 +#define VALID_DATA_KEY 2 +#define INVALID_KEY 200 +#else +#error Provide these constants if you port this test +#endif + +#define NULL ((void*) 0) +struct A { int x; } mismatched_type; + +extern int dv; +extern int fv(int); + +void test_strip(int *dp, int (*fp)(int)) { + __builtin_ptrauth_strip(dp); // expected-error {{too few arguments}} + __builtin_ptrauth_strip(dp, VALID_DATA_KEY, dp); // expected-error {{too many arguments}} + (void) __builtin_ptrauth_strip(NULL, VALID_DATA_KEY); // no warning + + __builtin_ptrauth_strip(mismatched_type, VALID_DATA_KEY); // expected-error {{signed value must have pointer type; type here is 'struct A'}} + __builtin_ptrauth_strip(dp, mismatched_type); // expected-error {{passing 'struct A' to parameter of incompatible type 'int'}} + + int *dr = __builtin_ptrauth_strip(dp, VALID_DATA_KEY); + dr = __builtin_ptrauth_strip(dp, INVALID_KEY); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + int (*fr)(int) = __builtin_ptrauth_strip(fp, VALID_CODE_KEY); + fr = __builtin_ptrauth_strip(fp, INVALID_KEY); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + float *mismatch = __builtin_ptrauth_strip(dp, VALID_DATA_KEY); // expected-warning {{incompatible pointer types initializing 'float *' with an expression of type 'int *'}} +} + +void test_blend_discriminator(int *dp, int (*fp)(int), int value) { + __builtin_ptrauth_blend_discriminator(dp); // expected-error {{too few arguments}} + __builtin_ptrauth_blend_discriminator(dp, dp, dp); // expected-error {{too many arguments}} + (void) __builtin_ptrauth_blend_discriminator(dp, value); // no warning + + __builtin_ptrauth_blend_discriminator(mismatched_type, value); // expected-error {{blended pointer must have pointer type; type here is 'struct A'}} + 
__builtin_ptrauth_blend_discriminator(dp, mismatched_type); // expected-error {{blended integer must have integer type; type here is 'struct A'}} + + float *mismatch = __builtin_ptrauth_blend_discriminator(dp, value); // expected-warning {{incompatible integer to pointer conversion initializing 'float *' with an expression of type}} +} + +void test_sign_constant(int *dp, int (*fp)(int)) { + __builtin_ptrauth_sign_constant(&dv, VALID_DATA_KEY); // expected-error {{too few arguments}} + __builtin_ptrauth_sign_constant(&dv, VALID_DATA_KEY, &dv, &dv); // expected-error {{too many arguments}} + + __builtin_ptrauth_sign_constant(mismatched_type, VALID_DATA_KEY, 0); // expected-error {{signed value must have pointer type; type here is 'struct A'}} + __builtin_ptrauth_sign_constant(&dv, mismatched_type, 0); // expected-error {{passing 'struct A' to parameter of incompatible type 'int'}} + __builtin_ptrauth_sign_constant(&dv, VALID_DATA_KEY, mismatched_type); // expected-error {{extra discriminator must have pointer or integer type; type here is 'struct A'}} + + (void) __builtin_ptrauth_sign_constant(NULL, VALID_DATA_KEY, &dv); // expected-error {{argument to ptrauth_sign_constant must refer to a global variable or function}} + + int *dr = __builtin_ptrauth_sign_constant(&dv, VALID_DATA_KEY, 0); + dr = __builtin_ptrauth_sign_constant(&dv, INVALID_KEY, 0); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + dr = __builtin_ptrauth_sign_constant(dp, VALID_DATA_KEY, 0); // expected-error {{argument to ptrauth_sign_constant must refer to a global variable or function}} + dr = __builtin_ptrauth_sign_constant(&dv, VALID_DATA_KEY, dp); // expected-error {{discriminator argument to ptrauth_sign_constant must be a constant integer, the address of the global variable where the result will be stored, or a blend of the two}} + + int (*fr)(int) = __builtin_ptrauth_sign_constant(&fv, VALID_CODE_KEY, 0); + fr = 
__builtin_ptrauth_sign_constant(&fv, INVALID_KEY, 0); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + fr = __builtin_ptrauth_sign_constant(fp, VALID_DATA_KEY, 0); // expected-error {{argument to ptrauth_sign_constant must refer to a global variable or function}} + fr = __builtin_ptrauth_sign_constant(&fv, VALID_DATA_KEY, dp); // expected-error {{discriminator argument to ptrauth_sign_constant must be a constant integer, the address of the global variable where the result will be stored, or a blend of the two}} + + fr = __builtin_ptrauth_sign_constant(&fv, VALID_DATA_KEY, __builtin_ptrauth_blend_discriminator(&fr, 0)); // expected-error {{discriminator argument to ptrauth_sign_constant must be a constant integer, the address of the global variable where the result will be stored, or a blend of the two}} + fr = __builtin_ptrauth_sign_constant(&fv, VALID_DATA_KEY, __builtin_ptrauth_blend_discriminator(&dv, *dp)); // expected-error {{discriminator argument to ptrauth_sign_constant must be a constant integer, the address of the global variable where the result will be stored, or a blend of the two}} + fr = __builtin_ptrauth_sign_constant(&fv, VALID_DATA_KEY, __builtin_ptrauth_blend_discriminator(&dv, 0)); + + float *mismatch = __builtin_ptrauth_sign_constant(&dv, VALID_DATA_KEY, 0); // expected-warning {{incompatible pointer types initializing 'float *' with an expression of type 'int *'}} +} + +void test_sign_unauthenticated(int *dp, int (*fp)(int)) { + __builtin_ptrauth_sign_unauthenticated(dp, VALID_DATA_KEY); // expected-error {{too few arguments}} + __builtin_ptrauth_sign_unauthenticated(dp, VALID_DATA_KEY, dp, dp); // expected-error {{too many arguments}} + + __builtin_ptrauth_sign_unauthenticated(mismatched_type, VALID_DATA_KEY, 0); // expected-error {{signed value must have pointer type; type here is 'struct A'}} + __builtin_ptrauth_sign_unauthenticated(dp, mismatched_type, 0); // expected-error {{passing 
'struct A' to parameter of incompatible type 'int'}} + __builtin_ptrauth_sign_unauthenticated(dp, VALID_DATA_KEY, mismatched_type); // expected-error {{extra discriminator must have pointer or integer type; type here is 'struct A'}} + + (void) __builtin_ptrauth_sign_unauthenticated(NULL, VALID_DATA_KEY, 0); // expected-warning {{signing a null pointer will yield a non-null pointer}} + + int *dr = __builtin_ptrauth_sign_unauthenticated(dp, VALID_DATA_KEY, 0); + dr = __builtin_ptrauth_sign_unauthenticated(dp, INVALID_KEY, 0); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + int (*fr)(int) = __builtin_ptrauth_sign_unauthenticated(fp, VALID_CODE_KEY, 0); + fr = __builtin_ptrauth_sign_unauthenticated(fp, INVALID_KEY, 0); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + float *mismatch = __builtin_ptrauth_sign_unauthenticated(dp, VALID_DATA_KEY, 0); // expected-warning {{incompatible pointer types initializing 'float *' with an expression of type 'int *'}} +} + +void test_auth(int *dp, int (*fp)(int)) { + __builtin_ptrauth_auth(dp, VALID_DATA_KEY); // expected-error {{too few arguments}} + __builtin_ptrauth_auth(dp, VALID_DATA_KEY, dp, dp); // expected-error {{too many arguments}} + + __builtin_ptrauth_auth(mismatched_type, VALID_DATA_KEY, 0); // expected-error {{signed value must have pointer type; type here is 'struct A'}} + __builtin_ptrauth_auth(dp, mismatched_type, 0); // expected-error {{passing 'struct A' to parameter of incompatible type 'int'}} + __builtin_ptrauth_auth(dp, VALID_DATA_KEY, mismatched_type); // expected-error {{extra discriminator must have pointer or integer type; type here is 'struct A'}} + + (void) __builtin_ptrauth_auth(NULL, VALID_DATA_KEY, 0); // expected-warning {{authenticating a null pointer will almost certainly trap}} + + int *dr = __builtin_ptrauth_auth(dp, VALID_DATA_KEY, 0); + dr = __builtin_ptrauth_auth(dp, INVALID_KEY, 0); // 
expected-error {{does not identify a valid pointer authentication key for the current target}} + + int (*fr)(int) = __builtin_ptrauth_auth(fp, VALID_CODE_KEY, 0); + fr = __builtin_ptrauth_auth(fp, INVALID_KEY, 0); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + float *mismatch = __builtin_ptrauth_auth(dp, VALID_DATA_KEY, 0); // expected-warning {{incompatible pointer types initializing 'float *' with an expression of type 'int *'}} +} + +void test_auth_and_resign(int *dp, int (*fp)(int)) { + __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, 0, VALID_DATA_KEY); // expected-error {{too few arguments}} + __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, dp, VALID_DATA_KEY, dp, 0); // expected-error {{too many arguments}} + + __builtin_ptrauth_auth_and_resign(mismatched_type, VALID_DATA_KEY, 0, VALID_DATA_KEY, dp); // expected-error {{signed value must have pointer type; type here is 'struct A'}} + __builtin_ptrauth_auth_and_resign(dp, mismatched_type, 0, VALID_DATA_KEY, dp); // expected-error {{passing 'struct A' to parameter of incompatible type 'int'}} + __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, mismatched_type, VALID_DATA_KEY, dp); // expected-error {{extra discriminator must have pointer or integer type; type here is 'struct A'}} + __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, 0, mismatched_type, dp); // expected-error {{passing 'struct A' to parameter of incompatible type 'int'}} + __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, 0, VALID_DATA_KEY, mismatched_type); // expected-error {{extra discriminator must have pointer or integer type; type here is 'struct A'}} + + (void) __builtin_ptrauth_auth_and_resign(NULL, VALID_DATA_KEY, 0, VALID_DATA_KEY, dp); // expected-warning {{authenticating a null pointer will almost certainly trap}} + + int *dr = __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, 0, VALID_DATA_KEY, dp); + dr = __builtin_ptrauth_auth_and_resign(dp, 
INVALID_KEY, 0, VALID_DATA_KEY, dp); // expected-error {{does not identify a valid pointer authentication key for the current target}} + dr = __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, 0, INVALID_KEY, dp); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + int (*fr)(int) = __builtin_ptrauth_auth_and_resign(fp, VALID_CODE_KEY, 0, VALID_CODE_KEY, dp); + fr = __builtin_ptrauth_auth_and_resign(fp, INVALID_KEY, 0, VALID_CODE_KEY, dp); // expected-error {{does not identify a valid pointer authentication key for the current target}} + fr = __builtin_ptrauth_auth_and_resign(fp, VALID_CODE_KEY, 0, INVALID_KEY, dp); // expected-error {{does not identify a valid pointer authentication key for the current target}} + + float *mismatch = __builtin_ptrauth_auth_and_resign(dp, VALID_DATA_KEY, 0, VALID_DATA_KEY, dp); // expected-warning {{incompatible pointer types initializing 'float *' with an expression of type 'int *'}} +} + +void test_sign_generic_data(int *dp) { + __builtin_ptrauth_sign_generic_data(dp); // expected-error {{too few arguments}} + __builtin_ptrauth_sign_generic_data(dp, 0, 0); // expected-error {{too many arguments}} + + __builtin_ptrauth_sign_generic_data(mismatched_type, 0); // expected-error {{signed value must have pointer or integer type; type here is 'struct A'}} + __builtin_ptrauth_sign_generic_data(dp, mismatched_type); // expected-error {{extra discriminator must have pointer or integer type; type here is 'struct A'}} + + (void) __builtin_ptrauth_sign_generic_data(NULL, 0); // no warning + + unsigned long dr = __builtin_ptrauth_sign_generic_data(dp, 0); + dr = __builtin_ptrauth_sign_generic_data(dp, &dv); + dr = __builtin_ptrauth_sign_generic_data(12314, 0); + dr = __builtin_ptrauth_sign_generic_data(12314, &dv); + + int *mismatch = __builtin_ptrauth_sign_generic_data(dp, 0); // expected-warning {{incompatible integer to pointer conversion initializing 'int *' with an expression of type}} +} 
diff --git a/clang/test/SemaCXX/ptrauth-qualifier.cpp b/clang/test/SemaCXX/ptrauth-qualifier.cpp new file mode 100644 index 00000000000000..e1fd10121ec847 --- /dev/null +++ b/clang/test/SemaCXX/ptrauth-qualifier.cpp @@ -0,0 +1,120 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -std=c++11 -fptrauth-calls -fptrauth-intrinsics -verify -fsyntax-only %s + +#define AQ __ptrauth(1,1,50) +#define IQ __ptrauth(1,0,50) + +struct __attribute__((trivial_abi)) AddrDisc { // expected-warning {{'trivial_abi' cannot be applied to 'AddrDisc'}} + int * AQ m0; +}; + +struct __attribute__((trivial_abi)) NoAddrDisc { + int * IQ m0; +}; + +namespace test_union { + + union U0 { + int * AQ f0; // expected-note 4 {{'U0' is implicitly deleted because variant field 'f0' has an address-discriminated ptrauth qualifier}} + + // ptrauth fields that don't have an address-discriminated qualifier don't + // delete the special functions. + int * IQ f1; + }; + + union U1 { + int * AQ f0; // expected-note 8 {{'U1' is implicitly deleted because variant field 'f0' has an address-discriminated ptrauth qualifier}} + U1() = default; + ~U1() = default; + U1(const U1 &) = default; // expected-warning {{explicitly defaulted copy constructor is implicitly deleted}} expected-note 2 {{explicitly defaulted function was implicitly deleted here}} + U1(U1 &&) = default; // expected-warning {{explicitly defaulted move constructor is implicitly deleted}} + U1 & operator=(const U1 &) = default; // expected-warning {{explicitly defaulted copy assignment operator is implicitly deleted}} expected-note 2 {{explicitly defaulted function was implicitly deleted here}} + U1 & operator=(U1 &&) = default; // expected-warning {{explicitly defaulted move assignment operator is implicitly deleted}} + }; + + // It's fine if the user has explicitly defined the special functions. 
+ union U2 { + int * AQ f0; + U2() = default; + ~U2() = default; + U2(const U2 &); + U2(U2 &&); + U2 & operator=(const U2 &); + U2 & operator=(U2 &&); + }; + + // Address-discriminated ptrauth fields in anonymous union fields delete the + // defaulted copy/move constructors/assignment operators of the containing + // class. + struct S0 { + union { + int * AQ f0; // expected-note 4 {{'' is implicitly deleted because variant field 'f0' has an address-discriminated ptrauth qualifier}} + char f1; + }; + }; + + struct S1 { + union { + union { // expected-note 2 {{'S1' is implicitly deleted because variant field '' has a non-trivial}} expected-note 2 {{'S1' is implicitly deleted because field '' has a deleted}} + int * AQ f0; + char f1; + }; + int f2; + }; + }; + + U0 *x0; + U1 *x1; + U2 *x2; + S0 *x3; + S1 *x4; + + // No diagnostics since constructors/destructors of the unions aren't deleted by default. + void testDefaultConstructor() { + U0 u0; + U1 u1; + U2 u2; + S0 s0; + S1 s1; + } + + // No diagnostics since destructors of the unions aren't deleted by default. 
+ void testDestructor(U0 *u0, U1 *u1, U2 *u2, S0 *s0, S1 *s1) { + delete u0; + delete u1; + delete u2; + delete s0; + delete s1; + } + + void testCopyConstructor(U0 *u0, U1 *u1, U2 *u2, S0 *s0, S1 *s1) { + U0 t0(*u0); // expected-error {{call to implicitly-deleted copy constructor}} + U1 t1(*u1); // expected-error {{call to implicitly-deleted copy constructor}} + U2 t2(*u2); + S0 t3(*s0); // expected-error {{call to implicitly-deleted copy constructor}} + S1 t4(*s1); // expected-error {{call to implicitly-deleted copy constructor}} + } + + void testCopyAssignment(U0 *u0, U1 *u1, U2 *u2, S0 *s0, S1 *s1) { + *x0 = *u0; // expected-error {{cannot be assigned because its copy assignment operator is implicitly deleted}} + *x1 = *u1; // expected-error {{cannot be assigned because its copy assignment operator is implicitly deleted}} + *x2 = *u2; + *x3 = *s0; // expected-error {{cannot be assigned because its copy assignment operator is implicitly deleted}} + *x4 = *s1; // expected-error {{cannot be assigned because its copy assignment operator is implicitly deleted}} + } + + void testMoveConstructor(U0 *u0, U1 *u1, U2 *u2, S0 *s0, S1 *s1) { + U0 t0(static_cast(*u0)); // expected-error {{call to implicitly-deleted copy constructor}} + U1 t1(static_cast(*u1)); // expected-error {{call to implicitly-deleted copy constructor}} + U2 t2(static_cast(*u2)); + S0 t3(static_cast(*s0)); // expected-error {{call to implicitly-deleted copy constructor}} + S1 t4(static_cast(*s1)); // expected-error {{call to implicitly-deleted copy constructor}} + } + + void testMoveAssignment(U0 *u0, U1 *u1, U2 *u2, S0 *s0, S1 *s1) { + *x0 = static_cast(*u0); // expected-error {{cannot be assigned because its copy assignment operator is implicitly deleted}} + *x1 = static_cast(*u1); // expected-error {{cannot be assigned because its copy assignment operator is implicitly deleted}} + *x2 = static_cast(*u2); + *x3 = static_cast(*s0); // expected-error {{cannot be assigned because its copy assignment 
operator is implicitly deleted}} + *x4 = static_cast(*s1); // expected-error {{cannot be assigned because its copy assignment operator is implicitly deleted}} + } +} diff --git a/clang/test/SemaCXX/ptrauth.cpp b/clang/test/SemaCXX/ptrauth.cpp new file mode 100644 index 00000000000000..691edc9a2e576d --- /dev/null +++ b/clang/test/SemaCXX/ptrauth.cpp @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -std=c++17 -fsyntax-only -verify -fptrauth-intrinsics %s + +struct S { + virtual int foo(); +}; + +template +constexpr unsigned dependentOperandDisc() { + return __builtin_ptrauth_type_discriminator(T); +} + +void test_builtin_ptrauth_type_discriminator(unsigned s) { + typedef int (S::*MemFnTy)(); + MemFnTy memFnPtr; + int (S::*memFnPtr2)(); + + constexpr unsigned d = __builtin_ptrauth_type_discriminator(MemFnTy); + static_assert(d == 60844); + static_assert(__builtin_ptrauth_type_discriminator(int (S::*)()) == d); + static_assert(__builtin_ptrauth_type_discriminator(decltype(memFnPtr)) == d); + static_assert(__builtin_ptrauth_type_discriminator(decltype(memFnPtr2)) == d); + static_assert(__builtin_ptrauth_type_discriminator(decltype(&S::foo)) == d); + static_assert(dependentOperandDisc() == d); + static_assert(__builtin_ptrauth_type_discriminator(void (S::*)(int)) == 39121); + static_assert(__builtin_ptrauth_type_discriminator(void (S::*)(float)) == 52453); + static_assert(__builtin_ptrauth_type_discriminator(int *) == 42396); + + int t; + int vmarray[s]; + __builtin_ptrauth_type_discriminator(t); // expected-error {{unknown type name 't'}} + __builtin_ptrauth_type_discriminator(&t); // expected-error {{expected a type}} + __builtin_ptrauth_type_discriminator(decltype(vmarray)); // expected-error {{cannot pass variably-modified type 'decltype(vmarray)'}} +} diff --git a/clang/test/SemaObjC/warn-nontrivial-struct-memaccess-ptrauth.m b/clang/test/SemaObjC/warn-nontrivial-struct-memaccess-ptrauth.m new file mode 100644 index 00000000000000..179ed478ce187d --- 
/dev/null +++ b/clang/test/SemaObjC/warn-nontrivial-struct-memaccess-ptrauth.m @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -verify %s + +void *memset(void *, int, __SIZE_TYPE__); +void bzero(void *, __SIZE_TYPE__); +void *memcpy(void *, const void *, __SIZE_TYPE__); +void *memmove(void *, const void *, __SIZE_TYPE__); + +#define AQ __ptrauth(1,1,50) +#define IQ __ptrauth(1,0,50) + +struct PtrAuthTrivial { + int f0; + int * IQ f1; +}; + +struct PtrAuthNonTrivial0 { + int f0; + int * AQ f1; // expected-note 2 {{non-trivial to copy}} + int f2; +}; + +struct PtrAuthNonTrivial1 { + int * AQ f0; // expected-note 2 {{non-trivial to copy}} + int f1; + struct PtrAuthNonTrivial0 f2; +}; + +void testPtrAuthTrivial(struct PtrAuthTrivial *d, struct PtrAuthTrivial *s) { + memset(d, 0, sizeof(struct PtrAuthTrivial)); + bzero(d, sizeof(struct PtrAuthTrivial)); + memcpy(d, s, sizeof(struct PtrAuthTrivial)); + memmove(d, s, sizeof(struct PtrAuthTrivial)); +} + +void testPtrAuthNonTrivial1(struct PtrAuthNonTrivial1 *d, + struct PtrAuthNonTrivial1 *s) { + memset(d, 0, sizeof(struct PtrAuthNonTrivial1)); + bzero(d, sizeof(struct PtrAuthNonTrivial1)); + memcpy(d, s, sizeof(struct PtrAuthNonTrivial1)); // expected-warning {{that is not trivial to primitive-copy}} expected-note {{explicitly cast the pointer to silence}} + memmove(d, s, sizeof(struct PtrAuthNonTrivial1)); // expected-warning {{that is not trivial to primitive-copy}} expected-note {{explicitly cast the pointer to silence}} +} diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index 6c86c27ea46626..08a34a1ed61b8b 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -1981,6 +1981,13 @@ site, these bundles may contain any values that are needed by the generated code. For more details, see :ref:`GC Transitions `. 
+Pointer Authentication Operand Bundles +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pointer Authentication operand bundles are characterized by the +``"ptrauth"`` operand bundle tag. They are described in the +`Pointer Authentication `_ document. + .. _moduleasm: Module-Level Inline Assembly @@ -14453,6 +14460,13 @@ The LLVM exception handling intrinsics (which all start with ``llvm.eh.`` prefix), are described in the `LLVM Exception Handling `_ document. +Pointer Authentication Intrinsics +--------------------------------- + +The LLVM pointer authentication intrinsics (which all start with +``llvm.ptrauth.`` prefix), are described in the `Pointer Authentication +`_ document. + .. _int_trampoline: Trampoline Intrinsics diff --git a/llvm/docs/PointerAuth.md b/llvm/docs/PointerAuth.md new file mode 100644 index 00000000000000..972ad7ce74cda6 --- /dev/null +++ b/llvm/docs/PointerAuth.md @@ -0,0 +1,469 @@ +# Pointer Authentication + +## Introduction + +Pointer Authentication is a mechanism by which certain pointers are signed, +are modified to embed that signature in their unused bits, and are +authenticated (have their signature checked) when used, to prevent pointers +of unknown origin from being injected into a process. + +To enforce Control Flow Integrity (CFI), this is mostly used for all code +pointers (function pointers, vtables, ...), but certain data pointers specified +by the ABI (vptr, ...) are also authenticated. + +Additionally, with clang extensions, users can specify that a given pointer +be signed/authenticated. 
+ +At the IR level, it is represented using: + +* a [set of intrinsics](#intrinsics) (to sign/authenticate pointers) +* a [special section and relocation](#authenticated-global-relocation) + (to sign globals) +* a [call operand bundle](#operand-bundle) (to authenticate called pointers) + +It is implemented by the [AArch64 target](#aarch64-support), using the +[ARMv8.3 Pointer Authentication Code](#armv8-3-pointer-authentication-code) +instructions, to support the Darwin [arm64e](#arm64e) ABI. + + +## Concepts + +### Operations + +Pointer Authentication is based on three fundamental operations: + +#### Sign +* compute a cryptographic signature of a given pointer value +* embed it within the value +* return the signed value + +#### Auth +* compute a cryptographic signature of a given value +* compare it against the embedded signature +* remove the embedded signature +* return the raw, unauthenticated, value + +#### Strip +* remove the embedded signature +* return the unauthenticated value + + +### Diversity + +To prevent any signed pointer from being used instead of any other signed +pointer, the signatures are diversified, using additional inputs: + +* a key: one of a small, fixed set. The value of the key itself is not + directly accessible, but is referenced by ptrauth operations via an + identifier. + +* salt, or extra diversity data: additional data mixed in with the value and + used by the ptrauth operations. + A concrete value is called a "discriminator", and, in the special case where + the diversity data is a pointer to the storage location of the signed value, + the value is said to be "address-discriminated". + Additionally, an arbitrary small integer can be blended into an address + discriminator to produce a blended address discriminator. + +Keys are not necessarily interchangeable, and keys can be specified to be +incompatible with certain kinds of pointers (e.g., code vs data keys/pointers). 
+Which keys are appropriate for a given kind of pointer is defined by the +target implementation. + +## LLVM IR Representation + +### Intrinsics + +These intrinsics are provided by LLVM to expose pointer authentication +operations. + + +#### '``llvm.ptrauth.sign``' + +##### Syntax: + +```llvm +declare i64 @llvm.ptrauth.sign.i64(i64 , i32 , i64 ) +``` + +##### Overview: + +The '``llvm.ptrauth.sign``' intrinsic signs an unauthenticated pointer. + + +##### Arguments: + +The ``value`` argument is the unauthenticated (raw) pointer value to be signed. +The ``key`` argument is the identifier of the key to be used to generate the +signed value. +The ``extra data`` argument is the additional diversity data to be used as a +discriminator. + +##### Semantics: + +The '``llvm.ptrauth.sign``' intrinsic implements the `sign`_ operation. +It returns a signed value. + +If ``value`` is already a signed value, the behavior is undefined. + +If ``value`` is not a pointer value for which ``key`` is appropriate, the +behavior is undefined. + + +#### '``llvm.ptrauth.auth``' + +##### Syntax: + +```llvm +declare i64 @llvm.ptrauth.auth.i64(i64 , i32 , i64 ) +``` + +##### Overview: + +The '``llvm.ptrauth.auth``' intrinsic authenticates a signed pointer. + +##### Arguments: + +The ``value`` argument is the signed pointer value to be authenticated. +The ``key`` argument is the identifier of the key that was used to generate +the signed value. +The ``extra data`` argument is the additional diversity data to be used as a +discriminator. + +##### Semantics: + +The '``llvm.ptrauth.auth``' intrinsic implements the `auth`_ operation. +It returns a raw, unauthenticated value. +If ``value`` does not have a correct signature for ``key`` and ``extra data``, +the returned value is an invalid, poison pointer. 
+ + +#### '``llvm.ptrauth.strip``' + +##### Syntax: + +```llvm +declare i64 @llvm.ptrauth.strip.i64(i64 , i32 ) +``` + +##### Overview: + +The '``llvm.ptrauth.strip``' intrinsic strips the embedded signature out of a +possibly-signed pointer. + + +##### Arguments: + +The ``value`` argument is the signed pointer value to be stripped. +The ``key`` argument is the identifier of the key that was used to generate +the signed value. + +##### Semantics: + +The '``llvm.ptrauth.strip``' intrinsic implements the `strip`_ operation. +It returns an unauthenticated value. It does **not** check that the +signature is valid. + +If ``value`` is an unauthenticated pointer value, it is returned as-is, +provided the ``key`` is appropriate for the pointer. + +If ``value`` is not a pointer value for which ``key`` is appropriate, the +behavior is undefined. + +If ``value`` is a signed pointer value, but ``key`` does not identify the +same ``key`` that was used to generate ``value``, the behavior is undefined. + + +#### '``llvm.ptrauth.resign``' + +##### Syntax: + +```llvm +declare i64 @llvm.ptrauth.resign.i64(i64 , + i32 , i64 , + i32 , i64 ) +``` + +##### Overview: + +The '``llvm.ptrauth.resign``' intrinsic re-signs a signed pointer using +a different key and diversity data. + +##### Arguments: + +The ``value`` argument is the signed pointer value to be authenticated. +The ``old key`` argument is the identifier of the key that was used to generate +the signed value. +The ``old extra data`` argument is the additional diversity data to be used as a +discriminator in the auth operation. +The ``new key`` argument is the identifier of the key to use to generate the +resigned value. +The ``new extra data`` argument is the additional diversity data to be used as a +discriminator in the sign operation. + +##### Semantics: + +The '``llvm.ptrauth.resign``' intrinsic performs a combined `auth`_ and `sign`_ +operation, without exposing the intermediate unauthenticated pointer. 
+It returns a signed value. +If ``value`` does not have a correct signature for ``old key`` and +``old extra data``, the returned value is an invalid, poison pointer. + +#### '``llvm.ptrauth.sign_generic``' + +##### Syntax: + +```llvm +declare i64 @llvm.ptrauth.sign_generic.i64(i64 , i64 ) +``` + +##### Overview: + +The '``llvm.ptrauth.sign_generic``' intrinsic computes a generic signature of +arbitrary data. + +##### Arguments: + +The ``value`` argument is the arbitrary data value to be signed. +The ``extra data`` argument is the additional diversity data to be used as a +discriminator. + +##### Semantics: + +The '``llvm.ptrauth.sign_generic``' intrinsic computes the signature of a given +combination of value and additional diversity data. + +It returns a full signature value (as opposed to a signed pointer value, with +an embedded signature). + +As opposed to [``llvm.ptrauth.sign``](#llvm-ptrauth-sign), it does not interpret +``value`` as a pointer value. Instead, it is an arbitrary data value. + + +#### '``llvm.ptrauth.blend``' + +##### Syntax: + +```llvm +declare i64 @llvm.ptrauth.blend.i64(i64
<address discriminator>
, i64 ) +``` + +##### Overview: + +The '``llvm.ptrauth.blend``' intrinsic blends a pointer address discriminator +with a small integer discriminator to produce a new discriminator. + +##### Arguments: + +The ``address discriminator`` argument is a pointer. +The ``integer discriminator`` argument is a small integer. + +##### Semantics: + +The '``llvm.ptrauth.blend``' intrinsic combines a small integer discriminator +with a pointer address discriminator, in a way that is specified by the target +implementation. + + +### Operand Bundle + +As a way to enforce CFI, function pointers used as indirect call targets are +signed when materialized, and authenticated before calls. + +To prevent the intermediate, unauthenticated pointer from being exposed to +attackers (similar to [``llvm.ptrauth.resign``](#llvm-ptrauth-resign)), the +representation guarantees that the intermediate call target is never attackable +(e.g., by being spilled to memory), using the ``ptrauth`` operand bundle. + +```llvm +define void @f(void ()* %fp) { + call void %fp() [ "ptrauth"(i32 , i64 ) ] + ret void +} +``` + +is functionally equivalent to: + +```llvm +define void @f(void ()* %fp) { + %fp_i = ptrtoint void ()* %fp to i64 + %fp_auth = call i64 @llvm.ptrauth.auth.i64(i64 %fp_i, i32 , i64 ) + %fp_auth_p = inttoptr i64 %fp_auth to void ()* + call void %fp_auth_p() + ret void +} +``` + +but with the added guarantee that ``%fp_i``, ``%fp_auth``, and ``%fp_auth_p`` +are never attackable. + + +### Function Attributes + +Two function attributes are used to describe other pointer authentication +operations that are not otherwise explicitly expressed in IR. + +#### ``ptrauth-returns`` + +``ptrauth-returns`` specifies that returns from functions should be +authenticated, and that saved return addresses should be signed. + +Note that this describes the execution environment that can be assumed by +this function, not the semantics of return instructions in this function alone. 
+ +The semantics of +[``llvm.returnaddress``](LangRef.html#llvm-returnaddress-intrinsic) are not +changed (it still returns a raw, unauthenticated, return address), so it might +require an implicit strip/authenticate operation. This applies to return +addresses stored in deeper stack frames. + +#### ``ptrauth-calls`` + +``ptrauth-calls`` specifies that calls emitted in this function should be +authenticated according to the platform ABI. + +Calls represented by ``call``/``invoke`` instructions in IR are not affected by +this attribute, as they should already be annotated with the +[``ptrauth`` operand bundle](#operand-bundle). + +The ``ptrauth-calls`` attribute only describes calls emitted by the backend, +as part of target-specific lowering (e.g., runtime calls for TLS accesses). + + +### Authenticated Global Relocation + +[Intrinsics](#intrinsics) can be used to produce signed pointers dynamically, +in code, but not for signed pointers referenced by constants, in, e.g., global +initializers. + +The latter are represented using a special kind of global describing an +authenticated relocation (producing a signed pointer). + +These special global must live in section '``llvm.ptrauth``', and have a +specific type. + +```llvm +@fp.ptrauth = constant { i8*, i32, i64, i64 } + { i8* , + i32 , + i64
<address discriminator>, + i64 <integer discriminator> + }, section "llvm.ptrauth" +``` + +is equivalent to ``@fp.ptrauth`` being initialized with: + +```llvm + %disc = call i64 @llvm.ptrauth.blend.i64(i64 <address discriminator>
, i64 ) + %signed_fp = call i64 @llvm.ptrauth.sign.i64(i64 bitcast (i8* to i64), i32 , i64 %disc) + %fp_p_loc = bitcast { i8*, i32, i64, i64 }* @fp.ptrauth to i64* + store i64 %signed_fp, i8* %fp_p_loc +``` + +Note that this is a temporary representation, chosen to minimize divergence with +upstream. Ideally, this would simply be a new kind of ConstantExpr. + + + +## AArch64 Support + +AArch64 is currently the only target with full support of the pointer +authentication primitives, based on ARMv8.3 instructions. + +### ARMv8.3 Pointer Authentication Code + +[ARMv8.3] is an ISA extension that includes Pointer Authentication Code (PAC) +instructions. + +[ARMv8.3]: https://developer.arm.com/products/architecture/cpu-architecture/a-profile/docs/ddi0487/latest + +#### Keys + +5 keys are supported by ARMv8.3. + +Of those, 4 keys are interchangeably usable to specify the key used in IR +constructs: +* ``ASIA``/``ASIB`` are instruction keys (encoded as respectively 0 and 1). +* ``ASDA``/``ASDB`` are data keys (encoded as respectively 2 and 3). + +``ASGA`` is a special key that cannot be explicitly specified, and is only ever +used implicitly, to implement the +[``llvm.ptrauth.sign_generic``](#llvm-ptrauth-sign-generic) intrinsic. + +#### Instructions + +The IR [Intrinsics](#intrinsics) described above map onto these +instructions as such: +* [``llvm.ptrauth.sign``](#llvm-ptrauth-sign): ``PAC{I,D}{A,B}{Z,SP,}`` +* [``llvm.ptrauth.auth``](#llvm-ptrauth-auth): ``AUT{I,D}{A,B}{Z,SP,}`` +* [``llvm.ptrauth.strip``](#llvm-ptrauth-strip): ``XPAC{I,D}`` +* [``llvm.ptrauth.blend``](#llvm-ptrauth-blend): The semantics of the + blend operation are, in effect, specified by the ABI. arm64e specifies it as + a ``MOVK`` into the high 16-bits. +* [``llvm.ptrauth.sign_generic``](#llvm-ptrauth-sign-generic): ``PACGA`` +* [``llvm.ptrauth.resign``](#llvm-ptrauth-resign): ``AUT*+PAC*``. 
These are + represented as a single pseudo-instruction in the backend to guarantee that + the intermediate unauthenticated value is not spilled and attackable. + +### arm64e + +Darwin supports ARMv8.3 Pointer Authentication Codes via the arm64e MachO +architecture slice. + +#### CPU Subtype + +The arm64e slice is an extension of the ``arm64`` slice (so uses the same +MachO ``cpu_type``, ``CPU_TYPE_ARM64``). + +It is mainly represented using the ``cpu_subtype`` 2, or ``CPU_SUBTYPE_ARM64E``. + +The subtype also encodes the version of the pointer authentication ABI used in +the object: + +``` +| 31-28 | 28-25 | 24-0 | +| ----- | ------------ | -------------- | +| 0000 | ABI version | 0000 0000 0010 | +``` + + +#### Assembly Representation + +At the assembly level, +[Authenticated Relocations](#authenticated-global-relocation) are represented +using the ``@AUTH`` modifier: + +```asm + .quad _target@AUTH(,[,addr]) +``` + +where: +* ``key`` is the ARMv8.3 key identifier (``ia``, ``ib``, ``da``, ``db``) +* ``discriminator`` is the 16-bit unsigned discriminator value +* ``addr`` signifies that the authenticated pointer is address-discriminated + (that is, that the relocation's target address is to be blended into the + ``discriminator`` before it is used in the sign operation. + +For example: +```asm + _authenticated_reference_to_sym: + .quad _sym@AUTH(db,0) + + _authenticated_reference_to_sym_addr_disc: + .quad _sym@AUTH(ia,12,addr) +``` + +#### Object File Representation + +At the binary object file level, +[Authenticated Relocations](#authenticated-global-relocation) are represented +using the ``ARM64_RELOC_AUTHENTICATED_POINTER`` relocation kind (with value +``11``). 
+ +The pointer authentication information is encoded into the addend, as such: + +``` +| 63 | 62 | 61-51 | 50-49 | 48 | 47 - 32 | 31 - 0 | +| -- | -- | ----- | ----- | ------ | --------------- | -------- | +| 1 | 0 | 0 | key | addr | discriminator | addend | +``` diff --git a/llvm/docs/Reference.rst b/llvm/docs/Reference.rst index 4c421a209274ed..1b2d0772e53ae2 100644 --- a/llvm/docs/Reference.rst +++ b/llvm/docs/Reference.rst @@ -35,6 +35,7 @@ LLVM and API reference documentation. OptBisect ORCv2 PDB/index + PointerAuth ScudoHardenedAllocator SegmentedStacks StackMaps @@ -207,5 +208,9 @@ Additional Topics :doc:`Coroutines` LLVM support for coroutines. +:doc:`PointerAuth` + A description of pointer authentication, its LLVM IR representation, and its + support in the backend. + :doc:`YamlIO` A reference guide for using LLVM's YAML I/O library. diff --git a/llvm/include/llvm/ADT/Triple.h b/llvm/include/llvm/ADT/Triple.h index edeb31efab8013..b66d663ae7c147 100644 --- a/llvm/include/llvm/ADT/Triple.h +++ b/llvm/include/llvm/ADT/Triple.h @@ -124,6 +124,8 @@ class Triple { ARMSubArch_v5te, ARMSubArch_v4t, + AArch64SubArch_E, + KalimbaSubArch_v3, KalimbaSubArch_v4, KalimbaSubArch_v5, diff --git a/llvm/include/llvm/BinaryFormat/Dwarf.def b/llvm/include/llvm/BinaryFormat/Dwarf.def index 34a7410f747440..3ec75e6bc56a26 100644 --- a/llvm/include/llvm/BinaryFormat/Dwarf.def +++ b/llvm/include/llvm/BinaryFormat/Dwarf.def @@ -206,6 +206,7 @@ HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack, 0, GNU, DW_KIND_NONE) HANDLE_DW_TAG(0x4109, GNU_call_site, 0, GNU, DW_KIND_NONE) HANDLE_DW_TAG(0x410a, GNU_call_site_parameter, 0, GNU, DW_KIND_NONE) HANDLE_DW_TAG(0x4200, APPLE_property, 0, APPLE, DW_KIND_NONE) +HANDLE_DW_TAG(0x4300, APPLE_ptrauth_type, 0, LLVM, DW_KIND_NONE) HANDLE_DW_TAG(0xb000, BORLAND_property, 0, BORLAND, DW_KIND_NONE) HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string, 0, BORLAND, DW_KIND_TYPE) HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array, 0, BORLAND, DW_KIND_TYPE) @@ 
-408,6 +409,9 @@ HANDLE_DW_AT(0x3e01, LLVM_config_macros, 0, LLVM) HANDLE_DW_AT(0x3e02, LLVM_isysroot, 0, LLVM) HANDLE_DW_AT(0x3e03, LLVM_tag_offset, 0, LLVM) // Apple extensions. +HANDLE_DW_AT(0x3e04, APPLE_ptrauth_key, 0, LLVM) +HANDLE_DW_AT(0x3e05, APPLE_ptrauth_address_discriminated, 0, LLVM) +HANDLE_DW_AT(0x3e06, APPLE_ptrauth_extra_discriminator, 0, LLVM) HANDLE_DW_AT(0x3fe1, APPLE_optimized, 0, APPLE) HANDLE_DW_AT(0x3fe2, APPLE_flags, 0, APPLE) HANDLE_DW_AT(0x3fe3, APPLE_isa, 0, APPLE) diff --git a/llvm/include/llvm/BinaryFormat/MachO.h b/llvm/include/llvm/BinaryFormat/MachO.h index fb50e549cb9dae..169bcffd5a4a66 100644 --- a/llvm/include/llvm/BinaryFormat/MachO.h +++ b/llvm/include/llvm/BinaryFormat/MachO.h @@ -460,6 +460,8 @@ enum RelocationInfoType { ARM64_RELOC_TLVP_LOAD_PAGEOFF12 = 9, // Must be followed by ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12. ARM64_RELOC_ADDEND = 10, + // An authenticated pointer. + ARM64_RELOC_AUTHENTICATED_POINTER = 11, // Constant values for the r_type field in an x86_64 architecture // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h index 16298ff7b72766..c9bd44b2ecbace 100644 --- a/llvm/include/llvm/CodeGen/AsmPrinter.h +++ b/llvm/include/llvm/CodeGen/AsmPrinter.h @@ -75,6 +75,8 @@ class StackMaps; class TargetLoweringObjectFile; class TargetMachine; +class GlobalPtrAuthInfo; + /// This class is intended to be used as a driving class for all asm writers. class AsmPrinter : public MachineFunctionPass { public: @@ -441,6 +443,16 @@ class AsmPrinter : public MachineFunctionPass { /// instructions in verbose mode. virtual void emitImplicitDef(const MachineInstr *MI) const; + /// Lower the specified "llvm.ptrauth" GlobalVariable to an MCExpr. 
+ virtual const MCExpr * + lowerPtrAuthGlobalConstant(const GlobalPtrAuthInfo &PAI) { + report_fatal_error("llvm.ptrauth global lowering not implemented"); + } + + /// Lower the specified "llvm.ptrauth" GlobalVariable to an MCExpr. + virtual const MCExpr * + lowerBlockAddressConstant(const BlockAddress *BA); + //===------------------------------------------------------------------===// // Symbol Lowering Routines. //===------------------------------------------------------------------===// diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h index 658ad31fa2a640..4169bb0bbe66de 100644 --- a/llvm/include/llvm/CodeGen/ISDOpcodes.h +++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h @@ -61,6 +61,10 @@ namespace ISD { GlobalAddress, GlobalTLSAddress, FrameIndex, JumpTable, ConstantPool, ExternalSymbol, BlockAddress, + /// A llvm.ptrauth global + /// wrapper llvm.ptrauth global, ptr, key, addr-disc, disc + PtrAuthGlobalAddress, + /// The address of the GOT GLOBAL_OFFSET_TABLE, diff --git a/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h b/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h index 746e9223961300..dae5b1874bcaf1 100644 --- a/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h +++ b/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h @@ -25,6 +25,13 @@ class MCSymbol; /// MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation /// for MachO targets. class MachineModuleInfoMachO : public MachineModuleInfoImpl { +public: + /// The information specific to a Darwin '$auth_ptr' stub. + struct AuthStubInfo { + const MCExpr *Pointer; + }; + +private: /// GVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like /// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit /// is true if this GV is external. @@ -35,6 +42,11 @@ class MachineModuleInfoMachO : public MachineModuleInfoImpl { /// bit is true if this GV is external. DenseMap ThreadLocalGVStubs; + /// Darwin '$auth_ptr' stubs. 
The key is the stub symbol, like + /// "Lfoo$addend$auth_ptr$ib$12". The value is the MCExpr representing that + /// pointer, something like "_foo+addend@AUTH(ib, 12)". + DenseMap AuthGVStubs; + virtual void anchor(); // Out of line virtual method. public: @@ -50,11 +62,32 @@ class MachineModuleInfoMachO : public MachineModuleInfoImpl { return ThreadLocalGVStubs[Sym]; } + AuthStubInfo &getAuthGVStubEntry(MCSymbol *Sym) { + assert(Sym && "Key cannot be null"); + return AuthGVStubs[Sym]; + } + /// Accessor methods to return the set of stubs in sorted order. SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); } SymbolListTy GetThreadLocalGVStubList() { return getSortedStubs(ThreadLocalGVStubs); } + + typedef std::pair AuthStubPairTy; + typedef std::vector AuthStubListTy; + + AuthStubListTy getAuthGVStubList() { + AuthStubListTy List(AuthGVStubs.begin(), AuthGVStubs.end()); + + if (!List.empty()) + std::sort(List.begin(), List.end(), + [](const AuthStubPairTy &LHS, const AuthStubPairTy &RHS) { + return LHS.first->getName() < RHS.first->getName(); + }); + + AuthGVStubs.clear(); + return List; + } }; /// MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index b9bf19474ca938..0c8f5cbd63982b 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -3382,6 +3382,11 @@ class TargetLowering : public TargetLoweringBase { return false; } + /// Return true if the target supports ptrauth operand bundles. + virtual bool supportPtrAuthBundles() const { + return false; + } + /// Perform necessary initialization to handle a subset of CSRs explicitly /// via copies. This function is called at the beginning of instruction /// selection. 
@@ -3427,6 +3432,14 @@ class TargetLowering : public TargetLoweringBase { llvm_unreachable("Not Implemented"); } + /// This structure contains the information necessary for lowering + /// pointer-authenticating indirect calls. It is equivalent to the "ptrauth" + /// operand bundle found on the call instruction, if any. + struct PtrAuthInfo { + uint64_t Key; + SDValue Discriminator; + }; + /// This structure contains all information that is necessary for lowering /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder /// needs to lower a call, and targets will see this struct in their LowerCall @@ -3462,6 +3475,8 @@ class TargetLowering : public TargetLoweringBase { SmallVector Ins; SmallVector InVals; + Optional PAI; + CallLoweringInfo(SelectionDAG &DAG) : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false), @@ -3572,6 +3587,11 @@ class TargetLowering : public TargetLoweringBase { return *this; } + CallLoweringInfo &setPtrAuth(PtrAuthInfo Value) { + PAI = Value; + return *this; + } + CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) { IsPostTypeLegalization = Value; return *this; diff --git a/llvm/include/llvm/IR/DIBuilder.h b/llvm/include/llvm/IR/DIBuilder.h index ad9a35b554144a..049ed4b71c9d5a 100644 --- a/llvm/include/llvm/IR/DIBuilder.h +++ b/llvm/include/llvm/IR/DIBuilder.h @@ -207,11 +207,14 @@ namespace llvm { /// \param AlignInBits Alignment. (optional) /// \param DWARFAddressSpace DWARF address space. (optional) /// \param Name Pointer type name. (optional) - DIDerivedType *createPointerType(DIType *PointeeTy, uint64_t SizeInBits, - uint32_t AlignInBits = 0, - Optional DWARFAddressSpace = - None, - StringRef Name = ""); + DIDerivedType *createPointerType( + DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits = 0, + Optional DWARFAddressSpace = None, StringRef Name = ""); + + /// Create a __ptrauth qualifier. 
+ DIDerivedType *createPtrAuthQualifiedType(DIType *FromTy, unsigned Key, + bool IsAddressDiscriminated, + unsigned ExtraDiscriminator); /// Create debugging information entry for a pointer to member. /// \param PointeeTy Type pointed to by this pointer. diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h index 28a59576b7c6fb..eae300d919b60f 100644 --- a/llvm/include/llvm/IR/DebugInfoMetadata.h +++ b/llvm/include/llvm/IR/DebugInfoMetadata.h @@ -754,6 +754,35 @@ class DIBasicType : public DIType { /// /// TODO: Split out members (inheritance, fields, methods, etc.). class DIDerivedType : public DIType { +public: + /// Pointer authentication (__ptrauth) metadata. + struct PtrAuthData { + union { + struct { + unsigned Key : 3; + unsigned IsAddressDiscriminated : 1; + unsigned ExtraDiscriminator : 16; + } Data; + unsigned RawData; + } Payload; + + PtrAuthData(unsigned FromRawData) { Payload.RawData = FromRawData; } + PtrAuthData(const PtrAuthData &Copy) { + Payload.RawData = Copy.Payload.RawData; + } + PtrAuthData(unsigned Key, bool IsDiscr, unsigned Discriminator) { + assert(Key < 8); + assert(Discriminator <= 0xffff); + Payload.Data.Key = Key; + Payload.Data.IsAddressDiscriminated = IsDiscr; + Payload.Data.ExtraDiscriminator = Discriminator; + } + bool operator==(struct PtrAuthData Other) const { + return Payload.RawData == Other.Payload.RawData; + } + }; + +private: friend class LLVMContextImpl; friend class MDNode; @@ -764,58 +793,63 @@ class DIDerivedType : public DIType { DIDerivedType(LLVMContext &C, StorageType Storage, unsigned Tag, unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, Optional DWARFAddressSpace, - DIFlags Flags, ArrayRef Ops) + Optional PtrAuthData, DIFlags Flags, + ArrayRef Ops) : DIType(C, DIDerivedTypeKind, Storage, Tag, Line, SizeInBits, AlignInBits, OffsetInBits, Flags, Ops), - DWARFAddressSpace(DWARFAddressSpace) {} + DWARFAddressSpace(DWARFAddressSpace) { + if 
(PtrAuthData) + SubclassData32 = PtrAuthData->Payload.RawData; + } ~DIDerivedType() = default; static DIDerivedType * getImpl(LLVMContext &Context, unsigned Tag, StringRef Name, DIFile *File, unsigned Line, DIScope *Scope, DIType *BaseType, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, - Optional DWARFAddressSpace, DIFlags Flags, - Metadata *ExtraData, StorageType Storage, bool ShouldCreate = true) { + Optional DWARFAddressSpace, + Optional PtrAuthData, DIFlags Flags, Metadata *ExtraData, + StorageType Storage, bool ShouldCreate = true) { return getImpl(Context, Tag, getCanonicalMDString(Context, Name), File, Line, Scope, BaseType, SizeInBits, AlignInBits, OffsetInBits, - DWARFAddressSpace, Flags, ExtraData, Storage, ShouldCreate); - } - static DIDerivedType *getImpl(LLVMContext &Context, unsigned Tag, - MDString *Name, Metadata *File, unsigned Line, - Metadata *Scope, Metadata *BaseType, - uint64_t SizeInBits, uint32_t AlignInBits, - uint64_t OffsetInBits, - Optional DWARFAddressSpace, - DIFlags Flags, Metadata *ExtraData, - StorageType Storage, bool ShouldCreate = true); + DWARFAddressSpace, PtrAuthData, Flags, ExtraData, Storage, + ShouldCreate); + } + static DIDerivedType * + getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File, + unsigned Line, Metadata *Scope, Metadata *BaseType, + uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, + Optional DWARFAddressSpace, + Optional PtrAuthData, DIFlags Flags, Metadata *ExtraData, + StorageType Storage, bool ShouldCreate = true); TempDIDerivedType cloneImpl() const { - return getTemporary(getContext(), getTag(), getName(), getFile(), getLine(), - getScope(), getBaseType(), getSizeInBits(), - getAlignInBits(), getOffsetInBits(), - getDWARFAddressSpace(), getFlags(), getExtraData()); + return getTemporary( + getContext(), getTag(), getName(), getFile(), getLine(), getScope(), + getBaseType(), getSizeInBits(), getAlignInBits(), getOffsetInBits(), + 
getDWARFAddressSpace(), getPtrAuthData(), getFlags(), getExtraData()); } public: - DEFINE_MDNODE_GET(DIDerivedType, - (unsigned Tag, MDString *Name, Metadata *File, - unsigned Line, Metadata *Scope, Metadata *BaseType, - uint64_t SizeInBits, uint32_t AlignInBits, - uint64_t OffsetInBits, - Optional DWARFAddressSpace, DIFlags Flags, - Metadata *ExtraData = nullptr), - (Tag, Name, File, Line, Scope, BaseType, SizeInBits, - AlignInBits, OffsetInBits, DWARFAddressSpace, Flags, - ExtraData)) + DEFINE_MDNODE_GET( + DIDerivedType, + (unsigned Tag, MDString *Name, Metadata *File, unsigned Line, + Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits, + uint32_t AlignInBits, uint64_t OffsetInBits, + Optional DWARFAddressSpace, Optional PtrAuthData, + DIFlags Flags, Metadata *ExtraData = nullptr), + (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits, + OffsetInBits, DWARFAddressSpace, PtrAuthData, Flags, ExtraData)) DEFINE_MDNODE_GET(DIDerivedType, (unsigned Tag, StringRef Name, DIFile *File, unsigned Line, DIScope *Scope, DIType *BaseType, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, - Optional DWARFAddressSpace, DIFlags Flags, + Optional DWARFAddressSpace, + Optional PtrAuthData, DIFlags Flags, Metadata *ExtraData = nullptr), (Tag, Name, File, Line, Scope, BaseType, SizeInBits, - AlignInBits, OffsetInBits, DWARFAddressSpace, Flags, - ExtraData)) + AlignInBits, OffsetInBits, DWARFAddressSpace, PtrAuthData, + Flags, ExtraData)) TempDIDerivedType clone() const { return cloneImpl(); } @@ -827,6 +861,31 @@ class DIDerivedType : public DIType { /// a pointer or reference type respectively. Optional getDWARFAddressSpace() const { return DWARFAddressSpace; } + Optional getPtrAuthData() const { + return getTag() == dwarf::DW_TAG_APPLE_ptrauth_type + ? Optional(PtrAuthData(SubclassData32)) + : None; + } + + /// \returns The PointerAuth key. 
+ Optional getPtrAuthKey() const { + if (auto PtrAuthData = getPtrAuthData()) + return PtrAuthData->Payload.Data.Key; + else return None; + } + /// \returns The PointerAuth address discrimination bit. + Optional isPtrAuthAddressDiscriminated() const { + if (auto PtrAuthData = getPtrAuthData()) + return PtrAuthData->Payload.Data.IsAddressDiscriminated; + else return None; + } + /// \returns The PointerAuth extra discriminator. + Optional getPtrAuthExtraDiscriminator() const { + if (auto PtrAuthData = getPtrAuthData()) + return PtrAuthData->Payload.Data.ExtraDiscriminator; + else return None; + } + /// Get extra data associated with this derived type. /// /// Class type for pointer-to-members, objective-c property node for ivars, diff --git a/llvm/include/llvm/IR/GlobalPtrAuthInfo.h b/llvm/include/llvm/IR/GlobalPtrAuthInfo.h new file mode 100644 index 00000000000000..f5d5e57f5b62a1 --- /dev/null +++ b/llvm/include/llvm/IR/GlobalPtrAuthInfo.h @@ -0,0 +1,122 @@ +//===- GlobalPtrAuthInfo.h - Analysis tools for ptrauth globals -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file contains a set of utilities to analyze llvm.ptrauth globals, and +/// to decompose them into key, discriminator, and base pointer. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_IR_GLOBALPTRAUTHINFO_H +#define LLVM_IR_GLOBALPTRAUTHINFO_H + +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/Optional.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/Support/Error.h" + +namespace llvm { + +/// Helper class to access the information regarding an "llvm.ptrauth" global. 
+/// These globals are of the form: +/// @sg = constant { i8*, i32, i64, i64 } +/// { i8* bitcast (i32* @g to i8*), ; base pointer +/// i32 2, ; key ID +/// i64 ptrtoint (i8** @pg to i64), ; address discriminator +/// i64 42 ; discriminator +/// }, section "llvm.ptrauth" +/// +class GlobalPtrAuthInfo { + const GlobalVariable *GV; + + const ConstantStruct *getInitializer() const { + return cast(GV->getInitializer()); + } + + GlobalPtrAuthInfo(const GlobalVariable *GV) : GV(GV) {} + +public: + /// A constant value for the address discriminator which has special + /// significance to coroutine lowering. + enum { AddrDiscriminator_UseCoroStorage = 1 }; + + /// Try to analyze \p V as an authenticated global reference, and return its + /// information if successful. + static Optional analyze(const Value *V); + + /// Try to analyze \p V as an authenticated global reference, and return its + /// information if successful, or an error explaining the failure if not. + static Expected tryAnalyze(const Value *V); + + /// Access the information contained in the "llvm.ptrauth" globals. + /// @{ + /// The "llvm.ptrauth" global itself. + const GlobalVariable *getGV() const { return GV; } + + /// The pointer that is authenticated in this authenticated global reference. + const Constant *getPointer() const { + return cast(getInitializer()->getOperand(0)); + } + + Constant *getPointer() { + return cast(getInitializer()->getOperand(0)); + } + + /// The Key ID, an i32 constant. + const ConstantInt *getKey() const { + return cast(getInitializer()->getOperand(1)); + } + + /// The address discriminator if any, or the null constant. + /// If present, this must be a value equivalent to the storage location of + /// the only user of the authenticated ptrauth global. + const Constant *getAddrDiscriminator() const { + return cast(getInitializer()->getOperand(2)); + } + + /// Whether there is any non-null address discriminator. 
+ bool hasAddressDiversity() const { + return !getAddrDiscriminator()->isNullValue(); + } + + /// Whether the address uses a special address discriminator. + /// These discriminators can't be used in real pointer-auth values; they + /// can only be used in "prototype" values that indicate how some real + /// schema is supposed to be produced. + bool hasSpecialAddressDiscriminator(uint64_t value) const { + if (auto intValue = dyn_cast(getAddrDiscriminator())) + return intValue->getValue() == value; + return false; + } + + /// The discriminator. + const ConstantInt *getDiscriminator() const { + return cast(getInitializer()->getOperand(3)); + } + /// @} + + /// Check whether an authentication operation with key \p KeyV and (possibly + /// blended) discriminator \p DiscriminatorV is compatible with this + /// authenticated global reference. + bool isCompatibleWith(const Value *Key, const Value *Discriminator, + const DataLayout &DL) const; + + /// Produce a "llvm.ptrauth" global that signs a value using the given + /// schema. The result will be casted to have the same type as the value. + static llvm::Constant *create(Module &M, Constant *Pointer, ConstantInt *Key, + Constant *AddrDiscriminator, + ConstantInt *Discriminator); + + /// Produce a new "llvm.ptrauth" global for signing the given value using + /// the same schema as is stored in this info. 
+ llvm::Constant *createWithSameSchema(Module &M, Constant *Pointer) const; +}; + +} // end namespace llvm + +#endif diff --git a/llvm/include/llvm/IR/GlobalValue.h b/llvm/include/llvm/IR/GlobalValue.h index 2209881dbda622..fb6ce518b2e0fe 100644 --- a/llvm/include/llvm/IR/GlobalValue.h +++ b/llvm/include/llvm/IR/GlobalValue.h @@ -443,9 +443,7 @@ class GlobalValue : public Constant { bool hasInternalLinkage() const { return isInternalLinkage(getLinkage()); } bool hasPrivateLinkage() const { return isPrivateLinkage(getLinkage()); } bool hasLocalLinkage() const { return isLocalLinkage(getLinkage()); } - bool hasExternalWeakLinkage() const { - return isExternalWeakLinkage(getLinkage()); - } + bool hasExternalWeakLinkage() const; bool hasCommonLinkage() const { return isCommonLinkage(getLinkage()); } bool hasValidDeclarationLinkage() const { return isValidDeclarationLinkage(getLinkage()); diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h index faf58cf1901466..7789f047ea01fd 100644 --- a/llvm/include/llvm/IR/InstrTypes.h +++ b/llvm/include/llvm/IR/InstrTypes.h @@ -1868,9 +1868,9 @@ class CallBase : public Instruction { /// may read from the heap. bool hasReadingOperandBundles() const { // Implementation note: this is a conservative implementation of operand - // bundle semantics, where *any* operand bundle forces a callsite to be at - // least readonly. - return hasOperandBundles(); + // bundle semantics, where any operand bundle (other than ptrauth) forces + // a callsite to be at least readonly. 
+ return hasOperandBundlesOtherThan(LLVMContext::OB_ptrauth); } /// Return true if this operand bundle user has operand bundles that @@ -1878,7 +1878,8 @@ class CallBase : public Instruction { bool hasClobberingOperandBundles() const { for (auto &BOI : bundle_op_infos()) { if (BOI.Tag->second == LLVMContext::OB_deopt || - BOI.Tag->second == LLVMContext::OB_funclet) + BOI.Tag->second == LLVMContext::OB_funclet || + BOI.Tag->second == LLVMContext::OB_ptrauth) continue; // This instruction has an operand bundle that is not known to us. diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index 7a0263f88c2a61..e2dbee6acf12c4 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -1279,6 +1279,58 @@ def int_preserve_struct_access_index : Intrinsic<[llvm_anyptr_ty], [IntrNoMem, ImmArg<1>, ImmArg<2>]>; +//===----------------- Pointer Authentication Intrinsics ------------------===// +// + +// Sign an unauthenticated pointer (with a raw address as its value), using +// the specified key and discriminator. +// Returns the first operand, with a signature embedded instead of known bits. +def int_ptrauth_sign : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, llvm_i32_ty, + LLVMMatchType<0>], + [IntrNoMem,ImmArg<1>]>; + +// Authenticate a signed pointer, using the specified key and discriminator. +// Returns the first operand, with the signature bits removed. +// If the signature isn't valid, this returns an invalid, poisoned pointer. +def int_ptrauth_auth : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, llvm_i32_ty, + LLVMMatchType<0>], + [IntrNoMem,ImmArg<1>]>; + +// Authenticate a signed pointer (using the first set of key/discriminator), +// and resign it (using the second). +// This is a combined form of @llvm.ptrauth.sign and @llvm.ptrauth.auth, with +// an additional integrity guarantee on the intermediate value. 
+def int_ptrauth_resign : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, llvm_i32_ty, + LLVMMatchType<0>, llvm_i32_ty, + LLVMMatchType<0>], + [IntrNoMem,ImmArg<1>,ImmArg<3>]>; + +// Strip the embedded signature out of a signed pointer. +// This behaves like @llvm.ptrauth.auth, but doesn't check for the validity of +// the embedded signature. +def int_ptrauth_strip : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, llvm_i32_ty], + [IntrNoMem,ImmArg<1>]>; + +// Blend a small integer discriminator with an address discriminator, producing +// a new discriminator value. +def int_ptrauth_blend : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem]>; + +// Compute the signature of a value, using a given discriminator. +// This differs from @llvm.ptrauth.sign in that it doesn't embed the computed +// signature in the pointer, but instead returns it as a standalone value. +// That allows it to be used to sign non-pointer data: in that sense, it is +// generic. There is no generic @llvm.ptrauth.auth: instead, the signature +// can be computed using @llvm.ptrauth.sign_generic, and compared separately. +def int_ptrauth_sign_generic : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem]>; + //===----------------------------------------------------------------------===// // Target-specific intrinsics //===----------------------------------------------------------------------===// diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h index ea272740ba9dc5..9c7b818a14d4cd 100644 --- a/llvm/include/llvm/IR/LLVMContext.h +++ b/llvm/include/llvm/IR/LLVMContext.h @@ -86,6 +86,7 @@ class LLVMContext { OB_funclet = 1, // "funclet" OB_gc_transition = 2, // "gc-transition" OB_cfguardtarget = 3, // "cfguardtarget" + OB_ptrauth = 4, // "ptrauth" }; /// getMDKindID - Return a unique non-zero ID for the specified metadata kind. 
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index b8108339abbf91..44955590508751 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -383,6 +383,7 @@ void initializeSingleLoopExtractorPass(PassRegistry&); void initializeSinkingLegacyPassPass(PassRegistry&); void initializeSjLjEHPreparePass(PassRegistry&); void initializeSlotIndexesPass(PassRegistry&); +void initializeSoftPointerAuthLegacyPassPass(PassRegistry&); void initializeSpeculativeExecutionLegacyPassPass(PassRegistry&); void initializeSpillPlacementPass(PassRegistry&); void initializeStackColoringPass(PassRegistry&); diff --git a/llvm/include/llvm/LinkAllPasses.h b/llvm/include/llvm/LinkAllPasses.h index ac88165845d3c5..28a8c03814233b 100644 --- a/llvm/include/llvm/LinkAllPasses.h +++ b/llvm/include/llvm/LinkAllPasses.h @@ -214,6 +214,7 @@ namespace { (void) llvm::createPartiallyInlineLibCallsPass(); (void) llvm::createScalarizerPass(); (void) llvm::createSeparateConstOffsetFromGEPPass(); + (void) llvm::createSoftPointerAuthPass(); (void) llvm::createSpeculativeExecutionPass(); (void) llvm::createSpeculativeExecutionIfHasBranchDivergencePass(); (void) llvm::createRewriteSymbolsPass(); diff --git a/llvm/include/llvm/Support/AArch64TargetParser.def b/llvm/include/llvm/Support/AArch64TargetParser.def index 15737265dfc3b3..74a6a0bbbdedc8 100644 --- a/llvm/include/llvm/Support/AArch64TargetParser.def +++ b/llvm/include/llvm/Support/AArch64TargetParser.def @@ -120,6 +120,10 @@ AARCH64_CPU_NAME("neoverse-n1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false, AArch64::AEK_SSBS)) AARCH64_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, (AArch64::AEK_NONE)) +AARCH64_CPU_NAME("vortex", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false, + (AArch64::AEK_FP16)) +AARCH64_CPU_NAME("lightning", ARMV8_4A, FK_CRYPTO_NEON_FP_ARMV8, false, + (AArch64::AEK_FP16 | AArch64::AEK_FP16FML)) AARCH64_CPU_NAME("exynos-m1", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, 
false, (AArch64::AEK_CRC)) AARCH64_CPU_NAME("exynos-m2", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, diff --git a/llvm/include/llvm/Transforms/Instrumentation.h b/llvm/include/llvm/Transforms/Instrumentation.h index fcad1e11895fea..9854b12cdb2293 100644 --- a/llvm/include/llvm/Transforms/Instrumentation.h +++ b/llvm/include/llvm/Transforms/Instrumentation.h @@ -181,6 +181,11 @@ struct SanitizerCoverageOptions { SanitizerCoverageOptions() = default; }; +// SoftPointerAuth - This pass lowers the llvm.ptrauth intrinsics to use +// runtime function calls instead of relying on support from the +// backend, toolchain, loader, or hardware. +ModulePass *createSoftPointerAuthPass(); + /// Calculate what to divide by to scale counts. /// /// Given the maximum count, calculate a divisor that will scale all the diff --git a/llvm/include/llvm/module.modulemap b/llvm/include/llvm/module.modulemap index ecb3b37004fd68..7648db18343326 100644 --- a/llvm/include/llvm/module.modulemap +++ b/llvm/include/llvm/module.modulemap @@ -207,6 +207,7 @@ module LLVM_intrinsic_gen { module IR_CallSite { header "IR/CallSite.h" export * } module IR_ConstantFolder { header "IR/ConstantFolder.h" export * } module IR_GlobalVariable { header "IR/GlobalVariable.h" export * } + module IR_GlobalPtrAuthInfo { header "IR/GlobalPtrAuthInfo.h" export * } module IR_NoFolder { header "IR/NoFolder.h" export * } module IR_Module { header "IR/Module.h" export * } module IR_ModuleSummaryIndex { header "IR/ModuleSummaryIndex.h" export * } diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 41e1d0bd889a89..575d3766d49dc0 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -4505,7 +4505,9 @@ bool LLParser::ParseDIBasicType(MDNode *&Result, bool IsDistinct) { /// ::= !DIDerivedType(tag: DW_TAG_pointer_type, name: "int", file: !0, /// line: 7, scope: !1, baseType: !2, size: 32, /// align: 32, offset: 0, flags: 0, extraData: !3, -/// dwarfAddressSpace: 3) 
+/// dwarfAddressSpace: 3, ptrAuthKey: 1, +/// ptrAuthIsAddressDiscriminated: true, +/// ptrAuthExtraDiscriminator: 0x1234) bool LLParser::ParseDIDerivedType(MDNode *&Result, bool IsDistinct) { #define VISIT_MD_FIELDS(OPTIONAL, REQUIRED) \ REQUIRED(tag, DwarfTagField, ); \ @@ -4519,19 +4521,27 @@ bool LLParser::ParseDIDerivedType(MDNode *&Result, bool IsDistinct) { OPTIONAL(offset, MDUnsignedField, (0, UINT64_MAX)); \ OPTIONAL(flags, DIFlagField, ); \ OPTIONAL(extraData, MDField, ); \ - OPTIONAL(dwarfAddressSpace, MDUnsignedField, (UINT32_MAX, UINT32_MAX)); + OPTIONAL(dwarfAddressSpace, MDUnsignedField, (UINT32_MAX, UINT32_MAX)); \ + OPTIONAL(ptrAuthKey, MDUnsignedField, (0, 7)); \ + OPTIONAL(ptrAuthIsAddressDiscriminated, MDBoolField, ); \ + OPTIONAL(ptrAuthExtraDiscriminator, MDUnsignedField, (0, 0xffff)); PARSE_MD_FIELDS(); #undef VISIT_MD_FIELDS Optional DWARFAddressSpace; if (dwarfAddressSpace.Val != UINT32_MAX) DWARFAddressSpace = dwarfAddressSpace.Val; + Optional PtrAuthData; + if (ptrAuthKey.Val) + PtrAuthData = DIDerivedType::PtrAuthData( + (unsigned)ptrAuthKey.Val, ptrAuthIsAddressDiscriminated.Val, + (unsigned)ptrAuthExtraDiscriminator.Val); Result = GET_OR_DISTINCT(DIDerivedType, (Context, tag.Val, name.Val, file.Val, line.Val, scope.Val, baseType.Val, size.Val, align.Val, - offset.Val, DWARFAddressSpace, flags.Val, - extraData.Val)); + offset.Val, DWARFAddressSpace, PtrAuthData, + flags.Val, extraData.Val)); return false; } diff --git a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp index 7d6b91843e1b82..c40cb91e4e71c5 100644 --- a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp +++ b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp @@ -1322,7 +1322,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata( break; } case bitc::METADATA_DERIVED_TYPE: { - if (Record.size() < 12 || Record.size() > 13) + if (Record.size() < 12 || Record.size() > 14) return error("Invalid record"); // DWARF address space is encoded as 
N->getDWARFAddressSpace() + 1. 0 means @@ -1330,6 +1330,9 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata( Optional DWARFAddressSpace; if (Record.size() > 12 && Record[12]) DWARFAddressSpace = Record[12] - 1; + Optional PtrAuthData; + if (Record.size() > 13 && Record[13]) + PtrAuthData = DIDerivedType::PtrAuthData(Record[13]); IsDistinct = Record[0]; DINode::DIFlags Flags = static_cast(Record[10]); @@ -1339,7 +1342,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata( getMDOrNull(Record[3]), Record[4], getDITypeRefOrNull(Record[5]), getDITypeRefOrNull(Record[6]), Record[7], Record[8], - Record[9], DWARFAddressSpace, Flags, + Record[9], DWARFAddressSpace, PtrAuthData, Flags, getDITypeRefOrNull(Record[11]))), NextMetadataNo); NextMetadataNo++; diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index deb4019ea8ba73..987fc8178aa390 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -1569,6 +1569,8 @@ void ModuleBitcodeWriter::writeDIDerivedType(const DIDerivedType *N, Record.push_back(*DWARFAddressSpace + 1); else Record.push_back(0); + if (auto PtrAuthData = N->getPtrAuthData()) + Record.push_back(PtrAuthData->Payload.RawData); Stream.EmitRecord(bitc::METADATA_DERIVED_TYPE, Record, Abbrev); Record.clear(); diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 6bff8de4a9cc03..4725b1dba6c507 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -70,6 +70,7 @@ #include "llvm/IR/GlobalIFunc.h" #include "llvm/IR/GlobalIndirectSymbol.h" #include "llvm/IR/GlobalObject.h" +#include "llvm/IR/GlobalPtrAuthInfo.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instruction.h" @@ -1936,6 +1937,10 @@ bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) { GV->hasAvailableExternallyLinkage()) return 
true; + // Ignore ptrauth globals: they only represent references to other globals. + if (GV->getSection() == "llvm.ptrauth") + return true; + if (!GV->hasAppendingLinkage()) return false; assert(GV->hasInitializer() && "Not a special LLVM global!"); @@ -2171,11 +2176,20 @@ const MCExpr *AsmPrinter::lowerConstant(const Constant *CV) { if (const ConstantInt *CI = dyn_cast(CV)) return MCConstantExpr::create(CI->getZExtValue(), Ctx); - if (const GlobalValue *GV = dyn_cast(CV)) + if (const GlobalValue *GV = dyn_cast(CV)) { + // llvm.ptrauth globals should have been handled in emitGlobalConstantImpl, + // where we still had the offset of the reference. If we see them here, it + // means that they were obfuscated enough that we didn't detect them. + if (auto *GVB = dyn_cast(GV)) + if (GVB->getSection() == "llvm.ptrauth") + report_fatal_error("Unsupported usage of llvm.ptrauth global '" + + GV->getName() + "'"); + return MCSymbolRefExpr::create(getSymbol(GV), Ctx); + } if (const BlockAddress *BA = dyn_cast(CV)) - return MCSymbolRefExpr::create(GetBlockAddressSymbol(BA), Ctx); + return lowerBlockAddressConstant(BA); const ConstantExpr *CE = dyn_cast(CV); if (!CE) { @@ -2665,6 +2679,70 @@ static void handleIndirectSymViaGOTPCRel(AsmPrinter &AP, const MCExpr **ME, AP.GlobalGOTEquivs[GOTEquivSym] = std::make_pair(GV, NumUses); } +static const GlobalVariable * +stripPtrAuthGlobalVariableCasts(const DataLayout &DL, const Value *V) { + bool FoundInvalidCast = false; + while (true) { + if (auto *CE = dyn_cast(V)) { + if (CE->getOpcode() == Instruction::PtrToInt || + CE->getOpcode() == Instruction::IntToPtr) { + // If we found a size-changing cast, report it later, if we did find an + // llvm.ptrauth global. 
+ if (DL.getTypeAllocSize(CE->getType()) != + DL.getTypeAllocSize(CE->getOperand(0)->getType())) + FoundInvalidCast = true; + V = CE->getOperand(0); + } + } + + if (auto *GV = dyn_cast(V)) { + if (GV->getSection() != "llvm.ptrauth") + return nullptr; + if (FoundInvalidCast) + report_fatal_error("Invalid size-changing cast of llvm.ptrauth global"); + return GV; + } + + // If all else failed, and there are still casts to strip, try again. + // Otherwise, there's nothing else we can look through. + auto *Stripped = V->stripPointerCasts(); + if (Stripped != V) + V = Stripped; + else + return nullptr; + } + + llvm_unreachable("failed to strip ptrauth global casts"); +} + +static void emitPtrAuthGlobalConstant(const DataLayout &DL, + const GlobalVariable *GV, AsmPrinter &AP, + const Constant *BaseCV, uint64_t Offset, + uint64_t Size) { + auto PAI = *GlobalPtrAuthInfo::analyze(GV); + + // Check that the address discriminator matches. + // NOTE: This could in principle be a verifier, but for now this + // always-enabled very conservative check is preferable. 
+ if (PAI.hasAddressDiversity()) { + APInt ComputedOffset(DL.getPointerSizeInBits(), 0); + const GlobalVariable *BaseGV = nullptr; + + auto *BaseCast = dyn_cast(PAI.getAddrDiscriminator()); + if (BaseCast) + BaseGV = dyn_cast( + BaseCast->getPointerOperand() + ->stripAndAccumulateInBoundsConstantOffsets(DL, ComputedOffset)); + if (!BaseGV || BaseCV != BaseGV || Offset != ComputedOffset.getZExtValue()) + GV->getContext().emitError( + "Mismatched address discriminator in llvm.ptrauth global '" + + GV->getName() + "'"); + } + + auto *ME = AP.lowerPtrAuthGlobalConstant(PAI); + AP.OutStreamer->EmitValue(ME, Size); +} + static void emitGlobalConstantImpl(const DataLayout &DL, const Constant *CV, AsmPrinter &AP, const Constant *BaseCV, uint64_t Offset) { @@ -2714,6 +2792,25 @@ static void emitGlobalConstantImpl(const DataLayout &DL, const Constant *CV, return emitGlobalConstantStruct(DL, CVS, AP, BaseCV, Offset); if (const ConstantExpr *CE = dyn_cast(CV)) { + + // Lower "llvm.ptrauth" global references directly, so that we can check + // the address discriminator ourselves, while we still have the offset of + // the constant into the global that contains the reference. + // Only pointer-sized constants could contain an authenticated pointer. + if (Size == DL.getPointerSize()) { + if (auto *GV = stripPtrAuthGlobalVariableCasts(DL, CE)) { + return emitPtrAuthGlobalConstant(DL, GV, AP, BaseCV, Offset, Size); + } + } +#ifndef NDEBUG + // But in asserts builds, check that we didn't let a ptrauth reference slip + // through in a non-pointer-sized constant. + else { + auto *GV = stripPtrAuthGlobalVariableCasts(DL, CE); + assert(!GV && "Invalid non-pointer-sized llvm.ptrauth global reference"); + } +#endif + // Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of // vectors). 
if (CE->getOpcode() == Instruction::BitCast) @@ -2785,6 +2882,10 @@ MCSymbol *AsmPrinter::GetBlockAddressSymbol(const BasicBlock *BB) const { return MMI->getAddrLabelSymbol(BB); } +const MCExpr *AsmPrinter::lowerBlockAddressConstant(const BlockAddress *BA) { + return MCSymbolRefExpr::create(GetBlockAddressSymbol(BA), OutContext); +} + /// GetCPISymbol - Return the symbol for the specified constant pool entry. MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const { if (getSubtargetInfo().getTargetTriple().isWindowsMSVCEnvironment()) { diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp index be257604cd1a1f..5addcec52f1932 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -821,7 +821,15 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIDerivedType *DTy) { // or reference types. if (DTy->getDWARFAddressSpace()) addUInt(Buffer, dwarf::DW_AT_address_class, dwarf::DW_FORM_data4, - DTy->getDWARFAddressSpace().getValue()); + *DTy->getDWARFAddressSpace()); + if (auto Key = DTy->getPtrAuthKey()) + addUInt(Buffer, dwarf::DW_AT_APPLE_ptrauth_key, dwarf::DW_FORM_data1, *Key); + if (auto AddrDisc = DTy->isPtrAuthAddressDiscriminated()) + if (AddrDisc) + addFlag(Buffer, dwarf::DW_AT_APPLE_ptrauth_address_discriminated); + if (auto Disc = DTy->getPtrAuthExtraDiscriminator()) + addUInt(Buffer, dwarf::DW_AT_APPLE_ptrauth_extra_discriminator, + dwarf::DW_FORM_data2, *Disc); } void DwarfUnit::constructSubprogramArguments(DIE &Buffer, DITypeRefArray Args) { diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index da96c9ac435acf..99e9985679deac 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -78,6 +78,8 @@ #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" +#include 
"llvm/IR/GlobalPtrAuthInfo.h" +#include "llvm/IR/GlobalVariable.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" @@ -1446,8 +1448,20 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) { if (const ConstantInt *CI = dyn_cast(C)) return DAG.getConstant(*CI, getCurSDLoc(), VT); - if (const GlobalValue *GV = dyn_cast(C)) + if (const GlobalValue *GV = dyn_cast(C)) { + if (const GlobalVariable *GVB = dyn_cast(GV)) { + if (GVB->getSection() == "llvm.ptrauth") { + auto PAI = GlobalPtrAuthInfo::analyze(GVB); + return DAG.getNode(ISD::PtrAuthGlobalAddress, getCurSDLoc(), VT, + DAG.getGlobalAddress(GV, getCurSDLoc(), VT), + getValue(PAI->getPointer()), + getValue(PAI->getKey()), + getValue(PAI->getAddrDiscriminator()), + getValue(PAI->getDiscriminator())); + } + } return DAG.getGlobalAddress(GV, getCurSDLoc(), VT); + } if (isa(C)) { unsigned AS = V->getType()->getPointerAddressSpace(); @@ -2744,11 +2758,12 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; const BasicBlock *EHPadBB = I.getSuccessor(1); - // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't + // Deopt and ptrauth bundles are lowered in helper functions, and we don't // have to do anything here to lower funclet bundles. assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt, LLVMContext::OB_funclet, - LLVMContext::OB_cfguardtarget}) && + LLVMContext::OB_cfguardtarget, + LLVMContext::OB_ptrauth}) && "Cannot lower invokes with arbitrary operand bundles yet!"); const Value *Callee(I.getCalledValue()); @@ -2790,6 +2805,8 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { // intrinsic, and right now there are no plans to support other intrinsics // with deopt state. 
LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB); + } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) { + LowerCallSiteWithPtrAuthBundle(&I, EHPadBB); } else { LowerCallTo(&I, getValue(Callee), false, EHPadBB); } @@ -7095,7 +7112,8 @@ SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool isTailCall, - const BasicBlock *EHPadBB) { + const BasicBlock *EHPadBB, + const TargetLowering::PtrAuthInfo *PAI) { auto &DL = DAG.getDataLayout(); FunctionType *FTy = CS.getFunctionType(); Type *RetTy = CS.getType(); @@ -7174,6 +7192,15 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, .setCallee(RetTy, FTy, Callee, std::move(Args), CS) .setTailCall(isTailCall) .setConvergent(CS.isConvergent()); + + // Set the pointer authentication info if we have it. + if (PAI) { + if (!TLI.supportPtrAuthBundles()) + report_fatal_error( + "This target doesn't support calls with ptrauth operand bundles."); + CLI.setPtrAuth(*PAI); + } + std::pair Result = lowerInvokable(CLI, EHPadBB); if (Result.first.getNode()) { @@ -7697,6 +7724,11 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) { } } + if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) { + LowerCallSiteWithPtrAuthBundle(&I, /*EHPadBB=*/nullptr); + return; + } + // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't // have to do anything here to lower funclet bundles. // CFGuardTarget bundles are lowered in LowerCallTo. 
@@ -7716,6 +7748,46 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) { LowerCallTo(&I, Callee, I.isTailCall()); } +void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle( + ImmutableCallSite CS, const BasicBlock *EHPadBB) { + auto PAB = CS.getOperandBundle("ptrauth"); + auto *CalleeV = CS.getCalledValue(); + + // Gather the call ptrauth data from the operand bundle: + // [ i32 , i64 ] + auto *Key = cast(PAB->Inputs[0]); + Value *Discriminator = PAB->Inputs[1]; + + assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key"); + assert(Discriminator->getType()->isIntegerTy(64) && + "Invalid ptrauth discriminator"); + + // Look through ptrauth globals to find the raw callee. + // Do a direct unauthenticated call if we found it and everything matches. + if (auto CalleePAI = GlobalPtrAuthInfo::analyze(CalleeV)) { + // FIXME: bring back a static diagnostic when we can guarantee the mismatch + if (CalleePAI->isCompatibleWith(Key, Discriminator, DAG.getDataLayout())) { + LowerCallTo(CS, getValue(CalleePAI->getPointer()), CS.isTailCall(), + EHPadBB); + return; + } + } + + // Functions should never be ptrauth-called directly. + // We could lower these to direct unauthenticated calls, but for that to + // occur, there must have been a semantic mismatch somewhere leading to this + // arguably incorrect IR. + if (isa(CalleeV)) + report_fatal_error("Cannot lower direct authenticated call to" + " unauthenticated target"); + + // Otherwise, do an authenticated indirect call. 
+ TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(), + getValue(Discriminator)}; + + LowerCallTo(CS, getValue(CalleeV), CS.isTailCall(), EHPadBB, &PAI); +} + namespace { /// AsmOperandInfo - This contains information for each constraint that we are diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h index bfcf30b430b6d4..4effc4bf128437 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -538,7 +538,8 @@ class SelectionDAGBuilder { void CopyToExportRegsIfNeeded(const Value *V); void ExportFromCurrentBlock(const Value *V); void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall, - const BasicBlock *EHPadBB = nullptr); + const BasicBlock *EHPadBB = nullptr, + const TargetLowering::PtrAuthInfo *PAI = nullptr); // Lower range metadata from 0 to N to assert zext to an integer of nearest // floor power of two. @@ -621,6 +622,9 @@ class SelectionDAGBuilder { bool VarArgDisallowed, bool ForceVoidReturnTy); + void LowerCallSiteWithPtrAuthBundle(ImmutableCallSite CS, + const BasicBlock *EHPadBB); + /// Returns the type of FrameIndex and TargetFrameIndex nodes. 
MVT getFrameIndexTy() { return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp index bc10f762123943..323bcb15528f77 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -120,6 +120,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const { case ISD::GlobalTLSAddress: return "GlobalTLSAddress"; case ISD::FrameIndex: return "FrameIndex"; case ISD::JumpTable: return "JumpTable"; + case ISD::PtrAuthGlobalAddress: return "PtrAuthGlobalAddress"; case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE"; case ISD::RETURNADDR: return "RETURNADDR"; case ISD::ADDROFRETURNADDR: return "ADDROFRETURNADDR"; diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp index cec194e8b6b3ef..611d33514b60a5 100644 --- a/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp @@ -210,6 +210,7 @@ static void dumpTypeName(raw_ostream &OS, const DWARFDie &D) { case DW_TAG_reference_type: case DW_TAG_rvalue_reference_type: case DW_TAG_subroutine_type: + case DW_TAG_APPLE_ptrauth_type: break; default: dumpTypeTagName(OS, T); @@ -257,6 +258,18 @@ static void dumpTypeName(raw_ostream &OS, const DWARFDie &D) { case DW_TAG_rvalue_reference_type: OS << "&&"; break; + case DW_TAG_APPLE_ptrauth_type: { + auto getValOrNull = [&](dwarf::Attribute Attr) -> uint64_t { + if (auto Form = D.find(Attr)) + return *Form->getAsUnsignedConstant(); + return 0; + }; + OS << "__ptrauth(" << getValOrNull(DW_AT_APPLE_ptrauth_key) << ", " + << getValOrNull(DW_AT_APPLE_ptrauth_address_discriminated) << ", 0x0" + << utohexstr(getValOrNull(DW_AT_APPLE_ptrauth_extra_discriminator), true) + << ")"; + break; + } default: break; } diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index f811c842cf569f..c38f2783bb91d5 100644 
--- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -1860,6 +1860,12 @@ static void writeDIDerivedType(raw_ostream &Out, const DIDerivedType *N, if (const auto &DWARFAddressSpace = N->getDWARFAddressSpace()) Printer.printInt("dwarfAddressSpace", *DWARFAddressSpace, /* ShouldSkipZero */ false); + if (auto Key = N->getPtrAuthKey()) + Printer.printInt("ptrAuthKey", *Key); + if (auto AddrDisc = N->isPtrAuthAddressDiscriminated()) + Printer.printBool("ptrAuthIsAddressDiscriminated", *AddrDisc); + if (auto Disc = N->getPtrAuthExtraDiscriminator()) + Printer.printInt("ptrAuthExtraDiscriminator", *Disc); Out << ")"; } diff --git a/llvm/lib/IR/CMakeLists.txt b/llvm/lib/IR/CMakeLists.txt index a9012637277bfd..89fca1008d276d 100644 --- a/llvm/lib/IR/CMakeLists.txt +++ b/llvm/lib/IR/CMakeLists.txt @@ -24,6 +24,7 @@ add_llvm_library(LLVMCore Dominators.cpp Function.cpp GVMaterializer.cpp + GlobalPtrAuthInfo.cpp Globals.cpp IRBuilder.cpp IRPrintingPasses.cpp diff --git a/llvm/lib/IR/DIBuilder.cpp b/llvm/lib/IR/DIBuilder.cpp index 5d567122743084..6a7c3faa2bdbd9 100644 --- a/llvm/lib/IR/DIBuilder.cpp +++ b/llvm/lib/IR/DIBuilder.cpp @@ -266,19 +266,28 @@ DIBasicType *DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits, DIDerivedType *DIBuilder::createQualifiedType(unsigned Tag, DIType *FromTy) { return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, FromTy, 0, - 0, 0, None, DINode::FlagZero); + 0, 0, None, None, DINode::FlagZero); +} + +DIDerivedType * +DIBuilder::createPtrAuthQualifiedType(DIType *FromTy, unsigned Key, + bool IsAddressDiscriminated, + unsigned ExtraDiscriminator) { + return DIDerivedType::get( + VMContext, dwarf::DW_TAG_APPLE_ptrauth_type, "", nullptr, 0, nullptr, + FromTy, 0, 0, 0, None, + Optional( + {Key, IsAddressDiscriminated, ExtraDiscriminator}), + DINode::FlagZero); } DIDerivedType *DIBuilder::createPointerType( - DIType *PointeeTy, - uint64_t SizeInBits, - uint32_t AlignInBits, - Optional DWARFAddressSpace, - 
StringRef Name) { + DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits, + Optional DWARFAddressSpace, StringRef Name) { // FIXME: Why is there a name here? return DIDerivedType::get(VMContext, dwarf::DW_TAG_pointer_type, Name, nullptr, 0, nullptr, PointeeTy, SizeInBits, - AlignInBits, 0, DWARFAddressSpace, + AlignInBits, 0, DWARFAddressSpace, None, DINode::FlagZero); } @@ -289,7 +298,7 @@ DIDerivedType *DIBuilder::createMemberPointerType(DIType *PointeeTy, DINode::DIFlags Flags) { return DIDerivedType::get(VMContext, dwarf::DW_TAG_ptr_to_member_type, "", nullptr, 0, nullptr, PointeeTy, SizeInBits, - AlignInBits, 0, None, Flags, Base); + AlignInBits, 0, None, None, Flags, Base); } DIDerivedType *DIBuilder::createReferenceType( @@ -299,7 +308,7 @@ DIDerivedType *DIBuilder::createReferenceType( Optional DWARFAddressSpace) { assert(RTy && "Unable to create reference type"); return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, RTy, - SizeInBits, AlignInBits, 0, DWARFAddressSpace, + SizeInBits, AlignInBits, 0, DWARFAddressSpace, {}, DINode::FlagZero); } @@ -308,14 +317,14 @@ DIDerivedType *DIBuilder::createTypedef(DIType *Ty, StringRef Name, DIScope *Context) { return DIDerivedType::get(VMContext, dwarf::DW_TAG_typedef, Name, File, LineNo, getNonCompileUnitScope(Context), Ty, 0, 0, - 0, None, DINode::FlagZero); + 0, None, None, DINode::FlagZero); } DIDerivedType *DIBuilder::createFriend(DIType *Ty, DIType *FriendTy) { assert(Ty && "Invalid type!"); assert(FriendTy && "Invalid friend type!"); return DIDerivedType::get(VMContext, dwarf::DW_TAG_friend, "", nullptr, 0, Ty, - FriendTy, 0, 0, 0, None, DINode::FlagZero); + FriendTy, 0, 0, 0, None, None, DINode::FlagZero); } DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy, @@ -326,7 +335,7 @@ DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy, Metadata *ExtraData = ConstantAsMetadata::get( ConstantInt::get(IntegerType::get(VMContext, 32), VBPtrOffset)); return 
DIDerivedType::get(VMContext, dwarf::DW_TAG_inheritance, "", nullptr, - 0, Ty, BaseTy, 0, 0, BaseOffset, None, + 0, Ty, BaseTy, 0, 0, BaseOffset, None, None, Flags, ExtraData); } @@ -338,7 +347,8 @@ DIDerivedType *DIBuilder::createMemberType(DIScope *Scope, StringRef Name, DINode::DIFlags Flags, DIType *Ty) { return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File, LineNumber, getNonCompileUnitScope(Scope), Ty, - SizeInBits, AlignInBits, OffsetInBits, None, Flags); + SizeInBits, AlignInBits, OffsetInBits, None, None, + Flags); } static ConstantAsMetadata *getConstantOrNull(Constant *C) { @@ -353,8 +363,8 @@ DIDerivedType *DIBuilder::createVariantMemberType( Constant *Discriminant, DINode::DIFlags Flags, DIType *Ty) { return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File, LineNumber, getNonCompileUnitScope(Scope), Ty, - SizeInBits, AlignInBits, OffsetInBits, None, Flags, - getConstantOrNull(Discriminant)); + SizeInBits, AlignInBits, OffsetInBits, None, None, + Flags, getConstantOrNull(Discriminant)); } DIDerivedType *DIBuilder::createBitFieldMemberType( @@ -365,7 +375,7 @@ DIDerivedType *DIBuilder::createBitFieldMemberType( return DIDerivedType::get( VMContext, dwarf::DW_TAG_member, Name, File, LineNumber, getNonCompileUnitScope(Scope), Ty, SizeInBits, /* AlignInBits */ 0, - OffsetInBits, None, Flags, + OffsetInBits, None, None, Flags, ConstantAsMetadata::get(ConstantInt::get(IntegerType::get(VMContext, 64), StorageOffsetInBits))); } @@ -378,7 +388,7 @@ DIBuilder::createStaticMemberType(DIScope *Scope, StringRef Name, DIFile *File, Flags |= DINode::FlagStaticMember; return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File, LineNumber, getNonCompileUnitScope(Scope), Ty, 0, - AlignInBits, 0, None, Flags, + AlignInBits, 0, None, None, Flags, getConstantOrNull(Val)); } @@ -389,8 +399,8 @@ DIBuilder::createObjCIVar(StringRef Name, DIFile *File, unsigned LineNumber, DIType *Ty, MDNode *PropertyNode) { return 
DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File, LineNumber, getNonCompileUnitScope(File), Ty, - SizeInBits, AlignInBits, OffsetInBits, None, Flags, - PropertyNode); + SizeInBits, AlignInBits, OffsetInBits, None, None, + Flags, PropertyNode); } DIObjCProperty * diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp index 1bbe6b85d26008..02250e7fd85959 100644 --- a/llvm/lib/IR/DebugInfo.cpp +++ b/llvm/lib/IR/DebugInfo.cpp @@ -1020,9 +1020,9 @@ LLVMMetadataRef LLVMDIBuilderCreatePointerType( LLVMDIBuilderRef Builder, LLVMMetadataRef PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits, unsigned AddressSpace, const char *Name, size_t NameLen) { - return wrap(unwrap(Builder)->createPointerType(unwrapDI(PointeeTy), - SizeInBits, AlignInBits, - AddressSpace, {Name, NameLen})); + return wrap(unwrap(Builder)->createPointerType( + unwrapDI(PointeeTy), SizeInBits, AlignInBits, AddressSpace, + {Name, NameLen})); } LLVMMetadataRef LLVMDIBuilderCreateStructType( diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp index 94ec3abfa7a252..27b34a6a64df54 100644 --- a/llvm/lib/IR/DebugInfoMetadata.cpp +++ b/llvm/lib/IR/DebugInfoMetadata.cpp @@ -368,17 +368,19 @@ DIDerivedType *DIDerivedType::getImpl( LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File, unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, - Optional DWARFAddressSpace, DIFlags Flags, Metadata *ExtraData, - StorageType Storage, bool ShouldCreate) { + Optional DWARFAddressSpace, Optional PtrAuthData, + DIFlags Flags, Metadata *ExtraData, StorageType Storage, + bool ShouldCreate) { assert(isCanonical(Name) && "Expected canonical MDString"); DEFINE_GETIMPL_LOOKUP(DIDerivedType, (Tag, Name, File, Line, Scope, BaseType, SizeInBits, - AlignInBits, OffsetInBits, DWARFAddressSpace, Flags, - ExtraData)); + AlignInBits, OffsetInBits, DWARFAddressSpace, + PtrAuthData, Flags, ExtraData)); 
Metadata *Ops[] = {File, Scope, Name, BaseType, ExtraData}; - DEFINE_GETIMPL_STORE( - DIDerivedType, (Tag, Line, SizeInBits, AlignInBits, OffsetInBits, - DWARFAddressSpace, Flags), Ops); + DEFINE_GETIMPL_STORE(DIDerivedType, + (Tag, Line, SizeInBits, AlignInBits, OffsetInBits, + DWARFAddressSpace, PtrAuthData, Flags), + Ops); } DICompositeType *DICompositeType::getImpl( diff --git a/llvm/lib/IR/GlobalPtrAuthInfo.cpp b/llvm/lib/IR/GlobalPtrAuthInfo.cpp new file mode 100644 index 00000000000000..c3a4df3d6c6cfc --- /dev/null +++ b/llvm/lib/IR/GlobalPtrAuthInfo.cpp @@ -0,0 +1,152 @@ +//===- GlobalPtrAuthInfo.cpp - Analysis tools for ptrauth globals ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/GlobalPtrAuthInfo.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" + +using namespace llvm; + +Expected GlobalPtrAuthInfo::tryAnalyze(const Value *V) { + auto Invalid = [](const Twine &Reason) { + return make_error(Reason, inconvertibleErrorCode()); + }; + + auto &Ctx = V->getContext(); + + V = V->stripPointerCasts(); + + auto *GV = dyn_cast(V); + if (!GV) + return Invalid("value isn't a global"); + + if (GV->getSection() != "llvm.ptrauth") + return Invalid("global isn't in section \"llvm.ptrauth\""); + + if (!GV->hasInitializer()) + return Invalid("global doesn't have an initializer"); + + auto *Init = GV->getInitializer(); + + auto *Ty = dyn_cast(GV->getInitializer()->getType()); + if (!Ty) + return Invalid("global isn't a struct"); + + auto *I64Ty = Type::getInt64Ty(Ctx); + auto *I32Ty = Type::getInt32Ty(Ctx); + auto *P0I8Ty = 
Type::getInt8PtrTy(Ctx); + // Check that the struct matches its expected shape: + // { i8*, i32, i64, i64 } + if (!Ty->isLayoutIdentical( + StructType::get(Ctx, {P0I8Ty, I32Ty, I64Ty, I64Ty}))) + return Invalid("global doesn't have type '{ i8*, i32, i64, i64 }'"); + + auto *Key = dyn_cast(Init->getOperand(1)); + if (!Key) + return Invalid("key isn't a constant integer"); + + auto *AddrDiscriminator = Init->getOperand(2); + if (!isa(AddrDiscriminator) && + !isa(AddrDiscriminator)) + return Invalid("address discriminator isn't a constant integer or expr"); + + auto *Discriminator = dyn_cast(Init->getOperand(3)); + if (!Discriminator) + return Invalid("discriminator isn't a constant integer"); + + return GlobalPtrAuthInfo(GV); +} + +Optional GlobalPtrAuthInfo::analyze(const Value *V) { + if (auto PAIOrErr = tryAnalyze(V)) { + return *PAIOrErr; + } else { + consumeError(PAIOrErr.takeError()); + return None; + } +} + +static bool areEquivalentAddrDiscriminators(const Value *V1, const Value *V2, + const DataLayout &DL) { + APInt V1Off(DL.getPointerSizeInBits(), 0); + APInt V2Off(DL.getPointerSizeInBits(), 0); + + if (auto *V1Cast = dyn_cast(V1)) + V1 = V1Cast->getPointerOperand(); + if (auto *V2Cast = dyn_cast(V2)) + V2 = V2Cast->getPointerOperand(); + auto *V1Base = V1->stripAndAccumulateInBoundsConstantOffsets(DL, V1Off); + auto *V2Base = V2->stripAndAccumulateInBoundsConstantOffsets(DL, V2Off); + return V1Base == V2Base && V1Off == V2Off; +} + +bool GlobalPtrAuthInfo::isCompatibleWith(const Value *Key, + const Value *Discriminator, + const DataLayout &DL) const { + // If the keys are different, there's no chance for this to be compatible. + if (Key != getKey()) + return false; + + // If the discriminators are the same, this is compatible iff there is no + // address discriminator. 
+ if (Discriminator == getDiscriminator()) + return getAddrDiscriminator()->isNullValue(); + + // If we dynamically blend the discriminator with the address discriminator, + // this is compatible. + if (auto *DiscBlend = dyn_cast(Discriminator)) { + if (DiscBlend->getIntrinsicID() == Intrinsic::ptrauth_blend && + DiscBlend->getOperand(1) == getDiscriminator() && + areEquivalentAddrDiscriminators(DiscBlend->getOperand(0), + getAddrDiscriminator(), DL)) + return true; + } + + // If we don't have a non-address discriminator, we don't need a blend in + // the first place: accept the address discriminator as the discriminator. + if (getDiscriminator()->isNullValue() && + areEquivalentAddrDiscriminators(getAddrDiscriminator(), Discriminator, + DL)) + return true; + + // Otherwise, we don't know. + return false; +} + +Constant *GlobalPtrAuthInfo::createWithSameSchema(Module &M, + Constant *Pointer) const { + return create(M, Pointer, const_cast(getKey()), + const_cast(getAddrDiscriminator()), + const_cast(getDiscriminator())); +} + +Constant *GlobalPtrAuthInfo::create(Module &M, Constant *Pointer, + ConstantInt *Key, + Constant *AddrDiscriminator, + ConstantInt *Discriminator) { + auto CastPointer = + ConstantExpr::getBitCast(Pointer, Type::getInt8PtrTy(M.getContext())); + + auto Init = ConstantStruct::getAnon({CastPointer, Key, AddrDiscriminator, + Discriminator}, /*packed*/ false); + + // TODO: look for an existing global with the right setup? 
+ auto GV = new GlobalVariable(M, Init->getType(), /*constant*/ true, + GlobalVariable::PrivateLinkage, Init); + GV->setSection("llvm.ptrauth"); + + auto Result = ConstantExpr::getBitCast(GV, Pointer->getType()); + + assert(analyze(Result).hasValue() && "invalid ptrauth constant"); + + return Result; +} diff --git a/llvm/lib/IR/Globals.cpp b/llvm/lib/IR/Globals.cpp index 46a9696b2944f4..f0002118f1696b 100644 --- a/llvm/lib/IR/Globals.cpp +++ b/llvm/lib/IR/Globals.cpp @@ -133,6 +133,15 @@ void GlobalObject::copyAttributesFrom(const GlobalObject *Src) { setSection(Src->getSection()); } +bool GlobalValue::hasExternalWeakLinkage() const { + // Be conservative with llvm.ptrauth wrappers. + // FIXME: this is gross but necessary with our current representation. + if (isa(this) && + getSection() == "llvm.ptrauth") + return true; + return isExternalWeakLinkage(getLinkage()); +} + std::string GlobalValue::getGlobalIdentifier(StringRef Name, GlobalValue::LinkageTypes Linkage, StringRef FileName) { diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp index cb13b27aa50f4c..46dd48f8512d1b 100644 --- a/llvm/lib/IR/LLVMContext.cpp +++ b/llvm/lib/IR/LLVMContext.cpp @@ -67,6 +67,11 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { "cfguardtarget operand bundle id drifted!"); (void)CFGuardTargetEntry; + auto *PtrauthEntry = pImpl->getOrInsertBundleTag("ptrauth"); + assert(PtrauthEntry->second == LLVMContext::OB_ptrauth && + "ptrauth operand bundle id drifted!"); + (void)PtrauthEntry; + SyncScope::ID SingleThreadSSID = pImpl->getOrInsertSyncScopeID("singlethread"); assert(SingleThreadSSID == SyncScope::SingleThread && diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h index 78cf707e0e748b..37ad20f28c6e07 100644 --- a/llvm/lib/IR/LLVMContextImpl.h +++ b/llvm/lib/IR/LLVMContextImpl.h @@ -413,24 +413,27 @@ template <> struct MDNodeKeyImpl { uint64_t OffsetInBits; uint32_t AlignInBits; Optional DWARFAddressSpace; + Optional 
PtrAuthData; unsigned Flags; Metadata *ExtraData; MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *File, unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, - Optional DWARFAddressSpace, unsigned Flags, - Metadata *ExtraData) + Optional DWARFAddressSpace, + Optional PtrAuthData, + unsigned Flags, Metadata *ExtraData) : Tag(Tag), Name(Name), File(File), Line(Line), Scope(Scope), BaseType(BaseType), SizeInBits(SizeInBits), OffsetInBits(OffsetInBits), AlignInBits(AlignInBits), DWARFAddressSpace(DWARFAddressSpace), - Flags(Flags), ExtraData(ExtraData) {} + PtrAuthData(PtrAuthData), Flags(Flags), ExtraData(ExtraData) {} MDNodeKeyImpl(const DIDerivedType *N) : Tag(N->getTag()), Name(N->getRawName()), File(N->getRawFile()), Line(N->getLine()), Scope(N->getRawScope()), BaseType(N->getRawBaseType()), SizeInBits(N->getSizeInBits()), OffsetInBits(N->getOffsetInBits()), AlignInBits(N->getAlignInBits()), - DWARFAddressSpace(N->getDWARFAddressSpace()), Flags(N->getFlags()), + DWARFAddressSpace(N->getDWARFAddressSpace()), + PtrAuthData(N->getPtrAuthData()), Flags(N->getFlags()), ExtraData(N->getRawExtraData()) {} bool isKeyOf(const DIDerivedType *RHS) const { @@ -441,7 +444,7 @@ template <> struct MDNodeKeyImpl { AlignInBits == RHS->getAlignInBits() && OffsetInBits == RHS->getOffsetInBits() && DWARFAddressSpace == RHS->getDWARFAddressSpace() && - Flags == RHS->getFlags() && + PtrAuthData == RHS->getPtrAuthData() && Flags == RHS->getFlags() && ExtraData == RHS->getRawExtraData(); } diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 497dc394d96073..9800f84400d5ea 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -77,6 +77,7 @@ #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalPtrAuthInfo.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/InlineAsm.h" @@ -684,6 
+685,14 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) { } } + if (GV.getSection() == "llvm.ptrauth") { + if (auto Err = GlobalPtrAuthInfo::tryAnalyze(&GV).takeError()) { + CheckFailed("invalid llvm.ptrauth global: " + toString(std::move(Err)), + &GV); + return; + } + } + // Visit any debug info attachments. SmallVector MDs; GV.getMetadata(LLVMContext::MD_dbg, MDs); @@ -922,6 +931,7 @@ void Verifier::visitDIDerivedType(const DIDerivedType &N) { N.getTag() == dwarf::DW_TAG_volatile_type || N.getTag() == dwarf::DW_TAG_restrict_type || N.getTag() == dwarf::DW_TAG_atomic_type || + N.getTag() == dwarf::DW_TAG_APPLE_ptrauth_type || N.getTag() == dwarf::DW_TAG_member || N.getTag() == dwarf::DW_TAG_inheritance || N.getTag() == dwarf::DW_TAG_friend, @@ -2976,9 +2986,11 @@ void Verifier::visitCallBase(CallBase &Call) { visitIntrinsicCall(ID, Call); // Verify that a callsite has at most one "deopt", at most one "funclet", at - // most one "gc-transition", and at most one "cfguardtarget" operand bundle. + // most one "gc-transition", at most one "cfguardtarget", and at most one + // "ptrauth" operand bundle. 
bool FoundDeoptBundle = false, FoundFuncletBundle = false, - FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false; + FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false, + FoundPtrauthBundle = false; for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) { OperandBundleUse BU = Call.getOperandBundleAt(i); uint32_t Tag = BU.getTagID(); @@ -3003,6 +3015,16 @@ void Verifier::visitCallBase(CallBase &Call) { FoundCFGuardTargetBundle = true; Assert(BU.Inputs.size() == 1, "Expected exactly one cfguardtarget bundle operand", Call); + } else if (Tag == LLVMContext::OB_ptrauth) { + Assert(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call); + FoundPtrauthBundle = true; + Assert(BU.Inputs.size() == 2, + "Expected exactly two ptrauth bundle operands", Call); + Assert(isa(BU.Inputs[0]) && + BU.Inputs[0]->getType()->isIntegerTy(32), + "Ptrauth bundle key operand must be an i32 constant", Call); + Assert(BU.Inputs[1]->getType()->isIntegerTy(64), + "Ptrauth bundle discriminator operand must be an i64", Call); } } diff --git a/llvm/lib/LTO/LTOCodeGenerator.cpp b/llvm/lib/LTO/LTOCodeGenerator.cpp index 8821928928672b..27f4ad4a06a38b 100644 --- a/llvm/lib/LTO/LTOCodeGenerator.cpp +++ b/llvm/lib/LTO/LTOCodeGenerator.cpp @@ -365,6 +365,8 @@ bool LTOCodeGenerator::determineTarget() { MCpu = "core2"; else if (Triple.getArch() == llvm::Triple::x86) MCpu = "yonah"; + else if (Triple.getArchName() == "arm64e") + MCpu = "vortex"; else if (Triple.getArch() == llvm::Triple::aarch64 || Triple.getArch() == llvm::Triple::aarch64_32) MCpu = "cyclone"; diff --git a/llvm/lib/LTO/LTOModule.cpp b/llvm/lib/LTO/LTOModule.cpp index 587b332e706491..7689269c3f0979 100644 --- a/llvm/lib/LTO/LTOModule.cpp +++ b/llvm/lib/LTO/LTOModule.cpp @@ -220,6 +220,8 @@ LTOModule::makeLTOModule(MemoryBufferRef Buffer, const TargetOptions &options, CPU = "core2"; else if (Triple.getArch() == llvm::Triple::x86) CPU = "yonah"; + else if (Triple.getArchName() == "arm64e") + 
CPU = "vortex"; else if (Triple.getArch() == llvm::Triple::aarch64 || Triple.getArch() == llvm::Triple::aarch64_32) CPU = "cyclone"; diff --git a/llvm/lib/MC/MCMachOStreamer.cpp b/llvm/lib/MC/MCMachOStreamer.cpp index 8e558a36b7a1fc..8dac4ff16a582d 100644 --- a/llvm/lib/MC/MCMachOStreamer.cpp +++ b/llvm/lib/MC/MCMachOStreamer.cpp @@ -143,6 +143,9 @@ static bool canGoAfterDWARF(const MCSectionMachO &MSec) { SecName == "__thread_ptr")) return true; + if (SegName == "__DATA" && SecName == "__auth_ptr") + return true; + return false; } diff --git a/llvm/lib/Object/MachOObjectFile.cpp b/llvm/lib/Object/MachOObjectFile.cpp index c0c873f973545d..1d602aa2dba1a9 100644 --- a/llvm/lib/Object/MachOObjectFile.cpp +++ b/llvm/lib/Object/MachOObjectFile.cpp @@ -2220,7 +2220,8 @@ void MachOObjectFile::getRelocationTypeName( "ARM64_RELOC_PAGEOFF12", "ARM64_RELOC_GOT_LOAD_PAGE21", "ARM64_RELOC_GOT_LOAD_PAGEOFF12", "ARM64_RELOC_POINTER_TO_GOT", "ARM64_RELOC_TLVP_LOAD_PAGE21", "ARM64_RELOC_TLVP_LOAD_PAGEOFF12", - "ARM64_RELOC_ADDEND" + "ARM64_RELOC_ADDEND", + "ARM64_RELOC_AUTHENTICATED_POINTER" }; if (RType >= array_lengthof(Table)) @@ -2557,6 +2558,8 @@ StringRef MachOObjectFile::getFileFormatName() const { case MachO::CPU_TYPE_X86_64: return "Mach-O 64-bit x86-64"; case MachO::CPU_TYPE_ARM64: + if (getHeader().cpusubtype == MachO::CPU_SUBTYPE_ARM64E) + return "Mach-O arm64e"; return "Mach-O arm64"; case MachO::CPU_TYPE_POWERPC64: return "Mach-O 64-bit ppc64"; @@ -2680,6 +2683,12 @@ Triple MachOObjectFile::getArchTriple(uint32_t CPUType, uint32_t CPUSubType, if (ArchFlag) *ArchFlag = "arm64"; return Triple("arm64-apple-darwin"); + case MachO::CPU_SUBTYPE_ARM64E: + if (McpuDefault) + *McpuDefault = "vortex"; + if (ArchFlag) + *ArchFlag = "arm64e"; + return Triple("arm64e-apple-darwin"); default: return Triple(); } @@ -2727,10 +2736,10 @@ bool MachOObjectFile::isValidArch(StringRef ArchFlag) { } ArrayRef MachOObjectFile::getValidArchs() { - static const std::array validArchs = {{ + 
static const std::array validArchs = {{ "i386", "x86_64", "x86_64h", "armv4t", "arm", "armv5e", "armv6", "armv6m", "armv7", "armv7em", "armv7k", "armv7m", - "armv7s", "arm64", "arm64_32", "ppc", "ppc64", + "armv7s", "arm64", "arm64e", "arm64_32","ppc", "ppc64", }}; return validArchs; diff --git a/llvm/lib/Support/ARMTargetParser.cpp b/llvm/lib/Support/ARMTargetParser.cpp index ce5daa7fe58c0d..08317284c03bf7 100644 --- a/llvm/lib/Support/ARMTargetParser.cpp +++ b/llvm/lib/Support/ARMTargetParser.cpp @@ -278,6 +278,8 @@ StringRef ARM::getCanonicalArchName(StringRef Arch) { // Begins with "arm" / "thumb", move past it. if (A.startswith("arm64_32")) offset = 8; + else if (A.startswith("arm64e")) + offset = 6; else if (A.startswith("arm64")) offset = 5; else if (A.startswith("aarch64_32")) diff --git a/llvm/lib/Support/Triple.cpp b/llvm/lib/Support/Triple.cpp index d419463e6a5e6f..04f4ef214471af 100644 --- a/llvm/lib/Support/Triple.cpp +++ b/llvm/lib/Support/Triple.cpp @@ -400,6 +400,7 @@ static Triple::ArchType parseArch(StringRef ArchName) { .Case("arc", Triple::arc) .Case("arm64", Triple::aarch64) .Case("arm64_32", Triple::aarch64_32) + .Case("arm64e", Triple::aarch64) .Case("arm", Triple::arm) .Case("armeb", Triple::armeb) .Case("thumb", Triple::thumb) @@ -565,6 +566,9 @@ static Triple::SubArchType parseSubArch(StringRef SubArchName) { StringRef ARMSubArch = ARM::getCanonicalArchName(SubArchName); + if (SubArchName == "arm64e") + return Triple::AArch64SubArch_E; + // For now, this is the small part. Early return. 
if (ARMSubArch.empty()) return StringSwitch(SubArchName) diff --git a/llvm/lib/Target/AArch64/AArch64.h b/llvm/lib/Target/AArch64/AArch64.h index ac765ebcddc048..f2113888e44d92 100644 --- a/llvm/lib/Target/AArch64/AArch64.h +++ b/llvm/lib/Target/AArch64/AArch64.h @@ -48,6 +48,7 @@ FunctionPass *createAArch64A53Fix835769(); FunctionPass *createFalkorHWPFFixPass(); FunctionPass *createFalkorMarkStridedAccessesPass(); FunctionPass *createAArch64BranchTargetsPass(); +FunctionPass *createAArch64ExpandHardenedPseudosPass(); FunctionPass *createAArch64CleanupLocalDynamicTLSPass(); @@ -69,6 +70,7 @@ void initializeAArch64CompressJumpTablesPass(PassRegistry&); void initializeAArch64ConditionalComparesPass(PassRegistry&); void initializeAArch64ConditionOptimizerPass(PassRegistry&); void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry&); +void initializeAArch64ExpandHardenedPseudosPass(PassRegistry&); void initializeAArch64ExpandPseudoPass(PassRegistry&); void initializeAArch64SpeculationHardeningPass(PassRegistry&); void initializeAArch64LoadStoreOptPass(PassRegistry&); diff --git a/llvm/lib/Target/AArch64/AArch64.td b/llvm/lib/Target/AArch64/AArch64.td index 5b4c9e2149dae4..ac618031775d04 100644 --- a/llvm/lib/Target/AArch64/AArch64.td +++ b/llvm/lib/Target/AArch64/AArch64.td @@ -582,6 +582,43 @@ def ProcCyclone : SubtargetFeature<"cyclone", "ARMProcFamily", "Cyclone", FeatureZCZeroingFPWorkaround ]>; +def ProcVortex : SubtargetFeature<"vortex", "ARMProcFamily", "Vortex", + "Vortex", [ + FeatureAlternateSExtLoadCVTF32Pattern, + FeatureArithmeticBccFusion, + FeatureArithmeticCbzFusion, + FeatureCrypto, + FeatureDisableLatencySchedHeuristic, + FeatureFullFP16, + FeatureFuseAES, + FeatureFuseCryptoEOR, + FeatureNEON, + FeaturePerfMon, + FeatureZCRegMove, + FeatureZCZeroing, + HasV8_3aOps + ]>; + +def ProcLightning : SubtargetFeature<"lightning", "ARMProcFamily", "Lightning", + "Lightning", [ + FeatureAlternateSExtLoadCVTF32Pattern, + FeatureArithmeticBccFusion, + 
FeatureArithmeticCbzFusion, + FeatureCrypto, + FeatureDisableLatencySchedHeuristic, + FeatureFP16FML, + FeatureFullFP16, + FeatureFuseAES, + FeatureFuseCryptoEOR, + FeatureNEON, + FeaturePerfMon, + FeatureSHA3, + FeatureSM4, + FeatureZCRegMove, + FeatureZCZeroing, + HasV8_4aOps + ]>; + def ProcExynosM1 : SubtargetFeature<"exynosm1", "ARMProcFamily", "ExynosM1", "Samsung Exynos-M1 processors", [FeatureSlowPaired128, @@ -816,6 +853,8 @@ def : ProcessorModel<"cortex-a76ae", CortexA57Model, [ProcA76]>; def : ProcessorModel<"neoverse-e1", CortexA53Model, [ProcNeoverseE1]>; def : ProcessorModel<"neoverse-n1", CortexA57Model, [ProcNeoverseN1]>; def : ProcessorModel<"cyclone", CycloneModel, [ProcCyclone]>; +def : ProcessorModel<"vortex", CycloneModel, [ProcVortex]>; +def : ProcessorModel<"lightning", CycloneModel, [ProcLightning]>; def : ProcessorModel<"exynos-m1", ExynosM1Model, [ProcExynosM1]>; def : ProcessorModel<"exynos-m2", ExynosM1Model, [ProcExynosM2]>; def : ProcessorModel<"exynos-m3", ExynosM3Model, [ProcExynosM3]>; @@ -835,7 +874,7 @@ def : ProcessorModel<"thunderx2t99", ThunderX2T99Model, [ProcThunderX2T99]>; def : ProcessorModel<"tsv110", CortexA57Model, [ProcTSV110]>; // Alias for the latest Apple processor model supported by LLVM. 
-def : ProcessorModel<"apple-latest", CycloneModel, [ProcCyclone]>; +def : ProcessorModel<"apple-latest", CycloneModel, [ProcLightning]>; //===----------------------------------------------------------------------===// // Assembly parser diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp index 7ea7915c2ca6ae..edda23a22307ad 100644 --- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp +++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -29,6 +29,7 @@ #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/ADT/Twine.h" +#include "llvm/BinaryFormat/MachO.h" #include "llvm/BinaryFormat/COFF.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/CodeGen/AsmPrinter.h" @@ -42,11 +43,13 @@ #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/GlobalPtrAuthInfo.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstBuilder.h" #include "llvm/MC/MCSectionELF.h" +#include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/Casting.h" @@ -62,6 +65,16 @@ using namespace llvm; +enum PtrauthCheckMode { Default, Unchecked, Poison, Trap }; +static cl::opt +PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, + cl::values( + clEnumValN(Unchecked, "none", "don't test for failure"), + clEnumValN(Poison, "poison", "poison on failure"), + clEnumValN(Trap, "trap", "trap on failure")), + cl::desc("Check pointer authentication auth/resign failures"), + cl::init(Default)); + #define DEBUG_TYPE "asm-printer" namespace { @@ -84,6 +97,9 @@ class AArch64AsmPrinter : public AsmPrinter { return MCInstLowering.lowerOperand(MO, MCOp); } + const MCExpr * + lowerPtrAuthGlobalConstant(const GlobalPtrAuthInfo &PAI) override; + void EmitJumpTableInfo() override; void emitJumpTableEntry(const MachineJumpTableInfo *MJTI, 
const MachineBasicBlock *MBB, unsigned JTI); @@ -447,11 +463,39 @@ void AArch64AsmPrinter::EmitHwasanMemaccessSymbols(Module &M) { } } +static void +emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, + const MachineModuleInfoMachO::AuthStubInfo &StubInfo) { + // L_foo$addend$auth_ptr$ib$23: + OutStreamer.EmitLabel(StubLabel); + OutStreamer.EmitValue(StubInfo.Pointer, /*size=*/8); +} + void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) { EmitHwasanMemaccessSymbols(M); const Triple &TT = TM.getTargetTriple(); if (TT.isOSBinFormatMachO()) { + + // Output authenticated pointers as indirect symbols, if we have any. + MachineModuleInfoMachO &MMIMacho = + MMI->getObjFileInfo(); + + auto Stubs = MMIMacho.getAuthGVStubList(); + + if (!Stubs.empty()) { + // Switch to the "__auth_ptr" section. + OutStreamer->SwitchSection( + OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR, + SectionKind::getMetadata())); + EmitAlignment(Align(8)); + + for (auto &Stub : Stubs) + emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second); + + OutStreamer->AddBlankLine(); + } + // Funny Darwin hack: This flag tells the linker that no global symbols // contain code that falls through to other global symbols (e.g. the obvious // implementation of multiple entry points). If this doesn't occur, the @@ -899,6 +943,50 @@ void AArch64AsmPrinter::EmitFMov0(const MachineInstr &MI) { } } + +const MCExpr * +AArch64AsmPrinter::lowerPtrAuthGlobalConstant(const GlobalPtrAuthInfo &PAI) { + MCContext &Ctx = OutContext; + + // Figure out the base symbol and the addend, if any. + APInt Offset(64, 0); + const Value *BaseGV = + PAI.getPointer()->stripAndAccumulateInBoundsConstantOffsets( + getDataLayout(), Offset); + + auto *BaseGVB = dyn_cast(BaseGV); + + // If we can't understand the referenced ConstantExpr, there's nothing + // else we can do: emit an error. 
+ if (!BaseGVB) { + BaseGVB = PAI.getGV(); + BaseGV->getContext().emitError( + "Couldn't resolve target base/addend of llvm.ptrauth global '" + + BaseGV->getName() + "'"); + } + + // If there is an addend, turn that into the appropriate MCExpr. + const MCExpr *Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx); + if (Offset.sgt(0)) + Sym = MCBinaryExpr::createAdd( + Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx); + else if (Offset.slt(0)) + Sym = MCBinaryExpr::createSub( + Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx); + + auto *Disc = PAI.getDiscriminator(); + uint64_t KeyID = PAI.getKey()->getZExtValue(); + if (!isUInt<2>(KeyID)) + BaseGV->getContext().emitError( + "Invalid AArch64 PAC Key ID '" + utostr(KeyID) + "' in llvm.ptrauth global '" + + BaseGV->getName() + "'"); + + // Finally build the complete @AUTH expr. + return AArch64AuthMCExpr::create(Sym, Disc->getZExtValue(), + AArch64PACKey::ID(KeyID), + PAI.hasAddressDiversity(), Ctx); +} + // Simple pseudo-instructions have their lowering (with expansion to real // instructions) auto-generated. #include "AArch64GenMCPseudoLowering.inc" @@ -987,9 +1075,188 @@ void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) { } } + case AArch64::AUT: + case AArch64::AUTPAC: { + const bool IsAUTPAC = MI->getOpcode() == AArch64::AUTPAC; + + // We can expand AUT/AUTPAC into 3 possible sequences: + // - unchecked: + // autia x16, x0 + // pacib x16, x1 ; if AUTPAC + // + // - checked and clearing: + // mov x17, x16 + // autia x16, x0 + // xpaci x17 + // cmp x16, x17 + // pacib x16, x1 + // csel x16, x16, x17, eq + // Where we only emit the AUT if we started with an AUT. + // + // - checked and trapping: + // mov x17, x16 + // autia x16, x0 + // xpaci x17 + // cmp x16, x17 + // b.eq Lsuccess + // brk #<0xc470 + aut key> + // Lsuccess: + // pacib x16, x1 ; if AUTPAC + // Where the b.eq skips over the trap if the PAC is valid. 
+ // + // This sequence is expensive, but we need more information to be able to + // do better. + // + // We can't TBZ the poison bit because EnhancedPAC2 XORs the PAC bits + // on failure. + // We can't TST the PAC bits because we don't always know how the address + // space is setup for the target environment (and the bottom PAC bit is + // based on that). + // Either way, we also don't always know whether TBI is enabled or not for + // the specific target environment. + // + // FIXME: we could re-use AUTReg as a temporary register, but that would + // require splitting the XZR cases into separate opcodes. + + // By default, auth/resign sequences check for auth failures. + bool ShouldCheck = true; + // In the checked sequence, we only trap if explicitly requested. + bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps"); + + // However, command-line flags can override this, for experimentation. + switch (PtrauthAuthChecks) { + case PtrauthCheckMode::Default: break; + case PtrauthCheckMode::Unchecked: + ShouldCheck = ShouldTrap = false; + break; + case PtrauthCheckMode::Poison: + ShouldCheck = true; + ShouldTrap = false; + break; + case PtrauthCheckMode::Trap: + ShouldCheck = ShouldTrap = true; + break; + } + + const auto AUTKey = (AArch64PACKey::ID)MI->getOperand(0).getImm(); + const unsigned AUTReg = MI->getOperand(1).getReg(); + + const unsigned XPACOpc = getXPACOpcodeForKey(AUTKey); + const bool AUTZero = AUTReg == AArch64::XZR; + const unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero); + + // Checked AUTPACs and trapping AUTs need a temporary copy of the input: x17 + if ((IsAUTPAC && ShouldCheck) || ShouldTrap) { + // mov x17, x16 + EmitToStreamer(*OutStreamer, + MCInstBuilder(AArch64::ORRXrs) + .addReg(AArch64::X17) + .addReg(AArch64::XZR) + .addReg(AArch64::X16) + .addImm(0)); + } + + // autia x16, x0 + MCInst AUTInst; + AUTInst.setOpcode(AUTOpc); + AUTInst.addOperand(MCOperand::createReg(AArch64::X16)); + 
AUTInst.addOperand(MCOperand::createReg(AArch64::X16)); + if (!AUTZero) + AUTInst.addOperand(MCOperand::createReg(AUTReg)); + EmitToStreamer(*OutStreamer, AUTInst); + + // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done. + if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap)) + return; + + // Checked sequences do an additional strip-and-compare. + if (ShouldCheck) { + // xpaci x17 + EmitToStreamer(*OutStreamer, + MCInstBuilder(XPACOpc) + .addReg(AArch64::X17) + .addReg(AArch64::X17)); + + // cmp x16, x17 + EmitToStreamer(*OutStreamer, + MCInstBuilder(AArch64::SUBSXrs) + .addReg(AArch64::XZR) + .addReg(AArch64::X16) + .addReg(AArch64::X17) + .addImm(0)); + + // Trapping sequences do a 'brk'. + if (ShouldTrap) { + // b.eq Lsuccess + // where Lsuccess is encoded as 2 (the offset from this instruction to + // what's after the brk, divided by 4) + EmitToStreamer(*OutStreamer, + MCInstBuilder(AArch64::Bcc) + .addImm(AArch64CC::EQ) + .addImm(2)); + + // brk #<0xc470 + aut key> + EmitToStreamer(*OutStreamer, + MCInstBuilder(AArch64::BRK) + .addImm(0xc470 | AUTKey)); + } + } + + // We already emitted unchecked and checked-but-non-trapping AUTs. + // That left us with trapping AUTs, and AUTPACs. + // Trapping AUTs don't need PAC: we're done. + if (!IsAUTPAC) + return; + + const auto PACKey = (AArch64PACKey::ID)MI->getOperand(2).getImm(); + const unsigned PACReg = MI->getOperand(3).getReg(); + const bool PACZero = PACReg == AArch64::XZR; + const unsigned PACOpc = getPACOpcodeForKey(PACKey, PACZero); + + // pacib x16, x9 + MCInst PACInst; + PACInst.setOpcode(PACOpc); + PACInst.addOperand(MCOperand::createReg(AArch64::X16)); + PACInst.addOperand(MCOperand::createReg(AArch64::X16)); + if (!PACZero) + PACInst.addOperand(MCOperand::createReg(PACReg)); + EmitToStreamer(*OutStreamer, PACInst); + + // Non-trapping AUTPAC selects the result based on the xpac check. + // Trapping AUTPAC already trapped; unchecked AUTPAC didn't even check. 
+ if (ShouldTrap || !ShouldCheck) + return; + + // csel x16, x16, x17, eq + EmitToStreamer(*OutStreamer, + MCInstBuilder(AArch64::CSELXr) + .addReg(AArch64::X16) + .addReg(AArch64::X16) + .addReg(AArch64::X17) + .addImm(0)); + return; + } + // Tail calls use pseudo instructions so they have the proper code-gen // attributes (isCall, isReturn, etc.). We lower them to the real // instruction here. + case AArch64::AUTH_TCRETURNrii: + case AArch64::AUTH_TCRETURNriri: { + const bool isZero = MI->getOpcode() == AArch64::AUTH_TCRETURNrii; + const uint64_t Key = MI->getOperand(2).getImm(); + assert (Key < 2 && "Unknown key kind for authenticating tail-call return"); + + const unsigned Opcodes[2][2] = {{AArch64::BRAA, AArch64::BRAAZ}, + {AArch64::BRAB, AArch64::BRABZ}}; + + MCInst TmpInst; + TmpInst.setOpcode(Opcodes[Key][isZero]); + TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg())); + if (!isZero) + TmpInst.addOperand(MCOperand::createReg(MI->getOperand(3).getReg())); + EmitToStreamer(*OutStreamer, TmpInst); + return; + } case AArch64::TCRETURNri: case AArch64::TCRETURNriBTI: case AArch64::TCRETURNriALL: { diff --git a/llvm/lib/Target/AArch64/AArch64ExpandHardenedPseudos.cpp b/llvm/lib/Target/AArch64/AArch64ExpandHardenedPseudos.cpp new file mode 100644 index 00000000000000..8d540eedca5148 --- /dev/null +++ b/llvm/lib/Target/AArch64/AArch64ExpandHardenedPseudos.cpp @@ -0,0 +1,250 @@ +//===- AArch64ExpandHardenedPseudos.cpp --------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +//===----------------------------------------------------------------------===// + +#include "AArch64InstrInfo.h" +#include "AArch64Subtarget.h" +#include "AArch64MachineFunctionInfo.h" +#include "MCTargetDesc/AArch64AddressingModes.h" +#include "Utils/AArch64BaseInfo.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/Pass.h" +#include "llvm/Support/CodeGen.h" +#include "llvm/Support/Debug.h" +#include "llvm/Target/TargetMachine.h" +#include + +using namespace llvm; + +#define DEBUG_TYPE "aarch64-expand-hardened-pseudos" + +#define PASS_NAME "AArch64 Expand Hardened Pseudos" + +namespace { + +class AArch64ExpandHardenedPseudos : public MachineFunctionPass { +public: + static char ID; + + AArch64ExpandHardenedPseudos() : MachineFunctionPass(ID) { + initializeAArch64ExpandHardenedPseudosPass(*PassRegistry::getPassRegistry()); + } + + bool runOnMachineFunction(MachineFunction &Fn) override; + + StringRef getPassName() const override { + return PASS_NAME; + } + +private: + bool expandMI(MachineInstr &MI); +}; + +} // end anonymous namespace + +char AArch64ExpandHardenedPseudos::ID = 0; + +INITIALIZE_PASS(AArch64ExpandHardenedPseudos, DEBUG_TYPE, PASS_NAME, false, false); + +bool AArch64ExpandHardenedPseudos::expandMI(MachineInstr &MI) { + MachineBasicBlock &MBB = *MI.getParent(); + MachineFunction &MF = *MBB.getParent(); + DebugLoc DL = MI.getDebugLoc(); + auto MBBI = MI.getIterator(); + + const 
AArch64Subtarget &STI = MF.getSubtarget(); + const AArch64InstrInfo *TII = STI.getInstrInfo(); + + if (MI.getOpcode() == AArch64::BR_JumpTable) { + LLVM_DEBUG(dbgs() << "Expanding: " << MI << "\n"); + const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo(); + assert(MJTI && "Can't lower jump-table dispatch without JTI"); + + const std::vector &JTs = MJTI->getJumpTables(); + assert(!JTs.empty() && "Invalid JT index for jump-table dispatch"); + + // Emit: + // adrp xTable, Ltable@PAGE + // add xTable, Ltable@PAGEOFF + // mov xEntry, # ; depending on table size, with MOVKs + // cmp xEntry, # ; if table size fits in 12-bit immediate + // csel xEntry, xEntry, xzr, ls + // ldrsw xScratch, [xTable, xEntry, lsl #2] ; kill xEntry, xScratch = xEntry + // add xDest, xTable, xScratch ; kill xTable, xDest = xTable + // br xDest + + MachineOperand JTOp = MI.getOperand(0); + + unsigned JTI = JTOp.getIndex(); + const uint64_t NumTableEntries = JTs[JTI].MBBs.size(); + + // cmp only supports a 12-bit immediate. If we need more, materialize the + // immediate, using TableReg as a scratch register. + uint64_t MaxTableEntry = NumTableEntries - 1; + if (isUInt<12>(MaxTableEntry)) { + BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBSXri), AArch64::XZR) + .addReg(AArch64::X16) + .addImm(MaxTableEntry) + .addImm(0); + } else { + BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X17) + .addImm(static_cast(MaxTableEntry)) + .addImm(0); + // It's sad that we have to manually materialize instructions, but we can't + // trivially reuse the main pseudo expansion logic. + // A MOVK sequence is easy enough to generate and handles the general case. 
+ for (int Offset = 16; Offset < 64; Offset += 16) { + if ((MaxTableEntry >> Offset) == 0) + break; + BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X17) + .addReg(AArch64::X17) + .addImm(static_cast(MaxTableEntry >> Offset)) + .addImm(Offset); + } + BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR) + .addReg(AArch64::X16) + .addReg(AArch64::X17) + .addImm(0); + } + + // This picks entry #0 on failure. + // We might want to trap instead. + BuildMI(MBB, MBBI, DL, TII->get(AArch64::CSELXr), AArch64::X16) + .addReg(AArch64::X16) + .addReg(AArch64::XZR) + .addImm(AArch64CC::LS); + + MachineOperand JTHiOp(JTOp); + MachineOperand JTLoOp(JTOp); + JTHiOp.setTargetFlags(AArch64II::MO_PAGE); + JTLoOp.setTargetFlags(AArch64II::MO_PAGEOFF); + + BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADRP), AArch64::X17) + .add(JTHiOp); + BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X17) + .addReg(AArch64::X17) + .add(JTLoOp) + .addImm(0); + + BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRSWroX), AArch64::X16) + .addReg(AArch64::X17) + .addReg(AArch64::X16) + .addImm(0) + .addImm(1); + BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXrs), AArch64::X16) + .addReg(AArch64::X17) + .addReg(AArch64::X16) + .addImm(0); + + BuildMI(MBB, MBBI, DL, TII->get(AArch64::BR)) + .addReg(AArch64::X16); + + MI.eraseFromParent(); + return true; + } + + if (MI.getOpcode() != AArch64::MOVaddrPAC) + return false; + + LLVM_DEBUG(dbgs() << "Expanding: " << MI << "\n"); + + + MachineOperand GAOp = MI.getOperand(0); + uint64_t Offset = MI.getOperand(1).getImm(); + auto Key = (AArch64PACKey::ID)MI.getOperand(2).getImm(); + unsigned AddrDisc = MI.getOperand(3).getReg(); + uint64_t Disc = MI.getOperand(4).getImm(); + + // Emit: + // target materialization: + // adrp x16, _target@GOTPAGE + // ldr x16, [x16, _target@GOTPAGEOFF] + // add x16, x16, # ; if offset != 0; up to 3 depending on width + // + // signing: + // - 0 discriminator: + // paciza x16 + // - Non-0 discriminator, no 
address discriminator: + // mov x17, #Disc + // pacia x16, x17 + // - address discriminator (with potentially folded immediate discriminator): + // pacia x16, xAddrDisc + + MachineOperand GAHiOp(GAOp); + MachineOperand GALoOp(GAOp); + GAHiOp.setTargetFlags(AArch64II::MO_GOT | AArch64II::MO_PAGE); + GALoOp.setTargetFlags(AArch64II::MO_GOT | AArch64II::MO_PAGEOFF); + + BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADRP), AArch64::X16) + .add(GAHiOp); + + BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui), AArch64::X16) + .addReg(AArch64::X16) + .add(GALoOp); + + if (Offset) { + if (!isUInt<32>(Offset)) + report_fatal_error("ptrauth global offset too large, 32bit max encoding"); + + for (int BitPos = 0; BitPos < 32 && (Offset >> BitPos); BitPos += 12) { + BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X16) + .addReg(AArch64::X16) + .addImm((Offset >> BitPos) & 0xfff) + .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)); + } + } + + unsigned DiscReg = AArch64::XZR; + if (Disc) { + DiscReg = AArch64::X17; + BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X17) + .addImm(Disc) + .addImm(0); + } else if (AddrDisc != AArch64::XZR) { + assert(Disc == 0 && "Non-0 discriminators should be folded into addr-disc"); + DiscReg = AddrDisc; + } + + unsigned PACOpc = getPACOpcodeForKey(Key, DiscReg == AArch64::XZR); + auto MIB = BuildMI(MBB, MBBI, DL, TII->get(PACOpc), AArch64::X16) + .addReg(AArch64::X16); + if (DiscReg != AArch64::XZR) + MIB.addReg(DiscReg); + + MI.eraseFromParent(); + return true; +} + + +bool AArch64ExpandHardenedPseudos::runOnMachineFunction(MachineFunction &MF) { + LLVM_DEBUG(dbgs() << "***** AArch64ExpandHardenedPseudos *****\n"); + + bool Modified = false; + for (auto &MBB : MF) { + for (auto MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ) { + auto &MI = *MBBI++; + Modified |= expandMI(MI); + } + } + return Modified; +} + +FunctionPass *llvm::createAArch64ExpandHardenedPseudosPass() { + return new AArch64ExpandHardenedPseudos(); 
+} diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp index 082e17e44d0430..7b0c7b7cbd2e82 100644 --- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -662,6 +662,37 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, MI.eraseFromParent(); return true; } + case AArch64::XPACIuntied: { + const MachineOperand &LHS = MI.getOperand(0); + const MachineOperand &RHS = MI.getOperand(1); + // If the registers are the same, just lower to the "tied" version. + // $x0 = XPACIuntied $x0 -> $x0 = XPACI $x0. + if (LHS.getReg() == RHS.getReg()) { + MachineInstrBuilder DefMIB = + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::XPACI)) + .add(LHS) + .add(RHS); + transferImpOps(MI, DefMIB, DefMIB); + } else { + // $x0 = XPACIuntied $x1 + // -> + // mov $x0, $x1 + // XPACI $x0. + MachineInstrBuilder DefMIB = + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXrs)) + .addReg(LHS.getReg()) + .addReg(AArch64::XZR) + .add(RHS) + .addImm(0); + MachineInstrBuilder UseMIB = + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::XPACI), + LHS.getReg()) + .addReg(LHS.getReg()); + transferImpOps(MI, UseMIB, DefMIB); + } + MI.eraseFromParent(); + return true; + } case AArch64::IRGstack: { MachineFunction &MF = *MBB.getParent(); const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp index 98410c2e747eaa..b35db2a422ed17 100644 --- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp +++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp @@ -470,6 +470,12 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) { unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass); unsigned ResultReg; + // Authenticated global references have special handling. + // Fallback to SDAG. 
+ if (const GlobalVariable *GVB = dyn_cast<GlobalVariable>(GV)) + if (GVB->getSection() == "llvm.ptrauth") + return 0; + if (OpFlags & AArch64II::MO_GOT) { // ADRP + LDRX BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP), diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index 042d8fdcc51d0e..be95f08a4fbf94 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -228,6 +228,13 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { getSVEStackSize(MF)); } +bool AArch64FrameLowering::shouldAuthenticateLR( + const MachineFunction &MF) const { + // Return address authentication can be enabled at the function level, using + // the "ptrauth-returns" attribute. + return MF.getFunction().hasFnAttribute("ptrauth-returns"); +} + /// hasFP - Return true if the specified function should have a dedicated frame /// pointer register. bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const { @@ -880,6 +887,18 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF, .setMIFlags(MachineInstr::FrameSetup); } + // If we're saving LR, sign it first. + if (shouldAuthenticateLR(MF)) { + if (LLVM_UNLIKELY(!Subtarget.hasPA())) + report_fatal_error("arm64e LR authentication requires ptrauth"); + for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo()) { + if (Info.getReg() != AArch64::LR) + continue; + BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACIBSP)) + .setMIFlags(MachineInstr::FrameSetup); + } + } + // All calls are tail calls in GHC calling conv, and functions have no // prologue/epilogue. if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; @@ -1294,7 +1313,7 @@ static void InsertReturnAddressAuth(MachineFunction &MF, // this instruction can safely used for any v8a architecture. 
// From v8.3a onwards there are optimised authenticate LR and return // instructions, namely RETA{A,B}, that can be used instead. - if (Subtarget.hasV8_3aOps() && MBBI != MBB.end() && + if (Subtarget.hasPA() && MBBI != MBB.end() && MBBI->getOpcode() == AArch64::RET_ReallyLR) { BuildMI(MBB, MBBI, DL, TII->get(ShouldSignWithAKey(MF) ? AArch64::RETAA : AArch64::RETAB)) @@ -1340,6 +1359,9 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi || RetOpcode == AArch64::TCRETURNri || RetOpcode == AArch64::TCRETURNriBTI; + + IsTailCallReturn |= RetOpcode == AArch64::AUTH_TCRETURNrii || + RetOpcode == AArch64::AUTH_TCRETURNriri; IsFunclet = isFuncletReturnInstr(*MBBI); } @@ -1352,6 +1374,32 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; + // If we're restoring LR, authenticate it before returning. + // Use scope_exit to ensure we do that last on all return paths. + auto InsertAuthLROnExit = make_scope_exit([&]() { + if (shouldAuthenticateLR(MF)) { + if (LLVM_UNLIKELY(!Subtarget.hasPA())) + report_fatal_error("arm64e LR authentication requires ptrauth"); + for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo()) { + if (Info.getReg() != AArch64::LR) + continue; + MachineBasicBlock::iterator TI = MBB.getFirstTerminator(); + if (TI != MBB.end() && TI->getOpcode() == AArch64::RET_ReallyLR) { + // If there is a terminator and it's a RET, we can fold AUTH into it. + // Be careful to keep the implicitly returned registers. + // By now, we don't need the ReallyLR pseudo, since it's only there + // to make it possible for LR to be used for non-RET purposes, and + // that happens in RA and PEI. + BuildMI(MBB, TI, DL, TII->get(AArch64::RETAB)).copyImplicitOps(*TI); + MBB.erase(TI); + } else { + // Otherwise, we could be in a shrink-wrapped or tail-calling block. 
+ BuildMI(MBB, TI, DL, TII->get(AArch64::AUTIBSP)); + } + } + } + }); + // Initial and residual are named for consistency with the prologue. Note that // in the epilogue, the residual adjustment is executed first. uint64_t ArgumentPopSize = 0; diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h index ac150e86c9eb5d..2022a9511f6adc 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h @@ -102,6 +102,7 @@ class AArch64FrameLowering : public TargetFrameLowering { bool shouldCombineCSRLocalStackBump(MachineFunction &MF, unsigned StackBumpBytes) const; int64_t determineSVEStackSize(MachineFrameInfo &MF, unsigned &MaxAlign) const; + bool shouldAuthenticateLR(const MachineFunction &MF) const; }; } // End llvm namespace diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 4d4f76e8e91289..f5f94fd76dc0cf 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -178,6 +178,8 @@ class AArch64DAGToDAGISel : public SelectionDAGISel { bool tryIndexedLoad(SDNode *N); + bool tryAuthLoad(SDNode *N); + bool trySelectStackSlotTagP(SDNode *N); void SelectTagP(SDNode *N); @@ -1240,6 +1242,133 @@ bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) { return true; } +bool AArch64DAGToDAGISel::tryAuthLoad(SDNode *N) { + LoadSDNode *LD = cast(N); + EVT VT = LD->getMemoryVT(); + if (VT != MVT::i64) + return false; + + assert(LD->getExtensionType() == ISD::NON_EXTLOAD && "invalid 64bit extload"); + + ISD::MemIndexedMode AM = LD->getAddressingMode(); + bool isPre = AM == ISD::PRE_INC; + if (!isPre && AM != ISD::UNINDEXED) + return false; + + SDValue Chain = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); + + SDValue Base = Ptr; + + int64_t OffsetVal = 0; + if (isPre) { + OffsetVal = cast(LD->getOffset())->getSExtValue(); + } else if 
(CurDAG->isBaseWithConstantOffset(Base)) { + // We support both 'base' and 'base + constant offset' modes. + ConstantSDNode *RHS = dyn_cast(Base.getOperand(1)); + if (!RHS) + return false; + OffsetVal = RHS->getSExtValue(); + Base = Base.getOperand(0); + } + + if (!isShiftedInt<10, 3>(OffsetVal)) + return false; + + // The base must be of the form: + // (int_ptrauth_auth , da/db, 0) + if (Base.getOpcode() != ISD::INTRINSIC_WO_CHAIN) + return false; + + unsigned IntID = cast(Base.getOperand(0))->getZExtValue(); + if (IntID != Intrinsic::ptrauth_auth) + return false; + unsigned IntKey = cast(Base.getOperand(2))->getZExtValue(); + if (!isNullConstant(Base.getOperand(3))) + return false; + + // If the pointer is an address computation based on an intermediate auth + // that's used more than once, it's not worth folding the auth, as we can't + // writeback just the auth result (without the address computation). + // + // FIXME: we can turn it into an unchecked auth though. + if (OffsetVal && !Base.hasOneUse()) + return false; + + Base = Base.getOperand(1); + + // If this is an indexed pre-inc load, we obviously need the writeback form. + bool needsWriteback = isPre; + // If not, but the base authenticated pointer has any other use, it's + // beneficial to use the writeback form, to "writeback" the auth, even if + // there is no base+offset addition. + if (!Ptr.hasOneUse()) { + needsWriteback = true; + + // However, we can only do that if we don't introduce cycles between the + // load node and any other user of the pointer computation nodes. That can + // happen if the load node uses any of said other users. + // In other words: we can only do this transformation if none of the other + // uses of the pointer computation to be folded are predecessors of the load + // we're folding into. + // + // Visited is a cache containing nodes that are known predecessors of N. + // Worklist is the set of nodes we're looking for predecessors of. 
+ // For the first lookup, that only contains the load node N. Each call to + // hasPredecessorHelper adds any of the potential predecessors of N to the + // Worklist. + SmallPtrSet Visited; + SmallVector Worklist; + Worklist.push_back(N); + for (SDNode *U : Ptr.getNode()->uses()) + if (SDNode::hasPredecessorHelper(U, Visited, Worklist, /*Max=*/32, + /*TopologicalPrune=*/true)) + return false; + } + + unsigned Opc = 0; + switch (IntKey) { + case AArch64PACKey::DA: + Opc = needsWriteback ? AArch64::LDRAAwriteback : AArch64::LDRAAindexed; + break; + case AArch64PACKey::DB: + Opc = needsWriteback ? AArch64::LDRABwriteback : AArch64::LDRABindexed; + break; + default: + return false; + } + + SDLoc DL(N); + // The offset is encoded as scaled, for an element size of 8 bytes. + SDValue Offset = CurDAG->getTargetConstant(OffsetVal / 8, DL, MVT::i64); + SDValue Ops[] = { Base, Offset, Chain }; + SDNode *Res = needsWriteback ? + CurDAG->getMachineNode(Opc, DL, MVT::i64, MVT::i64, MVT::Other, Ops) : + CurDAG->getMachineNode(Opc, DL, MVT::i64, MVT::Other, Ops); + + if (isPre) { + // If the original load was pre-inc, the resulting LDRA is writeback. + assert(needsWriteback && "preinc loads can't be selected into non-wb ldra"); + ReplaceUses(SDValue(N, 1), SDValue(Res, 0)); // writeback + ReplaceUses(SDValue(N, 0), SDValue(Res, 1)); // loaded value + ReplaceUses(SDValue(N, 2), SDValue(Res, 2)); // chain + } else if (needsWriteback) { + // If the original load was unindexed, but we emitted a writeback form, + // we need to replace the uses of the original auth(signedbase)[+offset] + // computation. + ReplaceUses(Ptr, SDValue(Res, 0)); // writeback + ReplaceUses(SDValue(N, 0), SDValue(Res, 1)); // loaded value + ReplaceUses(SDValue(N, 1), SDValue(Res, 2)); // chain + } else { + // Otherwise, we selected a simple load to a simple non-wb ldra. 
+ assert(Ptr.hasOneUse() && "reused auth ptr should be folded into ldra"); + ReplaceUses(SDValue(N, 0), SDValue(Res, 0)); // loaded value + ReplaceUses(SDValue(N, 1), SDValue(Res, 1)); // chain + } + CurDAG->RemoveDeadNode(N); + return true; +} + void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc, unsigned SubRegIdx) { SDLoc dl(N); @@ -2902,8 +3031,8 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) { break; case ISD::LOAD: { - // Try to select as an indexed load. Fall through to normal processing - // if we can't. + if (tryAuthLoad(Node)) + return; if (tryIndexedLoad(Node)) return; break; @@ -3354,6 +3483,67 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) { if (tryMULLV64LaneV128(IntNo, Node)) return; break; + + case Intrinsic::ptrauth_resign: { + SDLoc DL(Node); + // IntrinsicID is operand #0 + SDValue Val = Node->getOperand(1); + SDValue AUTKey = Node->getOperand(2); + SDValue AUTDisc = Node->getOperand(3); + SDValue PACKey = Node->getOperand(4); + SDValue PACDisc = Node->getOperand(5); + + unsigned AUTKeyC = cast(AUTKey)->getZExtValue(); + unsigned PACKeyC = cast(PACKey)->getZExtValue(); + + AUTKey = CurDAG->getTargetConstant(AUTKeyC, DL, MVT::i64); + PACKey = CurDAG->getTargetConstant(PACKeyC, DL, MVT::i64); + + SDValue ImpDef = SDValue( + CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64), 0); + SDValue X16Copy = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, + AArch64::X16, Val, SDValue()); + SDValue X17Copy = + CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AArch64::X17, + ImpDef, X16Copy.getValue(1)); + + SDValue Ops[] = {AUTKey, AUTDisc, PACKey, PACDisc, X17Copy.getValue(1)}; + SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue); + SDNode *N = CurDAG->getMachineNode(AArch64::AUTPAC, DL, VTs, Ops); + N = CurDAG->getCopyFromReg(SDValue(N, 0), DL, AArch64::X16, MVT::i64, + SDValue(N, 1)).getNode(); + ReplaceNode(Node, N); + return; + } + + case Intrinsic::ptrauth_auth: { + SDLoc DL(Node); + // IntrinsicID 
is operand #0 + SDValue Val = Node->getOperand(1); + SDValue AUTKey = Node->getOperand(2); + SDValue AUTDisc = Node->getOperand(3); + + unsigned AUTKeyC = cast(AUTKey)->getZExtValue(); + AUTKey = CurDAG->getTargetConstant(AUTKeyC, DL, MVT::i64); + + SDValue ImpDef = SDValue( + CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64), 0); + SDValue X16Copy = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, + AArch64::X16, Val, SDValue()); + SDValue X17Copy = + CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AArch64::X17, + ImpDef, X16Copy.getValue(1)); + + SDValue Ops[] = {AUTKey, AUTDisc, X17Copy.getValue(1)}; + + SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue); + SDNode *N = CurDAG->getMachineNode(AArch64::AUT, DL, VTs, Ops); + N = CurDAG->getCopyFromReg(SDValue(N, 0), DL, AArch64::X16, MVT::i64, + SDValue(N, 1)).getNode(); + ReplaceNode(Node, N); + return; + } + } break; } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 8e3a524ed2cb1d..5b4de5e6db8674 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -117,6 +117,10 @@ EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden, "optimization"), cl::init(true)); +static cl::opt AArch64PtrAuthGlobalDynamicMat( + "aarch64-ptrauth-global-dynamic-mat", cl::Hidden, cl::init(true), + cl::desc("Always materialize llvm.ptrauth global references dynamically")); + /// Value type used for condition codes. 
static const MVT MVT_CC = MVT::i32; @@ -217,6 +221,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::BR_JT, MVT::Other, Custom); setOperationAction(ISD::JumpTable, MVT::i64, Custom); + setOperationAction(ISD::PtrAuthGlobalAddress, MVT::i64, Custom); + setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); @@ -1182,6 +1188,8 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { switch ((AArch64ISD::NodeType)Opcode) { case AArch64ISD::FIRST_NUMBER: break; case AArch64ISD::CALL: return "AArch64ISD::CALL"; + case AArch64ISD::AUTH_CALL: return "AArch64ISD::AUTH_CALL"; + case AArch64ISD::AUTH_TC_RETURN: return "AArch64ISD::AUTH_TC_RETURN"; case AArch64ISD::ADRP: return "AArch64ISD::ADRP"; case AArch64ISD::ADR: return "AArch64ISD::ADR"; case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow"; @@ -2971,6 +2979,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op, return LowerGlobalAddress(Op, DAG); case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); + case ISD::PtrAuthGlobalAddress: + return LowerPtrAuthGlobalAddress(Op, DAG); case ISD::SETCC: return LowerSETCC(Op, DAG); case ISD::BR_CC: @@ -4031,6 +4041,13 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, assert(Subtarget->isTargetWindows() && "Windows is the only supported COFF target"); Callee = getGOT(G, DAG, AArch64II::MO_DLLIMPORT); + } else if (GV->getSection() == "llvm.ptrauth") { + // FIXME: this should deal with PtrAuthGlobalAddress instead + // If we're directly referencing a ptrauth wrapper, we need to materialize + // it from its __auth_ptr slot. + // We combine some of these into the call; ideally we'd catch them all. 
+ Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, /*TargetFlags=*/0); + Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); } else { const GlobalValue *GV = G->getGlobal(); Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0); @@ -4057,6 +4074,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, InFlag = Chain.getValue(1); } + unsigned Opc = IsTailCall ? AArch64ISD::TC_RETURN : AArch64ISD::CALL; + std::vector Ops; Ops.push_back(Chain); Ops.push_back(Callee); @@ -4068,6 +4087,17 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); } + if (CLI.PAI) { + const uint64_t Key = CLI.PAI->Key; + // Authenticated calls only support IA and IB. + if (Key > 1) + report_fatal_error("Unsupported key kind for authenticating call"); + + Opc = IsTailCall ? AArch64ISD::AUTH_TC_RETURN : AArch64ISD::AUTH_CALL; + Ops.push_back(DAG.getTargetConstant(Key, DL, MVT::i32)); + Ops.push_back(CLI.PAI->Discriminator); + } + // Add argument registers to the end of the list so that they are known live // into the call. for (auto &RegToPass : RegsToPass) @@ -4119,13 +4149,13 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, // actual call instruction. if (IsTailCall) { MF.getFrameInfo().setHasTailCall(); - SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops); + SDValue Ret = DAG.getNode(Opc, DL, NodeTys, Ops); DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo)); return Ret; } // Returns a chain and a flag for retval copy to use. - Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops); + Chain = DAG.getNode(Opc, DL, NodeTys, Ops); InFlag = Chain.getValue(1); DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo)); @@ -4458,10 +4488,24 @@ AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op, // normal AArch64 call node: x0 takes the address of the descriptor, and // returns the address of the variable in this thread. 
Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue()); + + unsigned Opcode = AArch64ISD::CALL; + SmallVector Ops; + Ops.push_back(Chain); + Ops.push_back(FuncTLVGet); + + // With ptrauth-calls, the tlv access thunk pointer is authenticated (IA, 0). + if (DAG.getMachineFunction().getFunction().hasFnAttribute("ptrauth-calls")) { + Opcode = AArch64ISD::AUTH_CALL; + Ops.push_back(DAG.getTargetConstant(AArch64PACKey::IA, DL, MVT::i32)); + Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64)); + } + + Ops.push_back(DAG.getRegister(AArch64::X0, MVT::i64)); + Ops.push_back(DAG.getRegisterMask(Mask)); + Ops.push_back(Chain.getValue(1)); Chain = - DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), - Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64), - DAG.getRegisterMask(Mask), Chain.getValue(1)); + DAG.getNode(Opcode, DL, DAG.getVTList(MVT::Other, MVT::Glue), Ops); return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1)); } @@ -4673,6 +4717,121 @@ SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, llvm_unreachable("Unexpected platform trying to use TLS"); } +SDValue AArch64TargetLowering::LowerPtrAuthGlobalAddressViaGOT( + SDValue Wrapper, AArch64PACKey::ID Key, bool HasAddrDiversity, + GlobalAddressSDNode *PtrBaseGA, SelectionDAG &DAG) const { + + // Most llvm.ptrauth global references can be lowered using a stub. + // However, we cannot do that for address-diversified stub references: + // the stub relocation would be signed with the stub address, which + // is meaningless. + if (HasAddrDiversity) + return SDValue(); + + // Both key allocation and the wrapper usage support are target-specific. + if (!Subtarget->isTargetMachO()) + llvm_unreachable("Unimplemented ptrauth global lowering"); + + // Process-specific keys are dangerous when used in relocations. + if (Key == AArch64PACKey::IB || Key == AArch64PACKey::DB) { + // ..but that's our only way to implement weak references. Ban the combo. 
+ if (PtrBaseGA->getGlobal()->hasExternalWeakLinkage()) + report_fatal_error("Unsupported weak B-key ptrauth global reference"); + return SDValue(); + } + + // In general, we don't want to abuse the pointer section, because + // that provides an attractive target, containing a cluster of + // already-signed pointers. + // + // FIXME: As an optimization, we could use the pointer section for + // global references that are used often. + // + // For now, materialize references dynamically, whenever possible. + // + // Don't do it for weak references, to avoid emitting another null-check. + if (AArch64PtrAuthGlobalDynamicMat && + !PtrBaseGA->getGlobal()->hasExternalWeakLinkage()) + return SDValue(); + + EVT VT = Wrapper.getValueType(); + SDLoc DL(Wrapper); + + // Use the wrapper directly, and let AsmPrinter turn it into $auth_ptr$ + // That's only implemented for MachO. + GlobalAddressSDNode *WrapperN = cast(Wrapper.getNode()); + Wrapper = DAG.getTargetGlobalAddress(WrapperN->getGlobal(), DL, VT, + /*Offset=*/0, /*TargetFlags=*/0); + return DAG.getNode(AArch64ISD::LOADgot, DL, VT, Wrapper); +} + +SDValue +AArch64TargetLowering::LowerPtrAuthGlobalAddress(SDValue Op, + SelectionDAG &DAG) const { + SDValue Wrapper = Op.getOperand(0); + SDValue Ptr = Op.getOperand(1); + uint64_t KeyC = Op.getConstantOperandVal(2); + SDValue AddrDiscriminator = Op.getOperand(3); + SDValue Discriminator = Op.getOperand(4); + uint64_t DiscriminatorC = Op.getConstantOperandVal(4); + EVT VT = Op.getValueType(); + SDLoc DL(Op); + + bool HasAddrDiversity = !isNullConstant(AddrDiscriminator); + + uint64_t PtrOffsetC = 0; + if (Ptr.getOpcode() == ISD::ADD) { + PtrOffsetC = Ptr.getConstantOperandVal(1); + Ptr = Ptr.getOperand(0); + } + GlobalAddressSDNode *PtrN = cast(Ptr.getNode()); + SDValue TPtr = DAG.getTargetGlobalAddress(PtrN->getGlobal(), DL, VT, + /*Offset=*/0, /*TargetFlags=*/0); + PtrOffsetC += PtrN->getOffset(); + assert(PtrN->getTargetFlags() == 0 && "Unsupported tflags on ptrauth 
global"); + + // Emit code to dynamically sign llvm.ptrauth global references. + // The alternative is to use a section of pointers (see ViaGOT helper). + // + // FIXME: An alternative lowering would be to simply use the address + // discriminator as a pointer and load the fully signed value from + // there. + if (SDValue Ld = LowerPtrAuthGlobalAddressViaGOT(Wrapper, + (AArch64PACKey::ID)KeyC, + HasAddrDiversity, PtrN, DAG)) + return Ld; + + + // If we're using an address discriminator, compute the full discriminator + // here, to avoid needing to carry a potentially complex constant expr all the + // way to the pseudo expansion. + if (HasAddrDiversity) { + SDValue BlendIntID = DAG.getTargetConstant(Intrinsic::ptrauth_blend, DL, VT); + AddrDiscriminator = + DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, BlendIntID, + AddrDiscriminator, Discriminator); + DiscriminatorC = 0; + } + + SDValue PtrOffset = DAG.getTargetConstant(PtrOffsetC, DL, MVT::i64); + SDValue Key = DAG.getTargetConstant(KeyC, DL, MVT::i32); + Discriminator = DAG.getTargetConstant(DiscriminatorC, DL, VT); + + SDValue X16Copy = + DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X16, + DAG.getUNDEF(MVT::i64), SDValue()); + SDValue X17Copy = + DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X17, + DAG.getUNDEF(MVT::i64), X16Copy.getValue(1)); + + SDNode *MOV = + DAG.getMachineNode(AArch64::MOVaddrPAC, DL, {MVT::Other, MVT::Glue}, + {TPtr, PtrOffset, Key, AddrDiscriminator, + Discriminator, X17Copy.getValue(1)}); + return DAG.getCopyFromReg(SDValue(MOV, 0), DL, AArch64::X16, MVT::i64, + SDValue(MOV, 1)); +} + SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); ISD::CondCode CC = cast(Op.getOperand(1))->get(); @@ -5274,6 +5433,21 @@ SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op, SDValue Entry = Op.getOperand(2); int JTI = cast(JT.getNode())->getIndex(); + // With aarch64-hardened-codegen, we only expand the full jump table dispatch + 
// sequence later, to guarantee the integrity of the intermediate values. + if (DAG.getMachineFunction().getFunction() + .hasFnAttribute("jump-table-hardening") || + Subtarget->getTargetTriple().getArchName() == "arm64e") { + if (getTargetMachine().getCodeModel() != CodeModel::Small) + report_fatal_error("Unsupported code-model for hardened jump-table"); + SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X16, + Entry, SDValue()); + SDNode *B = DAG.getMachineNode(AArch64::BR_JumpTable, DL, MVT::Other, + DAG.getTargetJumpTable(JTI, MVT::i32), + Chain.getValue(0), Chain.getValue(1)); + return SDValue(B, 0); + } + SDNode *Dest = DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT, Entry, DAG.getTargetJumpTable(JTI, MVT::i32)); @@ -5576,18 +5750,30 @@ SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); + SDValue ReturnAddr; + SDValue FrameAddr; unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); if (Depth) { - SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); + SDNodeFlags Flags; + Flags.setNoUnsignedWrap(true); + FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout())); - return DAG.getLoad(VT, DL, DAG.getEntryNode(), - DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), - MachinePointerInfo()); + ReturnAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), + DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset, Flags), + MachinePointerInfo()); + } else { + unsigned LRReg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass); + ReturnAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, LRReg, VT); } - // Return LR, which contains the return address. Mark it an implicit live-in. - unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass); - return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); + // If we're doing LR signing, we need to fixup ReturnAddr: strip it. 
+ if (!MF.getFunction().hasFnAttribute("ptrauth-returns")) + return ReturnAddr; + + return DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, DL, VT, + DAG.getConstant(Intrinsic::ptrauth_strip, DL, MVT::i32), ReturnAddr, + DAG.getConstant(AArch64PACKey::IB, DL, MVT::i32)); } /// LowerShiftRightParts - Lower SRA_PARTS, which returns two diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index 5a76f0c467b770..a83d6ce44092f4 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -30,6 +30,12 @@ enum NodeType : unsigned { WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses. CALL, // Function call. + // Function call, authenticating the callee value first: + // AUTH_CALL chain, callee, auth key #, discriminator, operands. + AUTH_CALL, + // AUTH_TC_RETURN chain, callee, fpdiff, auth key #, discriminator, operands. + AUTH_TC_RETURN, + // Produces the full sequence of instructions for getting the thread pointer // offset of a variable into X0, using the TLSDesc model. TLSDESC_CALLSEQ, @@ -545,6 +551,10 @@ class AArch64TargetLowering : public TargetLowering { return true; } + bool supportPtrAuthBundles() const override { + return true; + } + /// Enable aggressive FMA fusion on targets that want it. 
bool enableAggressiveFMAFusion(EVT VT) const override; @@ -651,6 +661,12 @@ class AArch64TargetLowering : public TargetLowering { SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL, SelectionDAG &DAG) const; SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerPtrAuthGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerPtrAuthGlobalAddressViaGOT(SDValue Wrapper, + AArch64PACKey::ID Key, + bool HasAddrDiversity, + GlobalAddressSDNode *PtrBaseGA, + SelectionDAG &DAG) const; SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td index f555e412330723..10d6bd68ff9abf 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td +++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td @@ -694,6 +694,13 @@ def i64_imm0_65535 : Operand, TImmLeaf; } +def imm64_0_65535 : Operand, ImmLeaf { + let ParserMatchClass = AsmImmRange<0, 65535>; + let PrintMethod = "printImmHex"; +} + // imm0_255 predicate - True if the immediate is in the range [0,255]. 
def Imm0_255Operand : AsmImmRange<0,255>; @@ -1427,6 +1434,7 @@ class AuthOneOperand opc, bits<1> M, string asm> let Inst{9-5} = Rn; } +let Uses = [LR,SP] in class AuthReturn op, bits<1> M, string asm> : AuthBase { let Inst{24} = 0; @@ -1463,6 +1471,9 @@ multiclass AuthLoad { def : InstAlias(NAME # "indexed") GPR64:$Rt, GPR64sp:$Rn, 0)>; + + def : InstAlias(NAME # "writeback") GPR64sp:$wback, GPR64:$Rt, 0)>; } //--- @@ -1692,9 +1703,10 @@ class OneXRegData opc, string asm, SDPatternOperator node> let Inst{31} = 1; } -class SignAuthOneData opcode_prefix, bits<2> opcode, string asm> - : I<(outs GPR64:$Rd), (ins GPR64sp:$Rn), asm, "\t$Rd, $Rn", "", - []>, +class SignAuthOneData opcode_prefix, bits<2> opcode, string asm, + Intrinsic op> + : I<(outs GPR64:$dst), (ins GPR64:$Rd, GPR64sp:$Rn), asm, "\t$Rd, $Rn", + "$dst = $Rd", [(set GPR64:$dst, (op GPR64:$Rd, opcode, GPR64sp:$Rn))]>, Sched<[WriteI, ReadI]> { bits<5> Rd; bits<5> Rn; @@ -1705,8 +1717,11 @@ class SignAuthOneData opcode_prefix, bits<2> opcode, string asm> let Inst{4-0} = Rd; } -class SignAuthZero opcode_prefix, bits<2> opcode, string asm> - : I<(outs GPR64:$Rd), (ins), asm, "\t$Rd", "", []>, Sched<[]> { +class SignAuthZero opcode_prefix, bits<2> opcode, string asm, + SDPatternOperator op> + : I<(outs GPR64:$dst), (ins GPR64:$Rd), asm, "\t$Rd", "$dst = $Rd", + [(set GPR64:$dst, (op GPR64:$Rd, opcode, (i64 0)))]>, + Sched<[]> { bits<5> Rd; let Inst{31-15} = 0b11011010110000010; let Inst{14-12} = opcode_prefix; diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 5c35e5bcdd30e7..d61dbface995fe 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -110,6 +110,22 @@ unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { // This gets lowered to an instruction sequence which takes 16 bytes NumBytes = 16; break; + case AArch64::AUT: + NumBytes = 24; + break; + case 
AArch64::AUTPAC: + NumBytes = 28; + break; + case AArch64::MOVaddrPAC: + // 12 fixed + 16 variable, for pointer offset, and discriminator + // We could potentially model the variable size overhead more accurately. + NumBytes = 28; + break; + case AArch64::BR_JumpTable: + // 28 fixed + 16 variable, for table size materialization + // We could potentially model the variable size overhead more accurately. + NumBytes = 44; + break; case AArch64::JumpTableDest32: case AArch64::JumpTableDest16: case AArch64::JumpTableDest8: diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h index 1688045e4fb86c..a46d70aee6f228 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h @@ -360,6 +360,34 @@ static inline bool isIndirectBranchOpcode(int Opc) { return Opc == AArch64::BR; } +static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) { + using namespace AArch64PACKey; + switch (K) { + case IA: case IB: return AArch64::XPACI; + case DA: case DB: return AArch64::XPACD; + } +} + +static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) { + using namespace AArch64PACKey; + switch (K) { + case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA; + case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB; + case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA; + case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB; + } +} + +static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) { + using namespace AArch64PACKey; + switch (K) { + case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA; + case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB; + case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA; + case DB: return Zero ? 
AArch64::PACDZB : AArch64::PACDB; + } +} + // struct TSFlags { #define TSFLAG_ELEMENT_SIZE_TYPE(X) (X) // 3-bits #define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 1-bit diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 4e1be2696d59c2..bd3b32343bdf2d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -604,6 +604,17 @@ def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch), Sched<[]>; } +def BR_JumpTable : Pseudo<(outs), (ins i32imm:$jti), []>, + Sched<[]> { + let isBranch = 1; + let isTerminator = 1; + let isIndirectBranch = 1; + let isBarrier = 1; + let Defs = [X16,X17,NZCV]; + let Uses = [X16]; + let Size = 44; // 28 fixed + 16 variable, for table size materialization +} + // Space-consuming pseudo to aid testing of placement and reachability // algorithms. Immediate operand is the number of bytes this "instruction" // occupies; register operands can be used to enforce dependency and constrain @@ -724,6 +735,17 @@ defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd, defm FCMLA : SIMDIndexedTiedComplexHSD<1, 0, 1, complexrotateop, "fcmla", null_frag>; +def AArch64authcall : SDNode<"AArch64ISD::AUTH_CALL", + SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; + +def AArch64authtcret: SDNode<"AArch64ISD::AUTH_TC_RETURN", + SDTypeProfile<0, 4, [SDTCisPtrTy<0>, + SDTCisVT<2, i32>, + SDTCisVT<3, i64>]>, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; + // v8.3a Pointer Authentication // These instructions inhabit part of the hint space and so can be used for // armv8 targets @@ -752,34 +774,59 @@ let Uses = [LR], Defs = [LR], CRm = 0b0000 in { // These pointer authentication isntructions require armv8.3a let Predicates = [HasPA] in { - multiclass SignAuth prefix, bits<3> prefix_z, string asm> { - def IA : SignAuthOneData; - def IB : SignAuthOneData; - def DA : 
SignAuthOneData; - def DB : SignAuthOneData; - def IZA : SignAuthZero; - def DZA : SignAuthZero; - def IZB : SignAuthZero; - def DZB : SignAuthZero; + multiclass SignAuth prefix, bits<3> prefix_z, string asm, + Intrinsic op> { + def IA : SignAuthOneData; + def IB : SignAuthOneData; + def DA : SignAuthOneData; + def DB : SignAuthOneData; + def IZA : SignAuthZero; + def DZA : SignAuthZero; + def IZB : SignAuthZero; + def DZB : SignAuthZero; } - defm PAC : SignAuth<0b000, 0b010, "pac">; - defm AUT : SignAuth<0b001, 0b011, "aut">; + defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>; + defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>; + + def XPACI : SignAuthZero<0b100, 0b00, "xpaci", null_frag>; + // Pseudo of the previous instruction with untied operands. Lowers to: + // mov $dst, $src + // xpaci $dst + def XPACIuntied : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>; + def : Pat<(int_ptrauth_strip GPR64:$Rd, 0), (XPACIuntied GPR64:$Rd)>; + def : Pat<(int_ptrauth_strip GPR64:$Rd, 1), (XPACIuntied GPR64:$Rd)>; - def XPACI : SignAuthZero<0b100, 0b00, "xpaci">; - def XPACD : SignAuthZero<0b100, 0b01, "xpacd">; - def PACGA : SignAuthTwoOperand<0b1100, "pacga", null_frag>; + def XPACD : SignAuthZero<0b100, 0b01, "xpacd", null_frag>; + def : Pat<(int_ptrauth_strip GPR64:$Rd, 2), (XPACD GPR64:$Rd)>; + def : Pat<(int_ptrauth_strip GPR64:$Rd, 3), (XPACD GPR64:$Rd)>; + + def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>; // Combined Instructions - def BRAA : AuthBranchTwoOperands<0, 0, "braa">; - def BRAB : AuthBranchTwoOperands<0, 1, "brab">; - def BLRAA : AuthBranchTwoOperands<1, 0, "blraa">; - def BLRAB : AuthBranchTwoOperands<1, 1, "blrab">; + let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { + def BRAA : AuthBranchTwoOperands<0, 0, "braa">; + def BRAB : AuthBranchTwoOperands<0, 1, "brab">; + def BRAAZ : AuthOneOperand<0b000, 0, "braaz">; + def BRABZ : AuthOneOperand<0b000, 1, "brabz">; + } + + let 
isCall = 1, Defs = [LR], Uses = [SP] in { + def BLRAA : AuthBranchTwoOperands<1, 0, "blraa">; + def BLRAB : AuthBranchTwoOperands<1, 1, "blrab">; - def BRAAZ : AuthOneOperand<0b000, 0, "braaz">; - def BRABZ : AuthOneOperand<0b000, 1, "brabz">; - def BLRAAZ : AuthOneOperand<0b001, 0, "blraaz">; - def BLRABZ : AuthOneOperand<0b001, 1, "blrabz">; + def BLRAAZ : AuthOneOperand<0b001, 0, "blraaz">; + def BLRABZ : AuthOneOperand<0b001, 1, "blrabz">; + } + + def : Pat<(AArch64authcall GPR64:$Rn, (i32 0), GPR64sp:$Rm), + (BLRAA GPR64:$Rn, GPR64:$Rm)>; + def : Pat<(AArch64authcall GPR64:$Rn, (i32 1), GPR64sp:$Rm), + (BLRAB GPR64:$Rn, GPR64:$Rm)>; + def : Pat<(AArch64authcall GPR64:$Rn, (i32 0), (i64 0)), + (BLRAAZ GPR64:$Rn)>; + def : Pat<(AArch64authcall GPR64:$Rn, (i32 1), (i64 0)), + (BLRABZ GPR64:$Rn)>; let isReturn = 1, isTerminator = 1, isBarrier = 1 in { def RETAA : AuthReturn<0b010, 0, "retaa">; @@ -791,6 +838,64 @@ let Predicates = [HasPA] in { defm LDRAA : AuthLoad<0, "ldraa", simm10Scaled>; defm LDRAB : AuthLoad<1, "ldrab", simm10Scaled>; + // AUT pseudo. + // This directly manipulates x16/x17, which are the only registers the OS + // guarantees are safe to use for sensitive operations. + def AUT : Pseudo<(outs), (ins i32imm:$Key, GPR64all:$Rn), []>, + Sched<[WriteI, ReadI]> { + let isCodeGenOnly = 1; + let hasSideEffects = 1; + let mayStore = 0; + let mayLoad = 0; + let Defs = [X16,X17,NZCV]; + let Uses = [X16,X17]; + } + + // AUT and re-PAC a value, using different keys/data. + // This directly manipulates x16/x17, which are the only registers the OS + // guarantees are safe to use for sensitive operations. + def AUTPAC : Pseudo<(outs), (ins i32imm:$AUTKey, GPR64:$AUTRn, + i32imm:$PACKey, GPR64:$PACRn), []>, + Sched<[WriteI, ReadI]> { + let isCodeGenOnly = 1; + let hasSideEffects = 1; + let mayStore = 0; + let mayLoad = 0; + let Defs = [X16,X17,NZCV]; + let Uses = [X16,X17]; + } + + // Materialize a signed global address. 
+ def MOVaddrPAC : Pseudo<(outs), + (ins i64imm:$Addr, i64imm:$Offset, i32imm:$Key, + GPR64:$AddrDisc, i64imm:$Disc), []>, + Sched<[WriteI, ReadI]> { + let isReMaterializable = 1; + let isCodeGenOnly = 1; + let Size = 28; // ranges from 12 to 28 + let Defs = [X16,X17]; + let Uses = [X16,X17]; + } + + let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, + Uses = [SP] in { + def AUTH_TCRETURNriri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff, + i32imm:$Key, tcGPR64:$Rn), + []>, Sched<[WriteBrReg]>; + def AUTH_TCRETURNrii : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff, + i32imm:$Key), + []>, Sched<[WriteBrReg]>; + } + + def : Pat<(AArch64authtcret tcGPR64:$dst, (i32 timm:$FPDiff), (i32 timm:$Key), + tcGPR64:$Rn), + (AUTH_TCRETURNriri tcGPR64:$dst, imm:$FPDiff, imm:$Key, + tcGPR64:$Rn)>; + + def : Pat<(AArch64authtcret tcGPR64:$dst, (i32 timm:$FPDiff), (i32 timm:$Key), + (i64 0)), + (AUTH_TCRETURNrii tcGPR64:$dst, imm:$FPDiff, imm:$Key)>; + } // v8.3a floating point conversion for javascript @@ -7175,5 +7280,15 @@ let AddedComplexity = 10 in { // FIXME: add SVE dot-product patterns. 
} +//----------------------------------------------------------------------------- +// v8.3 Pointer Authentication late patterns + +let Predicates = [HasPA] in { +def : Pat<(int_ptrauth_blend GPR64:$Rd, imm64_0_65535:$imm), + (MOVKXi GPR64:$Rd, (trunc_imm imm64_0_65535:$imm), 48)>; +def : Pat<(int_ptrauth_blend GPR64:$Rd, GPR64:$Rn), + (BFMXri GPR64:$Rd, GPR64:$Rn, 16, 15)>; +} + include "AArch64InstrAtomics.td" include "AArch64SVEInstrInfo.td" diff --git a/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp b/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp index afd5ae6bcbf2cc..c29396a01632b0 100644 --- a/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp +++ b/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp @@ -14,10 +14,14 @@ #include "AArch64MCInstLower.h" #include "MCTargetDesc/AArch64MCExpr.h" #include "Utils/AArch64BaseInfo.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineModuleInfoImpls.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/GlobalPtrAuthInfo.h" #include "llvm/IR/Mangler.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" @@ -33,9 +37,68 @@ extern cl::opt EnableAArch64ELFLocalDynamicTLSGeneration; AArch64MCInstLower::AArch64MCInstLower(MCContext &ctx, AsmPrinter &printer) : Ctx(ctx), Printer(printer) {} +static MCSymbol *getAuthGVStub(const GlobalVariable *GVB, AsmPrinter &Printer) { + auto PAI = *GlobalPtrAuthInfo::analyze(GVB); + + // Figure out the base symbol and the addend, if any. + APInt Offset(64, 0); + const Value *BaseGV = + PAI.getPointer()->stripAndAccumulateInBoundsConstantOffsets( + Printer.getDataLayout(), Offset); + + auto *BaseGVB = dyn_cast(BaseGV); + + // If we can't understand the referenced ConstantExpr, there's nothing + // else we can do: emit an error. 
+ if (!BaseGVB) { + BaseGVB = GVB; + BaseGV->getContext().emitError( + "Couldn't resolve target base/addend of llvm.ptrauth global '" + + BaseGV->getName() + "'"); + } + + uint16_t Discriminator = PAI.getDiscriminator()->getZExtValue(); + + auto *KeyC = PAI.getKey(); + assert(isUInt<2>(KeyC->getZExtValue()) && "Invalid PAC Key ID"); + AArch64PACKey::ID Key = AArch64PACKey::ID(KeyC->getZExtValue()); + + // Mangle the offset into the stub name. Avoid '-' in symbols and extra logic + // by using the uint64_t representation for negative numbers. + uint64_t OffsetV = Offset.getSExtValue(); + std::string Suffix = "$"; + if (OffsetV) + Suffix += utostr(OffsetV) + "$"; + Suffix += (Twine("auth_ptr$") + AArch64PACKeyIDToString(Key) + "$" + + utostr(Discriminator)) + .str(); + + if (PAI.hasAddressDiversity()) + report_fatal_error("Can't reference an address-diversified ptrauth global" + " in an instruction."); + + MCSymbol *MCSym = Printer.OutContext.getOrCreateSymbol( + Printer.getDataLayout().getLinkerPrivateGlobalPrefix() + "_" + + BaseGVB->getName() + Suffix); + + MachineModuleInfoMachO &MMIMachO = + Printer.MMI->getObjFileInfo(); + MachineModuleInfoMachO::AuthStubInfo &StubInfo = + MMIMachO.getAuthGVStubEntry(MCSym); + + if (!StubInfo.Pointer) + StubInfo.Pointer = Printer.lowerPtrAuthGlobalConstant(PAI); + return MCSym; +} + MCSymbol * AArch64MCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { const GlobalValue *GV = MO.getGlobal(); + + if (const GlobalVariable *GVB = dyn_cast(GV)) + if (GV->getSection() == "llvm.ptrauth") + return getAuthGVStub(GVB, Printer); + unsigned TargetFlags = MO.getTargetFlags(); const Triple &TheTriple = Printer.TM.getTargetTriple(); if (!TheTriple.isOSBinFormatCOFF()) diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp index 558bea368eff2b..3bb7208ee06d93 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp @@ -89,6 
+89,8 @@ void AArch64Subtarget::initializeProperties() { PrefFunctionLogAlignment = 4; break; case Cyclone: + case Vortex: + case Lightning: CacheLineSize = 64; PrefetchDistance = 280; MinPrefetchStride = 2048; diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h index f3212fae8e5e1a..bbd5adfae09aa4 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.h +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h @@ -52,6 +52,7 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo { ExynosM3, Falkor, Kryo, + Lightning, NeoverseE1, NeoverseN1, Saphira, @@ -60,7 +61,8 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo { ThunderXT81, ThunderXT83, ThunderXT88, - TSV110 + TSV110, + Vortex }; protected: diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp index 0ec1e667d69207..4df314aadc2fa3 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp @@ -184,6 +184,7 @@ extern "C" void LLVMInitializeAArch64Target() { initializeAArch64SpeculationHardeningPass(*PR); initializeAArch64StackTaggingPass(*PR); initializeAArch64StackTaggingPreRAPass(*PR); + initializeAArch64ExpandHardenedPseudosPass(*PR); } //===----------------------------------------------------------------------===// @@ -216,6 +217,12 @@ static std::string computeDataLayout(const Triple &TT, return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"; } +static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) { + if (CPU.empty() && TT.getArchName() == "arm64e") + return "vortex"; + return CPU; +} + static Reloc::Model getEffectiveRelocModel(const Triple &TT, Optional RM) { // AArch64 Darwin and Windows are always PIC. 
@@ -264,7 +271,8 @@ AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT, bool LittleEndian) : LLVMTargetMachine(T, computeDataLayout(TT, Options.MCOptions, LittleEndian), - TT, CPU, FS, Options, getEffectiveRelocModel(TT, RM), + TT, computeDefaultCPU(TT, CPU), FS, Options, + getEffectiveRelocModel(TT, RM), getEffectiveAArch64CodeModel(TT, CM, JIT), OL), TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) { initAsmInfo(); @@ -288,6 +296,7 @@ AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT, // MachO/CodeModel::Large, which GlobalISel does not support. if (getOptLevel() <= EnableGlobalISelAtO && TT.getArch() != Triple::aarch64_32 && + TT.getArchName() != "arm64e" && !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) { setGlobalISel(true); setGlobalISelAbort(GlobalISelAbortMode::Disable); @@ -629,6 +638,10 @@ void AArch64PassConfig::addPreEmitPass() { if (TM->getOptLevel() != CodeGenOpt::None && EnableCompressJumpTables) addPass(createAArch64CompressJumpTablesPass()); + // Expand hardened pseudo-instructions. + // Do this now to enable LOH emission. 
+ addPass(createAArch64ExpandHardenedPseudosPass()); + if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH && TM->getTargetTriple().isOSBinFormatMachO()) addPass(createAArch64CollectLOHPass()); diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp index 4fb409f020d91a..d6091ef5b8b83c 100644 --- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -164,6 +164,8 @@ class AArch64AsmParser : public MCTargetAsmParser { bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo, OperandVector &Operands); + bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc); + bool parseDirectiveArch(SMLoc L); bool parseDirectiveArchExtension(SMLoc L); bool parseDirectiveCPU(SMLoc L); @@ -264,6 +266,8 @@ class AArch64AsmParser : public MCTargetAsmParser { unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind) override; + bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override; + static bool classifySymbolRef(const MCExpr *Expr, AArch64MCExpr::VariantKind &ELFRefKind, MCSymbolRefExpr::VariantKind &DarwinRefKind, @@ -5466,6 +5470,113 @@ bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() { return false; } +bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) { + // Try @AUTH expressions: they're more complex than the usual symbol variants. + if (!parseAuthExpr(Res, EndLoc)) + return false; + return getParser().parsePrimaryExpr(Res, EndLoc); +} + +/// parseAuthExpr +/// ::= _sym@AUTH(ib,123[,addr]) +/// ::= (_sym + 5)@AUTH(ib,123[,addr]) +/// ::= (_sym - 5)@AUTH(ib,123[,addr]) +bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) { + MCAsmParser &Parser = getParser(); + MCContext &Ctx = getContext(); + + AsmToken Tok = Parser.getTok(); + + // Look for '_sym@AUTH' ... 
+ if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().endswith("@AUTH")) { + StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH")); + Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx); + + Parser.Lex(); // Eat the identifier. + } else { + // ... or look for a more complex symbol reference, such as ... + SmallVector Tokens; + + // ... '"_long sym"@AUTH' ... + if (Tok.is(AsmToken::String)) + Tokens.resize(2); + // ... or '(_sym + 5)@AUTH'. + else if (Tok.is(AsmToken::LParen)) + Tokens.resize(6); + else + return true; + + if (Parser.getLexer().peekTokens(Tokens) != Tokens.size()) + return true; + + // In either case, the expression ends with '@' 'AUTH'. + if (Tokens[Tokens.size()-2].isNot(AsmToken::At) || + Tokens[Tokens.size()-1].isNot(AsmToken::Identifier) || + Tokens[Tokens.size()-1].getIdentifier() != "AUTH") + return true; + + if (Tok.is(AsmToken::String)) { + StringRef SymName; + if (Parser.parseIdentifier(SymName)) + return true; + Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx); + } else { + if (Parser.parsePrimaryExpr(Res, EndLoc)) + return true; + } + + Parser.Lex(); // '@' + Parser.Lex(); // 'AUTH' + } + + // At this point, we encountered "@AUTH". There is no fallback anymore. 
+ if (parseToken(AsmToken::LParen, "expected '(' after @AUTH expression")) + return true; + + if (Parser.getTok().isNot(AsmToken::Identifier)) + return TokError("expected key name in @AUTH expression"); + + StringRef KeyStr = Parser.getTok().getIdentifier(); + auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr); + if (!KeyIDOrNone) + return TokError("invalid key '" + KeyStr + "' in @AUTH expression"); + Parser.Lex(); + + if (parseToken(AsmToken::Comma, "expected ',' after key in @AUTH expression")) + return true; + + if (Parser.getTok().isNot(AsmToken::Integer)) + return TokError( + "expected integer discriminator after key in @AUTH expression"); + int64_t Discriminator = Parser.getTok().getIntVal(); + + if (!isUInt<16>(Discriminator)) + return TokError("too wide integer discriminator '" + itostr(Discriminator) + + "' in @AUTH expression"); + Parser.Lex(); + + bool UseAddressDiversity = false; + if (Parser.getTok().is(AsmToken::Comma)) { + Parser.Lex(); + if (Parser.getTok().isNot(AsmToken::Identifier) || + Parser.getTok().getIdentifier() != "addr") + return TokError( + "expected 'addr' after discriminator in @AUTH expression"); + UseAddressDiversity = true; + Parser.Lex(); + } + + EndLoc = Parser.getTok().getEndLoc(); + if (parseToken(AsmToken::RParen, + "expected ')' at the end of @AUTH expression")) + return true; + + Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone, + UseAddressDiversity, Ctx); + + return false; +} + bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr, AArch64MCExpr::VariantKind &ELFRefKind, diff --git a/llvm/lib/Target/AArch64/CMakeLists.txt b/llvm/lib/Target/AArch64/CMakeLists.txt index 103925d45d5104..ae68830f6871d6 100644 --- a/llvm/lib/Target/AArch64/CMakeLists.txt +++ b/llvm/lib/Target/AArch64/CMakeLists.txt @@ -33,6 +33,7 @@ add_llvm_target(AArch64CodeGen AArch64CondBrTuning.cpp AArch64ConditionalCompares.cpp AArch64DeadRegisterDefinitionsPass.cpp + AArch64ExpandHardenedPseudos.cpp AArch64ExpandImm.cpp 
AArch64ExpandPseudoInsts.cpp AArch64FalkorHWPFFix.cpp diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp index 21ce5785ea5e12..14f5ecc70cd305 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp @@ -529,6 +529,7 @@ enum CompactUnwindEncodings { class DarwinAArch64AsmBackend : public AArch64AsmBackend { const MCRegisterInfo &MRI; bool IsILP32; + bool IsARM64E; /// Encode compact unwind stack adjustment for frameless functions. /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h. @@ -539,18 +540,22 @@ class DarwinAArch64AsmBackend : public AArch64AsmBackend { public: DarwinAArch64AsmBackend(const Target &T, const Triple &TT, - const MCRegisterInfo &MRI, bool IsILP32) + const MCRegisterInfo &MRI, + bool IsILP32, bool IsARM64E) : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI), - IsILP32(IsILP32) {} + IsILP32(IsILP32), IsARM64E(IsARM64E) {} std::unique_ptr createObjectTargetWriter() const override { if (IsILP32) return createAArch64MachObjectWriter( MachO::CPU_TYPE_ARM64_32, MachO::CPU_SUBTYPE_ARM64_32_V8, true); + else if (IsARM64E) + return createAArch64MachObjectWriter( + MachO::CPU_TYPE_ARM64, MachO::CPU_SUBTYPE_ARM64E, false); else - return createAArch64MachObjectWriter(MachO::CPU_TYPE_ARM64, - MachO::CPU_SUBTYPE_ARM64_ALL, false); + return createAArch64MachObjectWriter( + MachO::CPU_TYPE_ARM64, MachO::CPU_SUBTYPE_ARM64_ALL, false); } /// Generate the compact unwind encoding from the CFI directives. 
@@ -734,7 +739,8 @@ MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T, const Triple &TheTriple = STI.getTargetTriple(); if (TheTriple.isOSBinFormatMachO()) { const bool IsILP32 = TheTriple.isArch32Bit(); - return new DarwinAArch64AsmBackend(T, TheTriple, MRI, IsILP32); + const bool IsARM64E = TheTriple.getArchName() == "arm64e"; + return new DarwinAArch64AsmBackend(T, TheTriple, MRI, IsILP32, IsARM64E); } if (TheTriple.isOSBinFormatCOFF()) diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp index 548e399e05a3f6..ece2c26d1e7d32 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp @@ -152,3 +152,54 @@ void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm); } + +//===----------------------------------------------------------------------===// + +const AArch64AuthMCExpr *AArch64AuthMCExpr::create(const MCExpr *Expr, + uint16_t Discriminator, + AArch64PACKey::ID Key, + bool HasAddressDiversity, + MCContext &Ctx) { + return new (Ctx) + AArch64AuthMCExpr(Expr, Discriminator, Key, HasAddressDiversity); +} + +void AArch64AuthMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { + bool WrapSubExprInParens = !isa(getSubExpr()); + if (WrapSubExprInParens) + OS << '('; + getSubExpr()->print(OS, MAI); + if (WrapSubExprInParens) + OS << ')'; + + OS << "@AUTH(" << AArch64PACKeyIDToString(Key) << ',' << Discriminator; + if (hasAddressDiversity()) + OS << ",addr"; + OS << ')'; +} + +void AArch64AuthMCExpr::visitUsedExpr(MCStreamer &Streamer) const { + Streamer.visitUsedExpr(*getSubExpr()); +} + +MCFragment *AArch64AuthMCExpr::findAssociatedFragment() const { + llvm_unreachable("FIXME: what goes here?"); +} + +bool AArch64AuthMCExpr::evaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout, + const MCFixup *Fixup) const { + if 
(!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup)) + return false; + + if (Res.getSymB()) + report_fatal_error("Auth relocation can't reference two symbols"); + + Res = MCValue::get(Res.getSymA(), nullptr, Res.getConstant(), getKind()); + + return true; +} + +void AArch64AuthMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { + llvm_unreachable("FIXME"); +} diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h index a82ff2e9142685..04808c9f820e6d 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h @@ -14,6 +14,7 @@ #ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H #define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H +#include "Utils/AArch64BaseInfo.h" #include "llvm/MC/MCExpr.h" #include "llvm/Support/ErrorHandling.h" @@ -34,6 +35,8 @@ class AArch64MCExpr : public MCTargetExpr { VK_TPREL = 0x007, VK_TLSDESC = 0x008, VK_SECREL = 0x009, + VK_AUTH = 0x00a, + VK_AUTHADDR = 0x00b, VK_SymLocBits = 0x00f, // Variants specifying which part of the final address calculation is @@ -114,6 +117,7 @@ class AArch64MCExpr : public MCTargetExpr { const MCExpr *Expr; const VariantKind Kind; +protected: explicit AArch64MCExpr(const MCExpr *Expr, VariantKind Kind) : Expr(Expr), Kind(Kind) {} @@ -171,6 +175,53 @@ class AArch64MCExpr : public MCTargetExpr { static bool classof(const AArch64MCExpr *) { return true; } }; + +class AArch64AuthMCExpr : public AArch64MCExpr { + uint16_t Discriminator; + AArch64PACKey::ID Key; + + explicit AArch64AuthMCExpr(const MCExpr *Expr, uint16_t Discriminator, + AArch64PACKey::ID Key, bool HasAddressDiversity) + : AArch64MCExpr(Expr, HasAddressDiversity ? 
VK_AUTHADDR : VK_AUTH), + Discriminator(Discriminator), Key(Key) {} + +public: + /// @name Construction + /// @{ + + static const AArch64AuthMCExpr * + create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, + bool HasAddressDiversity, MCContext &Ctx); + + /// @} + /// @name Accessors + /// @{ + + AArch64PACKey::ID getKey() const { return Key; } + uint16_t getDiscriminator() const { return Discriminator; } + bool hasAddressDiversity() const { return getKind() == VK_AUTHADDR; } + + /// @} + + void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override; + + void visitUsedExpr(MCStreamer &Streamer) const override; + + MCFragment *findAssociatedFragment() const override; + + bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout, + const MCFixup *Fixup) const override; + + void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override; + + static bool classof(const MCExpr *E) { + return E->getKind() == MCExpr::Target; + } + + static bool classof(const AArch64MCExpr *E) { + return E->getKind() == VK_AUTH || E->getKind() == VK_AUTHADDR; + } +}; } // end namespace llvm #endif diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp index 0cafd5dd12fa61..a7ce00d6b90abd 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp @@ -50,9 +50,13 @@ static MCInstrInfo *createAArch64MCInstrInfo() { static MCSubtargetInfo * createAArch64MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) { - if (CPU.empty()) + if (CPU.empty()) { CPU = "generic"; + if (TT.getArchName() == "arm64e") + CPU = "vortex"; + } + return createAArch64MCSubtargetInfoImpl(TT, CPU, FS); } diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp index fc04d37eb3623a..2c505b2c59b1ef 100644 --- 
a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "MCTargetDesc/AArch64FixupKinds.h" +#include "MCTargetDesc/AArch64MCExpr.h" #include "MCTargetDesc/AArch64MCTargetDesc.h" #include "llvm/ADT/Twine.h" #include "llvm/BinaryFormat/MachO.h" @@ -392,6 +393,46 @@ void AArch64MachObjectWriter::recordRelocation( Value = 0; } + if (Target.getRefKind() == AArch64MCExpr::VK_AUTH || + Target.getRefKind() == AArch64MCExpr::VK_AUTHADDR) { + auto *Expr = cast(Fixup.getValue()); + + assert(Type == MachO::ARM64_RELOC_UNSIGNED); + + if (IsPCRel) { + Asm.getContext().reportError( + Fixup.getLoc(), "invalid PC relative auth relocation"); + return; + } + + if (Log2Size != 3) { + Asm.getContext().reportError( + Fixup.getLoc(), "invalid auth relocation size, must be 8 bytes"); + return; + } + + if (Target.getSymB()) { + Asm.getContext().reportError( + Fixup.getLoc(), "invalid auth relocation, can't reference two symbols"); + return; + } + + uint16_t Discriminator = Expr->getDiscriminator(); + AArch64PACKey::ID Key = Expr->getKey(); + + if (!isInt<32>(Value)) { + Asm.getContext().reportError(Fixup.getLoc(), "too wide addend '" + + itostr(Value) + + "' in auth relocation"); + return; + } + + Type = MachO::ARM64_RELOC_AUTHENTICATED_POINTER; + Value = (uint32_t(Value)) | (uint64_t(Discriminator) << 32) | + (uint64_t(Expr->hasAddressDiversity()) << 48) | + (uint64_t(Key) << 49) | (1ULL << 63); + } + // If there's any addend left to handle, encode it in the instruction. 
FixedValue = Value; diff --git a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h index 7a4fcac09ec4d8..48c363738dd081 100644 --- a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h +++ b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h @@ -643,6 +643,45 @@ namespace AArch64II { }; } // end namespace AArch64II +//===----------------------------------------------------------------------===// +// v8.3a Pointer Authentication +// + +namespace AArch64PACKey { +enum ID : uint8_t { + IA = 0, + IB = 1, + DA = 2, + DB = 3 +}; +}; + +inline static StringRef AArch64PACKeyIDToString(AArch64PACKey::ID KeyID) { + switch (KeyID) { + case AArch64PACKey::IA: + return StringRef("ia"); + case AArch64PACKey::IB: + return StringRef("ib"); + case AArch64PACKey::DA: + return StringRef("da"); + case AArch64PACKey::DB: + return StringRef("db"); + } +} + +inline static Optional +AArch64StringToPACKeyID(StringRef Name) { + if (Name == "ia") + return AArch64PACKey::IA; + if (Name == "ib") + return AArch64PACKey::IB; + if (Name == "da") + return AArch64PACKey::DA; + if (Name == "db") + return AArch64PACKey::DB; + return None; +} + } // end namespace llvm #endif diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 654de3ee0172dd..fe32a3b50a453c 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -4214,6 +4214,81 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) { return nullptr; } +Instruction *InstCombiner::tryCombinePtrAuthCall(CallBase &Call) { + Value *Callee = Call.getCalledValue(); + auto *IPC = dyn_cast(Callee); + if (!IPC || !IPC->isNoopCast(DL)) + return nullptr; + + IntrinsicInst *II = dyn_cast(IPC->getOperand(0)); + if (!II) + return nullptr; + + auto PtrAuthBundleOrNone = Call.getOperandBundle(LLVMContext::OB_ptrauth); + assert(Call.getNumOperandBundles() <= 1 && + "unimplemented 
support for ptrauth and other bundle"); + + Value *NewCallee = nullptr; + SmallVector NewBundles; + switch (II->getIntrinsicID()) { + default: + return nullptr; + + // call(ptrauth_resign(p)), ["ptrauth"()] -> call p, ["ptrauth"()] + // assuming the call bundle and the sign operands match. + case Intrinsic::ptrauth_resign: { + if (!PtrAuthBundleOrNone) + return nullptr; + auto PtrAuthBundle = *PtrAuthBundleOrNone; + if (II->getOperand(3) != PtrAuthBundle.Inputs[0] || + II->getOperand(4) != PtrAuthBundle.Inputs[1]) + return nullptr; + + Value *NewBundleOps[] = {II->getOperand(1), II->getOperand(2)}; + NewBundles.emplace_back("ptrauth", NewBundleOps); + NewCallee = II->getOperand(0); + break; + } + + // call(ptrauth_sign(p)), ["ptrauth"()] -> call p + // assuming the call bundle and the sign operands match. + case Intrinsic::ptrauth_sign: { + if (!PtrAuthBundleOrNone) + return nullptr; + auto PtrAuthBundle = *PtrAuthBundleOrNone; + if (II->getOperand(1) != PtrAuthBundle.Inputs[0] || + II->getOperand(2) != PtrAuthBundle.Inputs[1]) + return nullptr; + NewCallee = II->getOperand(0); + break; + } + + // call(ptrauth_auth(p)) -> call p, ["ptrauth"()] + case Intrinsic::ptrauth_auth: { + if (PtrAuthBundleOrNone) + return nullptr; + Value *NewBundleOps[] = {II->getOperand(1), II->getOperand(2)}; + NewBundles.emplace_back("ptrauth", NewBundleOps); + NewCallee = II->getOperand(0); + break; + } + } + + if (!NewCallee) + return nullptr; + + NewCallee = Builder.CreateBitOrPointerCast(NewCallee, Callee->getType()); + CallBase *NewCall = nullptr; + if (auto *CI = dyn_cast(&Call)) { + NewCall = CallInst::Create(CI, NewBundles); + } else { + auto *IKI = cast(&Call); + NewCall = InvokeInst::Create(IKI, NewBundles); + } + NewCall->setCalledOperand(NewCallee); + return NewCall; +} + static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) { unsigned NumArgs = Call.getNumArgOperands(); ConstantInt *Op0C = dyn_cast(Call.getOperand(0)); @@ -4356,6 +4431,11 @@ 
Instruction *InstCombiner::visitCallBase(CallBase &Call) { if (IntrinsicInst *II = findInitTrampoline(Callee)) return transformCallThroughTrampoline(Call, *II); + // Combine calls involving pointer authentication + if (Instruction *NewCall = tryCombinePtrAuthCall(Call)) + return NewCall; + + PointerType *PTy = cast(Callee->getType()); FunctionType *FTy = cast(PTy->getElementType()); if (FTy->isVarArg()) { diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h index 1dbc06d92e7aee..0a3bfc501ff409 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -505,6 +505,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner bool transformConstExprCastCall(CallBase &Call); Instruction *transformCallThroughTrampoline(CallBase &Call, IntrinsicInst &Tramp); + Instruction *tryCombinePtrAuthCall(CallBase &Call); Value *simplifyMaskedLoad(IntrinsicInst &II); Instruction *simplifyMaskedStore(IntrinsicInst &II); diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index d92ee11c2e1ab5..0ac8e2349ed98d 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -1848,6 +1848,8 @@ bool ModuleAddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) { // Globals from llvm.metadata aren't emitted, do not instrument them. if (Section == "llvm.metadata") return false; + // Same for globals in llvm.ptrauth. + if (Section == "llvm.ptrauth") return false; // Do not instrument globals from special LLVM sections. 
if (Section.find("__llvm") != StringRef::npos || Section.find("__LLVM") != StringRef::npos) return false; diff --git a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt index 22190ad7a0ae98..570b7d556c3420 100644 --- a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt +++ b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt @@ -14,6 +14,7 @@ add_llvm_library(LLVMInstrumentation PGOMemOPSizeOpt.cpp PoisonChecking.cpp SanitizerCoverage.cpp + SoftPointerAuth.cpp ValueProfileCollector.cpp ThreadSanitizer.cpp HWAddressSanitizer.cpp diff --git a/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp index a6c2c9b464b634..15dd22eb080f66 100644 --- a/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp +++ b/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp @@ -119,6 +119,7 @@ void llvm::initializeInstrumentation(PassRegistry &Registry) { initializeThreadSanitizerLegacyPassPass(Registry); initializeModuleSanitizerCoverageLegacyPassPass(Registry); initializeDataFlowSanitizerPass(Registry); + initializeSoftPointerAuthLegacyPassPass(Registry); } /// LLVMInitializeInstrumentation - C binding for diff --git a/llvm/lib/Transforms/Instrumentation/SoftPointerAuth.cpp b/llvm/lib/Transforms/Instrumentation/SoftPointerAuth.cpp new file mode 100644 index 00000000000000..486fe57122e6de --- /dev/null +++ b/llvm/lib/Transforms/Instrumentation/SoftPointerAuth.cpp @@ -0,0 +1,879 @@ +//===- SoftPointerAuth.cpp - Software lowering for ptrauth intrinsics -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This pass lowers the llvm.ptrauth intrinsics into something that can +// be supported (inefficiently) on an arbitrary target. +// +// The runtime functions you must define to use this pass are: +// /// Apply a signature to the given unsigned pointer value. +// void *__ptrauth_sign(void *pointer, int32_t key, uintptr_t discriminator); +// +// /// Remove the signature from the given signed pointer value. +// void *__ptrauth_strip(void *pointer, int32_t key); +// +// /// Authenticate and remove the signature on the given signed +// /// pointer value. Trap on authenticate failure. +// void *__ptrauth_auth(void *pointer, int32_t key, uintptr_t discriminator); +// +// /// Blend a small non-zero value into a primary discriminator, +// /// which is expected to resemble a pointer. +// uintptr_t __ptrauth_blend(uintptr_t primaryDiscriminator, +// uintptr_t secondaryDiscriminator); +// +// /// Compute a full, pointer-wide signature on a value. +// uintptr_t __ptrauth_sign_generic(uintptr_t data, uintptr_t discriminator); +// +// The resulting code pattern does not perfectly protect against the backend +// inserting code between authentications and uses, and so the result may +// be attackable. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/CallSite.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Module.h" +#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/ModuleUtils.h" + +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/STLExtras.h" +#include + +#define DEBUG_TYPE "soft-ptrauth" + +using namespace llvm; +using IRBuilderTy = llvm::IRBuilder<>; + +namespace { + +/// A structure for tracking uses of relocations within a Constant. +struct UseSite { + /// A map from operand index to the tracking sites of children. + /// If this is empty, the Constant is a GlobalVariable for a relocation. + /// Otherwise, the Constant is a ConstantAggregate or ConstantExpr, and + /// the relocation reference(s) appear further down in the tree. + std::map Children; +}; + +/// A linked list down to the use of a relocation. +struct UsePath { + const UsePath *Next; + unsigned OperandIndex; +}; + +enum TypeTag { + IntPtr, // uintptr_t + Discriminator = IntPtr, // uintptr_t + Key, // uint32_t + VoidPtr, // i8* +}; + +class SoftPointerAuth { + // The module. + Module *M = nullptr; + + // Cached function pointers, initialized lazily. 
+ Constant *SignPointerFn = nullptr; + Constant *AuthPointerFn = nullptr; + Constant *StripPointerFn = nullptr; + Constant *BlendDiscriminatorFn = nullptr; + Constant *SignGenericFn = nullptr; + + Optional GlobalConstructorBuilder; + +public: + SoftPointerAuth() {} + + bool runOnModule(Module &M); + +private: + bool isPointerAuthRelocation(GlobalVariable *global); + + bool transformRelocations(); + void transformGlobalInitializer(GlobalVariable *global, + const UseSite &usesToTransform); + Constant *transformInitializer(GlobalVariable *global, + SmallVectorImpl &pathToInitializer, + Constant *initializer, + const UseSite &usesToTransform); + void transformInstructionOperands(Instruction *user, + const UseSite &usesToTransform); + Value *emitTransformedConstant(IRBuilderTy &builder, Constant *constant, + const UseSite &usesToTransform); + IRBuilderTy &continueGlobalConstructor(); + + bool transformCalls(); + bool transformCall(CallInst *call); + bool transformInvoke(InvokeInst *call); + bool transformPointerAuthCall(CallSite oldCall, + const OperandBundleUse &bundle); + + Value *emitSign(IRBuilderTy &builder, Value *pointer, + Value *key, Value *discriminator); + Value *emitResign(IRBuilderTy &builder, Value *pointer, + Value *oldKey, Value *oldDiscriminator, + Value *newKey, Value *newDiscriminator); + Value *emitAuth(IRBuilderTy &builder, Value *pointer, + Value *key, Value *discriminator); + Value *emitStrip(IRBuilderTy &builder, Value *pointer, Value *key); + Value *emitBlend(IRBuilderTy &builder, Value *primary, Value *secondary); + Value *emitSignGeneric(IRBuilderTy &builder, + Value *value, Value *discriminator); + + /// Check whether the callee of a call has the right prototype. 
+ bool hasExpectedPrototype(CallSite call, TypeTag resultTypeTag, + ArrayRef argTypeTags) { + if (!hasType(call.getInstruction(), resultTypeTag)) + return false; + + if (call.getNumArgOperands() != argTypeTags.size()) + return false; + for (unsigned i = 0, e = argTypeTags.size(); i != e; ++i) { + if (!hasType(call.getArgOperand(i), argTypeTags[i])) + return false; + } + return true; + } + + /// Does the given value have its expected type? + bool hasType(Value *value, TypeTag tag) { + auto type = value->getType(); + switch (tag) { + case VoidPtr: + if (auto ptrType = dyn_cast(type)) + return ptrType->getAddressSpace() == 0 && + ptrType->getElementType()->isIntegerTy(8); + return false; + case Key: + return type->isIntegerTy(32); + case IntPtr: + return type->isIntegerTy(M->getDataLayout().getPointerSizeInBits(0)); + } + llvm_unreachable("unexpected type tag"); + } + /// Fetch an expected type. + Type *getType(TypeTag tag) { + switch (tag) { + case VoidPtr: return Type::getInt8PtrTy(M->getContext()); + case Key: return Type::getInt32Ty(M->getContext()); + case IntPtr: return Type::getIntNTy(M->getContext(), + M->getDataLayout().getPointerSizeInBits(0)); + } + llvm_unreachable("unexpected type tag"); + } + + ConstantInt *getInt32(unsigned value) { + return ConstantInt::get(Type::getInt32Ty(M->getContext()), value); + } + + /// Create a declaration for the given runtime function. 
+ Constant *getOrInsertFunction(StringRef name, TypeTag resultTypeTag, + ArrayRef argTypeTags) { + auto resultType = getType(resultTypeTag); + SmallVector argTypes; + for (auto argTypeTag : argTypeTags) + argTypes.push_back(getType(argTypeTag)); + auto functionType = FunctionType::get(resultType, argTypes, false); + return cast( + M->getOrInsertFunction(name, functionType).getCallee()); + } + + Constant *getSignPointerFn() { + if (!SignPointerFn) + SignPointerFn = getOrInsertFunction("__ptrauth_sign", VoidPtr, + { VoidPtr, Key, Discriminator }); + return SignPointerFn; + } + + Constant *getAuthPointerFn() { + if (!AuthPointerFn) + AuthPointerFn = getOrInsertFunction("__ptrauth_auth", VoidPtr, + { VoidPtr, Key, Discriminator }); + return AuthPointerFn; + } + + Constant *getStripPointerFn() { + if (!StripPointerFn) + StripPointerFn = getOrInsertFunction("__ptrauth_strip", VoidPtr, + { VoidPtr, Key }); + return StripPointerFn; + } + + Constant *getBlendDiscriminatorFn() { + if (!BlendDiscriminatorFn) + BlendDiscriminatorFn = getOrInsertFunction("__ptrauth_blend", + Discriminator, + { Discriminator, Discriminator }); + return BlendDiscriminatorFn; + } + + Constant *getSignGenericFn() { + if (!SignGenericFn) + SignGenericFn = getOrInsertFunction("__ptrauth_sign_generic", IntPtr, + { IntPtr, Discriminator }); + return SignGenericFn; + } +}; + +} // end anonymous namespace + +bool SoftPointerAuth::runOnModule(Module &M) { + assert(!GlobalConstructorBuilder); + + // Reset any existing caches. + SignPointerFn = nullptr; + AuthPointerFn = nullptr; + StripPointerFn = nullptr; + BlendDiscriminatorFn = nullptr; + SignGenericFn = nullptr; + + this->M = &M; + + bool changed = false; + + // Transform all of the intrinsic calls and operand bundles. + // Doing this before transforming the relocations doesn't deeply matter, + // but this pass has to walk all the functions and the relocation pass is + // based on use lists, so this order minimizes redundant work. 
+ changed |= transformCalls(); + + // Next, transform all the uses of relocations. + changed |= transformRelocations(); + + return changed; +} + +/*****************************************************************************/ +/********************************** Common ***********************************/ +/*****************************************************************************/ + +Value *SoftPointerAuth::emitSign(IRBuilderTy &builder, Value *pointer, + Value *key, Value *discriminator) { + auto call = builder.CreateCall(getSignPointerFn(), + { pointer, key, discriminator }); + call->setDoesNotThrow(); + return call; +} + +Value *SoftPointerAuth::emitResign(IRBuilderTy &builder, Value *pointer, + Value *oldKey, Value *oldDiscriminator, + Value *newKey, Value *newDiscriminator) { + // This is not an unattackable code pattern, but we don't emit one for + // call operand bundles, either. + auto rawValue = emitAuth(builder, pointer, oldKey, oldDiscriminator); + return emitSign(builder, rawValue, newKey, newDiscriminator); +} + +Value *SoftPointerAuth::emitAuth(IRBuilderTy &builder, Value *pointer, + Value *key, Value *discriminator) { + auto call = builder.CreateCall(getAuthPointerFn(), + { pointer, key, discriminator }); + call->setDoesNotThrow(); + return call; +} + +Value *SoftPointerAuth::emitStrip(IRBuilderTy &builder, Value *pointer, + Value *key) { + auto call = builder.CreateCall(getStripPointerFn(), + { pointer, key }); + call->setDoesNotThrow(); + return call; +} + +Value *SoftPointerAuth::emitBlend(IRBuilderTy &builder, Value *primary, + Value *secondary) { + auto call = builder.CreateCall(getBlendDiscriminatorFn(), + { primary, secondary }); + call->setDoesNotThrow(); + return call; +} + +Value *SoftPointerAuth::emitSignGeneric(IRBuilderTy &builder, Value *value, + Value *discriminator) { + auto call = builder.CreateCall(getSignGenericFn(), + { value, discriminator }); + call->setDoesNotThrow(); + return call; +} + +bool 
SoftPointerAuth::isPointerAuthRelocation(GlobalVariable *global) { + // After checking the name, validate the type. + if (global->getSection() == "llvm.ptrauth") { + if (auto init = dyn_cast_or_null( + global->getInitializer())) { + return (init->getNumOperands() == 4 && + hasType(init->getOperand(0), VoidPtr) && + hasType(init->getOperand(1), Key) && + hasType(init->getOperand(2), Discriminator) && + hasType(init->getOperand(3), Discriminator)); + } + } + + return false; +} + +/*****************************************************************************/ +/******************************** Relocations ********************************/ +/*****************************************************************************/ + +/// Find all the top-level uses of a constant (i.e. the uses that are not +/// ConstantAggregates or ConstantExprs) and call the given callback +/// function on them. +template +static void findTopLevelUsesOfConstant(Constant *constant, const UsePath *path, + const Fn &callback) { + for (auto i = constant->use_begin(), e = constant->use_end(); i != e; ++i) { + UsePath userPath = { path, i->getOperandNo() }; + auto user = i->getUser(); + + // If the user is a global variable, there's only one use we care about. + if (isa(user)) { + assert(userPath.OperandIndex == 0 && "non-zero use index on global var"); + callback(user, path); + + // If the user is an instruction, remember the operand index. + } else if (isa(user)) { + callback(user, &userPath); + + // If the user is some other kind of context, recurse. + } else if (auto userConstant = dyn_cast(user)) { + findTopLevelUsesOfConstant(userConstant, &userPath, callback); + } + + // TODO: metadata uses? + } +} + +bool SoftPointerAuth::transformRelocations() { + SmallVector relocations; + SmallVector rootUsers; + DenseMap useSites; + + // Walk all the globals looking for relocations. + for (auto &global : M->globals()) { + if (!isPointerAuthRelocation(&global)) + continue; + + // Remember this relocation. 
+ relocations.push_back(&global); + + // Remember all the top-level uses of the relocation, together with + // paths down to the use. + findTopLevelUsesOfConstant(&global, nullptr, + [&](User *user, const UsePath *path) { + // Look up an entry in the users map, adding one if necessary. + // We remember the order in which we encountered things to avoid + // non-deterministically walking over a DenseMap. This still leaves + // us vulnerable to use-list ordering, but that's harder to avoid. + auto result = useSites.try_emplace(user); + if (result.second) rootUsers.push_back(user); + + // Fill out the path down to the use. + UseSite *site = &result.first->second; + for (; path; path = path->Next) { + site = &site->Children[path->OperandIndex]; + } + (void) site; + }); + } + + // Bail out if we didn't find any uses. + if (relocations.empty()) + return false; + + // Rewrite all the root users. + for (auto user : rootUsers) { + const auto &uses = useSites.find(user)->second; + if (auto global = dyn_cast(user)) { + transformGlobalInitializer(global, uses); + } else { + transformInstructionOperands(cast(user), uses); + } + } + + // Destroy all the relocations. + for (auto reloc : relocations) { + reloc->replaceAllUsesWith(ConstantPointerNull::get(reloc->getType())); + reloc->eraseFromParent(); + } + + // Finish the global initialization function if we started one. + if (GlobalConstructorBuilder) { + GlobalConstructorBuilder->CreateRetVoid(); + GlobalConstructorBuilder.reset(); + } + + return true; +} + +/// Transform a global initializer that contains signing relocations. +void SoftPointerAuth::transformGlobalInitializer(GlobalVariable *global, + const UseSite &usesToTransform) { + auto oldInitializer = global->getInitializer(); + assert(oldInitializer && "global has no initializer?"); + + // transformInitializer wants the indices of a GEP to the initializer + // that it's transforming. Seed that with a '0' to enter the global. 
+ SmallVector pathToInitializer; + pathToInitializer.push_back(getInt32(0)); + + auto newInitializer = transformInitializer(global, pathToInitializer, + oldInitializer, usesToTransform); + + assert(newInitializer != oldInitializer && "no changes?"); + assert(pathToInitializer.size() == 1 && "didn't balance push/pop"); + + global->setInitializer(newInitializer); + + // Make the global mutable; our constant initializer will change it. + global->setConstant(false); +} + +/// Transform part of a global initializer that contains signing relocations. +Constant *SoftPointerAuth::transformInitializer(GlobalVariable *global, + SmallVectorImpl &pathToInitializer, + Constant *initializer, + const UseSite &usesToTransform) { + auto aggregate = dyn_cast(initializer); + + // If the initializer is a simple reference to a relocation, or an + // expression in terms of same, compute it in the global construction. + if (!aggregate) { + auto &builder = continueGlobalConstructor(); + + // Compute the value. + auto transformedInitializer = + emitTransformedConstant(builder, initializer, usesToTransform); + + // Drill down to the current position. + Constant *addr = global; + if (pathToInitializer.size() != 1) + addr = ConstantExpr::getInBoundsGetElementPtr(global->getValueType(), + addr, pathToInitializer); + + // Store the transformed value to this position. + builder.CreateStore(transformedInitializer, addr); + + // Use a null value for the global position. + return Constant::getNullValue(initializer->getType()); + } + + // Otherwise, the initializer is a constant aggregate. Recurse into it + // at the appropriate positions. The goal here is to avoid emitting the + // entire aggregate with stores. + assert(!usesToTransform.Children.empty() + && "walking into wrong initializer?"); + + // Copy the original elements. 
+ SmallVector elts; + elts.reserve(aggregate->getNumOperands()); + for (auto &op : aggregate->operands()) + elts.push_back(cast(&*op)); + + // Modify just the elements that we decided to modify. + for (const auto &eltIndexAndUses : usesToTransform.Children) { + auto eltIndex = eltIndexAndUses.first; + + // Add an index to the GEP down to this position. + pathToInitializer.push_back(getInt32(eltIndex)); + + // Rewrite the element. + elts[eltIndex] = transformInitializer(global, pathToInitializer, + elts[eltIndex], eltIndexAndUses.second); + + // Pop the previously pushed path element. + pathToInitializer.pop_back(); + } + + // Rebuild the aggregate. + auto type = aggregate->getType(); + if (auto structType = dyn_cast(type)) { + return ConstantStruct::get(structType, elts); + } else if (auto arrayType = dyn_cast(type)) { + return ConstantArray::get(arrayType, elts); + } else { + return ConstantVector::get(elts); + } +} + +/// Continue emitting the global constructor function. +IRBuilderTy &SoftPointerAuth::continueGlobalConstructor() { + // Create the global initialization function if we haven't yet. + if (!GlobalConstructorBuilder) { + auto &context = M->getContext(); + + // Create the function. + auto fnType = FunctionType::get(Type::getVoidTy(context), + {}, false); + Function *fn = Function::Create(fnType, Function::PrivateLinkage, + "ptrauth_soft_init", M); + + // Add the function to the global initializers list. + appendToGlobalCtors(*M, fn, 0); + + auto entryBB = BasicBlock::Create(context, "", fn); + + GlobalConstructorBuilder.emplace(entryBB); + } + return *GlobalConstructorBuilder; +} + +void SoftPointerAuth::transformInstructionOperands(Instruction *user, + const UseSite &usesToTransform) { + assert(!usesToTransform.Children.empty() + && "no uses to transform for instruction"); + + // Handle PHIs differently because we have to insert code into the + // right predecessor(s). 
+ if (auto phi = dyn_cast(user)) { + for (auto &useEntry : usesToTransform.Children) { + auto operandIndex = useEntry.first; + auto operand = cast(phi->getOperand(operandIndex)); + + // Figure out the block this edge corresponds to. + auto incomingValueIndex = + PHINode::getIncomingValueNumForOperand(operandIndex); + auto incomingBlock = phi->getIncomingBlock(incomingValueIndex); + + // Split the edge if necessary & possible. + // Note that we don't want to change anything structurally about 'phi'. + auto newBlock = SplitCriticalEdge(incomingBlock, phi->getParent(), + CriticalEdgeSplittingOptions() + .setKeepOneInputPHIs()); + + // Start inserting before the terminator in the new block. + // If a critical edge was unsplittable, this will insert the code + // unconditionally in the origin block, which is unfortunate but + // acceptable because sign operations cannot fail. + auto blockToInsertInto = newBlock ? newBlock : incomingBlock; + IRBuilderTy builder(blockToInsertInto->getTerminator()); + + // Transform the value. + auto transformedOperand = + emitTransformedConstant(builder, operand, useEntry.second); + + // Replace the incoming value. + phi->setIncomingValue(incomingValueIndex, transformedOperand); + } + + return; + } + + // Otherwise, emit immediately before the user. + IRBuilderTy builder(user); + for (auto &useEntry : usesToTransform.Children) { + auto operandIndex = useEntry.first; + auto operand = cast(user->getOperand(operandIndex)); + + auto transformedOperand = + emitTransformedConstant(builder, operand, useEntry.second); + + // Replace the incoming value. + user->setOperand(operandIndex, transformedOperand); + } +} + + +Value *SoftPointerAuth::emitTransformedConstant(IRBuilderTy &builder, + Constant *constant, + const UseSite &usesToTransform) { + // If it's a direct reference to the relocation, we're done. 
+ if (auto global = dyn_cast(constant)) { + assert(isPointerAuthRelocation(global)); + assert(usesToTransform.Children.empty() && + "child uses of direct relocation reference?"); + + // Decompose the relocation. + ConstantStruct *init = cast(global->getInitializer()); + auto pointer = init->getOperand(0); + auto key = init->getOperand(1); + auto primaryDiscriminator = init->getOperand(2); + auto secondaryDiscriminator = init->getOperand(3); + + // Compute the discriminator. + Value *discriminator; + if (primaryDiscriminator->isNullValue()) { + discriminator = secondaryDiscriminator; + } else if (secondaryDiscriminator->isNullValue()) { + discriminator = primaryDiscriminator; + } else { + discriminator = emitBlend(builder, primaryDiscriminator, + secondaryDiscriminator); + } + + // Emit a sign operation. + auto signedValue = emitSign(builder, pointer, key, discriminator); + + // Cast back to the signed pointer type. + return builder.CreateBitCast(signedValue, global->getType()); + } + + // If it's a constant expression, make it an instruction and rebuild + // its operands. + if (auto expr = dyn_cast(constant)) { + assert(!usesToTransform.Children.empty() && + "direct use of constant expression?"); + + auto instruction = expr->getAsInstruction(); + + for (const auto &operandIndexAndUses : usesToTransform.Children) { + auto operandIndex = operandIndexAndUses.first; + + auto newOperand = + emitTransformedConstant(builder, expr->getOperand(operandIndex), + operandIndexAndUses.second); + instruction->setOperand(operandIndex, newOperand); + } + + builder.Insert(instruction); + return instruction; + } + + // Otherwise, it should be a constant aggregate. + // Recursively emit the transformed elements. + auto aggregate = cast(constant); + assert(!usesToTransform.Children.empty() && + "direct use of whole constant aggregate?"); + + SmallVector elts(aggregate->op_begin(), aggregate->op_end()); + + // Transform all of the children we're supposed to transform. 
+ for (const auto &childUseEntry : usesToTransform.Children) { + auto &elt = elts[childUseEntry.first]; + elt = emitTransformedConstant(builder, cast(elt), + childUseEntry.second); + } + + // Build up the aggregate value using insertelement / insertvalue + // as appropriate. + auto type = aggregate->getType(); + bool isVector = isa(type); + Value *transformedAggregate = UndefValue::get(type); + for (unsigned i = 0, e = aggregate->getNumOperands(); i != e; ++i) { + if (isVector) + transformedAggregate = + builder.CreateInsertElement(transformedAggregate, elts[i], i); + else + transformedAggregate = + builder.CreateInsertValue(transformedAggregate, elts[i], i); + } + return transformedAggregate; +} + +/*****************************************************************************/ +/*********************** Intrinsics and Operand Bundles **********************/ +/*****************************************************************************/ + +bool SoftPointerAuth::transformCalls() { + bool changed = false; + + for (auto fi = M->begin(), fe = M->end(); fi != fe; ) { + auto fn = &*fi; + ++fi; + + // Soft return authentication is technically possible (even without backend + // support) but not currently necessary. + if (fn->hasFnAttribute("ptrauth-returns")) + report_fatal_error("Soft. lowering of return address auth unsupported"); + + for (auto bi = fn->begin(), be = fn->end(); bi != be; ) { + auto bb = &*bi; + ++bi; + + for (auto ii = bb->begin(), ie = bb->end(); ii != ie; ) { + auto instruction = &*ii; + ++ii; + + if (auto call = dyn_cast(instruction)) { + changed |= transformCall(call); + } else if (auto invoke = dyn_cast(instruction)) { + changed |= transformInvoke(invoke); + } + } + } + } + + return changed; +} + +bool SoftPointerAuth::transformCall(CallInst *call) { + // Handle calls with the llvm.ptrauth operand bundle attached. 
+ if (auto bundle = call->getOperandBundle(LLVMContext::OB_ptrauth)) { + return transformPointerAuthCall(call, *bundle); + } + + // Otherwise, look for our intrinsics. + auto callee = call->getCalledFunction(); + if (!callee) return false; + auto intrinsic = CallSite(call).getIntrinsicID(); + if (!intrinsic) return false; + + auto rebuild = [&](function_ref fn) { + IRBuilderTy builder(call); + auto result = fn(builder); + call->replaceAllUsesWith(result); + call->eraseFromParent(); + return true; + }; + + switch (intrinsic) { + case Intrinsic::ptrauth_sign: + if (!hasExpectedPrototype(call, VoidPtr, {VoidPtr, Key, Discriminator})) + return false; + return rebuild([&](IRBuilderTy &builder) { + return emitSign(builder, call->getArgOperand(0), + call->getArgOperand(1), call->getArgOperand(2)); + }); + + case Intrinsic::ptrauth_resign: + if (!hasExpectedPrototype(call, VoidPtr, {VoidPtr, Key, Discriminator, + Key, Discriminator})) + return false; + return rebuild([&](IRBuilderTy &builder) { + return emitResign(builder, call->getArgOperand(0), + call->getArgOperand(1), call->getArgOperand(2), + call->getArgOperand(3), call->getArgOperand(4)); + }); + + case Intrinsic::ptrauth_auth: + if (!hasExpectedPrototype(call, VoidPtr, {VoidPtr, Key, Discriminator})) + return false; + return rebuild([&](IRBuilderTy &builder) { + return emitAuth(builder, call->getArgOperand(0), + call->getArgOperand(1), call->getArgOperand(2)); + }); + + case Intrinsic::ptrauth_strip: + if (!hasExpectedPrototype(call, VoidPtr, {VoidPtr, Key})) + return false; + return rebuild([&](IRBuilderTy &builder) { + return emitStrip(builder, call->getArgOperand(0), + call->getArgOperand(1)); + }); + + case Intrinsic::ptrauth_blend: + if (!hasExpectedPrototype(call, Discriminator, + {Discriminator, Discriminator})) + return false; + return rebuild([&](IRBuilderTy &builder) { + return emitBlend(builder, call->getArgOperand(0), + call->getArgOperand(1)); + }); + + case Intrinsic::ptrauth_sign_generic: + if 
(!hasExpectedPrototype(call, IntPtr, {IntPtr, IntPtr})) + return false; + return rebuild([&](IRBuilderTy &builder) { + return emitSignGeneric(builder, call->getArgOperand(0), + call->getArgOperand(1)); + }); + + default: + break; + } + + return false; +} + +bool SoftPointerAuth::transformInvoke(InvokeInst *call) { + // Handle invokes with the llvm.ptrauth operand bundle attached. + if (auto bundle = call->getOperandBundle(LLVMContext::OB_ptrauth)) { + return transformPointerAuthCall(call, *bundle); + } + + return false; +} + +bool SoftPointerAuth::transformPointerAuthCall(CallSite oldCall, + const OperandBundleUse &bundle) { + if (bundle.Inputs.size() != 2 || + !hasType(bundle.Inputs[0], Key) || + !hasType(bundle.Inputs[1], Discriminator)) + return false; + + IRBuilderTy builder(oldCall.getInstruction()); + + // Authenticate the callee. + Value *oldCallee = oldCall.getCalledValue(); + Value *callee = builder.CreateBitCast(oldCallee, getType(VoidPtr)); + callee = emitAuth(builder, callee, bundle.Inputs[0], bundle.Inputs[1]); + callee = builder.CreateBitCast(callee, oldCallee->getType()); + + // Get the arguments. + SmallVector args(oldCall.arg_begin(), oldCall.arg_end()); + + // Get the operand bundles besides llvm.ptrauth (probably none). + SmallVector opBundles; + for (unsigned i = 0, e = oldCall.getNumOperandBundles(); i != e; ++i) { + auto bundle = oldCall.getOperandBundleAt(i); + if (bundle.getTagID() != LLVMContext::OB_ptrauth) { + opBundles.emplace_back(bundle); + } + } + + // Build the new instruction. + CallSite newCall; + if (oldCall.isInvoke()) { + auto oldInvoke = cast(oldCall.getInstruction()); + newCall = builder.CreateInvoke(callee, + oldInvoke->getNormalDest(), + oldInvoke->getUnwindDest(), + args, opBundles); + } else { + newCall = builder.CreateCall(callee, args, opBundles); + } + + // Copy mandatory attributes. + newCall.setCallingConv(oldCall.getCallingConv()); + newCall.setAttributes(oldCall.getAttributes()); + + // TODO: copy metadata? 
+ newCall.getInstruction()->takeName(oldCall.getInstruction()); + + // Destroy the old call. + oldCall.getInstruction()->replaceAllUsesWith(newCall.getInstruction()); + oldCall.getInstruction()->eraseFromParent(); + + return true; +} + +/*****************************************************************************/ +/**************************** Pass Manager Support ***************************/ +/*****************************************************************************/ + +namespace { + +class SoftPointerAuthLegacyPass : public ModulePass { +public: + static char ID; + SoftPointerAuthLegacyPass() : ModulePass(ID) { + initializeSoftPointerAuthLegacyPassPass(*PassRegistry::getPassRegistry()); + } + StringRef getPassName() const override { + return "Soft Pointer Auth Lowering"; + } + bool runOnModule(Module &M) override { return Pass.runOnModule(M); } + +private: + SoftPointerAuth Pass; +}; + +} // end anonymous namespace + +char SoftPointerAuthLegacyPass::ID = 0; +INITIALIZE_PASS(SoftPointerAuthLegacyPass, "soft-ptrauth", + "Lower pointer authentication intrinsics for soft targets", + false, false) + +ModulePass *llvm::createSoftPointerAuthPass() { + return new SoftPointerAuthLegacyPass(); +} diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp index b27a36b67d62e4..4529567da05b99 100644 --- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -240,7 +240,8 @@ static bool markTails(Function &F, bool &AllCallsAreTailCalls, if (!CI || CI->isTailCall() || isa(&I)) continue; - bool IsNoTail = CI->isNoTailCall() || CI->hasOperandBundles(); + bool IsNoTail = CI->isNoTailCall() || + CI->hasOperandBundlesOtherThan({LLVMContext::OB_ptrauth}); if (!IsNoTail && CI->doesNotAccessMemory()) { // A call to a readnone function whose arguments are all things computed diff --git a/llvm/test/Assembler/debug-info.ll 
b/llvm/test/Assembler/debug-info.ll index 8c3922ae609215..99ff46c56beef0 100644 --- a/llvm/test/Assembler/debug-info.ll +++ b/llvm/test/Assembler/debug-info.ll @@ -1,8 +1,8 @@ ; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s ; RUN: verify-uselistorder %s -; CHECK: !named = !{!0, !0, !1, !2, !3, !4, !5, !6, !7, !8, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41} -!named = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43, !44} +; CHECK: !named = !{!0, !0, !1, !2, !3, !4, !5, !6, !7, !8, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42} +!named = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43, !44, !45} ; CHECK: !0 = !DISubrange(count: 3) ; CHECK-NEXT: !1 = !DISubrange(count: 3, lowerBound: 4) @@ -104,3 +104,6 @@ ; CHECK-NEXT: !41 = !DILocalVariable(name: "Name", arg: 1, scope: {{.*}}, file: {{.*}}, line: 13, type: {{.*}}, flags: DIFlagArgumentNotModified) !43 = distinct !DISubprogram(name: "fn", scope: !12, file: !12, spFlags: 0) !44 = !DILocalVariable(name: "Name", arg: 1, scope: !43, file: !12, line: 13, type: !7, flags: DIFlagArgumentNotModified) + +; CHECK: !DIDerivedType(tag: DW_TAG_APPLE_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234) +!45 = !DIDerivedType(tag: DW_TAG_APPLE_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234) diff --git 
a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll index ac7feec77aa1b4..6d7d4c0a912c3d 100644 --- a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll +++ b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll @@ -7,6 +7,7 @@ ; CHECK-NEXT: > Call Site 1 << +; CHECK-NEXT: .uleb128 [[PRECALL]]-[[FNBEGIN]] ; Call between [[FNBEGIN]] and [[PRECALL]] +; CHECK-NEXT: .byte 0 ; has no landing pad +; CHECK-NEXT: .byte 0 ; On action: cleanup +; CHECK-NEXT: .uleb128 [[PRECALL]]-[[FNBEGIN]] ; >> Call Site 2 << +; CHECK-NEXT: .uleb128 [[POSTCALL]]-[[PRECALL]] ; Call between [[PRECALL]] and [[POSTCALL]] +; CHECK-NEXT: .uleb128 [[LPAD]]-[[FNBEGIN]] ; jumps to [[LPAD]] +; CHECK-NEXT: .byte 3 ; On action: 2 +; CHECK-NEXT: .uleb128 [[POSTCALL]]-[[FNBEGIN]] ; >> Call Site 3 << +; CHECK-NEXT: .uleb128 [[FNEND]]-[[POSTCALL]] ; Call between [[POSTCALL]] and [[FNEND]] +; CHECK-NEXT: .byte 0 ; has no landing pad +; CHECK-NEXT: .byte 0 ; On action: cleanup +; CHECK-NEXT: [[CSEND]]: + +; CHECK-NEXT: .byte 1 ; >> Action Record 1 << +; CHECK-NEXT: ; Catch TypeInfo 1 +; CHECK-NEXT: .byte 0 ; No further actions +; CHECK-NEXT: .byte 2 ; >> Action Record 2 << +; CHECK-NEXT: ; Catch TypeInfo 2 +; CHECK-NEXT: .byte 125 ; Continue to action 1 +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: ; >> Catch TypeInfos << +; CHECK-NEXT: [[TI:L.*]]: ; TypeInfo 2 +; CHECK-NEXT: .long __ZTIPKc@GOT-[[TI]] +; CHECK-NEXT: .long 0 ; TypeInfo 1 + +; CHECK-NEXT: [[TT]]: + +define void @test_invoke_ib_42_catch(void(i8*, i8*, i8*)* %fptr) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { + %tmp0 = call i8* @__cxa_allocate_exception(i64 8) + %tmp1 = bitcast i8* %tmp0 to i8** + store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @hello_str, i64 0, i64 0), i8** %tmp1, align 8 + invoke void %fptr(i8* %tmp0, i8* bitcast (i8** @_ZTIPKc to i8*), i8* null) [ "ptrauth"(i32 1, i64 42) ] + to label %continuebb unwind label %catchbb + +catchbb: + %tmp2 = landingpad { 
i8*, i32 } + catch i8* bitcast (i8** @_ZTIPKc to i8*) + catch i8* null + %tmp3 = extractvalue { i8*, i32 } %tmp2, 0 + %tmp4 = extractvalue { i8*, i32 } %tmp2, 1 + %tmp5 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIPKc to i8*)) + %tmp6 = icmp eq i32 %tmp4, %tmp5 + %tmp7 = call i8* @__cxa_begin_catch(i8* %tmp3) + br i1 %tmp6, label %PKc_catchbb, label %any_catchbb + +PKc_catchbb: + call void @bar(i8* %tmp7) + br label %any_catchbb + +any_catchbb: + call void @foo() + call void @__cxa_end_catch() + ret void + +continuebb: + unreachable +} + +declare void @foo() +declare void @bar(i8*) +declare i32 @baz() + +declare i32 @__gxx_personality_v0(...) +declare i8* @__cxa_allocate_exception(i64) +declare void @__cxa_throw(i8*, i8*, i8*) +declare i32 @llvm.eh.typeid.for(i8*) +declare i8* @__cxa_begin_catch(i8*) +declare void @__cxa_end_catch() + +attributes #0 = { nounwind "ptrauth-returns" } diff --git a/llvm/test/CodeGen/AArch64/arm64e-ptrauth-load.ll b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-load.ll new file mode 100644 index 00000000000000..d7ed3c78e2b426 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-load.ll @@ -0,0 +1,298 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple arm64e-apple-darwin -verify-machineinstrs | FileCheck %s + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" + +define i64 @test_load_auth_da(i64* %ptr) { +; CHECK-LABEL: test_load_auth_da: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldraa x0, [x0] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = inttoptr i64 %tmp1 to i64* + %tmp3 = load i64, i64* %tmp2 + ret i64 %tmp3 +} + +define i64 @test_load_auth_db(i64* %ptr) { +; CHECK-LABEL: test_load_auth_db: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldrab x0, [x0] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = inttoptr i64 
%tmp1 to i64* + %tmp3 = load i64, i64* %tmp2 + ret i64 %tmp3 +} + +; Offset. + +define i64 @test_load_auth_da_8(i64* %ptr) { +; CHECK-LABEL: test_load_auth_da_8: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldraa x0, [x0, #8] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = add i64 %tmp1, 8 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + ret i64 %tmp4 +} + +define i64 @test_load_auth_da_m8(i64* %ptr) { +; CHECK-LABEL: test_load_auth_da_m8: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldraa x0, [x0, #-8] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = add i64 %tmp1, -8 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + ret i64 %tmp4 +} + +define i64 @test_load_auth_db_4088(i64* %ptr) { +; CHECK-LABEL: test_load_auth_db_4088: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldrab x0, [x0, #4088] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = add i64 %tmp1, 4088 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + ret i64 %tmp4 +} + +; Offset invalid cases. 
+ +define i64 @test_load_auth_da_4(i64* %ptr) { +; CHECK-LABEL: test_load_auth_da_4: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov x16, x0 +; CHECK-NEXT: autdza x16 +; CHECK-NEXT: ldur x0, [x16, #4] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = add i64 %tmp1, 4 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + ret i64 %tmp4 +} + +define i64 @test_load_auth_da_4096(i64* %ptr) { +; CHECK-LABEL: test_load_auth_da_4096: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov x16, x0 +; CHECK-NEXT: autdza x16 +; CHECK-NEXT: ldr x0, [x16, #4096] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = add i64 %tmp1, 4096 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + ret i64 %tmp4 +} + +; Pre-indexed variant. + +define i64* @test_load_auth_da_8_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_da_8_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldraa x8, [x0, #8]! +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = add i64 %tmp1, 8 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + ret i64* %tmp3 +} + +define i64* @test_load_auth_db_248_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_db_248_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldrab x8, [x0, #248]! +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = add i64 %tmp1, 248 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + ret i64* %tmp3 +} + +define i64* @test_load_auth_db_m256_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_db_m256_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldrab x8, [x0, #-256]! 
+; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = add i64 %tmp1, -256 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + ret i64* %tmp3 +} + +; "Pre-indexed" with index 0: writeback the auth result. + +define i64* @test_load_auth_da_0_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_da_0_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldraa x8, [x0]! +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = inttoptr i64 %tmp1 to i64* + %tmp3 = load i64, i64* %tmp2 + store i64 %tmp3, i64* %dst + ret i64* %tmp2 +} + +; "Pre-indexed" with index 0, with a potential cycle. + +define void @test_load_auth_da_0_pre_cycle(i64* %ptr, i64* %dst, i64* %dst2, i64* %dst3) { +; CHECK-LABEL: test_load_auth_da_0_pre_cycle: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov x16, x0 +; CHECK-NEXT: autdza x16 +; CHECK-NEXT: str x16, [x2] +; CHECK-NEXT: ldr x8, [x16] +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = inttoptr i64 %tmp1 to i64* + store i64 %tmp1, i64* %dst2 + %tmp3 = load i64, i64* %tmp2 + store i64 %tmp3, i64* %dst + ret void +} + +; Pre-indexed invalid offsets. + +define i64* @test_load_auth_db_4_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_db_4_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov x16, x0 +; CHECK-NEXT: autdzb x16 +; CHECK-NEXT: mov x0, x16 +; CHECK-NEXT: ldr x8, [x0, #4]! 
+; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = add i64 %tmp1, 4 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + ret i64* %tmp3 +} + +define i64* @test_load_auth_db_4096_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_db_4096_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov x16, x0 +; CHECK-NEXT: autdzb x16 +; CHECK-NEXT: add x0, x16, #1, lsl #12 ; =4096 +; CHECK-NEXT: ldr x8, [x16, #4096] +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = add i64 %tmp1, 4096 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + ret i64* %tmp3 +} + +define i64* @test_load_auth_db_256_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_db_256_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldrab x8, [x0, #256]! +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = add i64 %tmp1, 256 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + ret i64* %tmp3 +} + +define i64* @test_load_auth_db_m264_pre(i64* %ptr, i64* %dst) { +; CHECK-LABEL: test_load_auth_db_m264_pre: +; CHECK: ; %bb.0: +; CHECK-NEXT: ldrab x8, [x0, #-264]! +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 3, i64 0) + %tmp2 = add i64 %tmp1, -264 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + ret i64* %tmp3 +} + +; Pre-indexed multiple-use of the auth. 
+ +define i64* @test_load_auth_da_8_pre_use(i64* %ptr, i64* %dst, i64* %dst2) { +; CHECK-LABEL: test_load_auth_da_8_pre_use: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov x16, x0 +; CHECK-NEXT: autdza x16 +; CHECK-NEXT: mov x0, x16 +; CHECK-NEXT: ldr x8, [x0, #8]! +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: str x16, [x2] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = add i64 %tmp1, 8 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + store i64 %tmp1, i64* %dst2 + ret i64* %tmp3 +} + +; Pre-indexed multiple-use of the auth, invalid offset. + +define i64* @test_load_auth_da_256_pre_use(i64* %ptr, i64* %dst, i64* %dst2) { +; CHECK-LABEL: test_load_auth_da_256_pre_use: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov x16, x0 +; CHECK-NEXT: autdza x16 +; CHECK-NEXT: ldr x8, [x16, #256] +; CHECK-NEXT: add x0, x16, #256 ; =256 +; CHECK-NEXT: str x8, [x1] +; CHECK-NEXT: str x16, [x2] +; CHECK-NEXT: ret + %tmp0 = ptrtoint i64* %ptr to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 0) + %tmp2 = add i64 %tmp1, 256 + %tmp3 = inttoptr i64 %tmp2 to i64* + %tmp4 = load i64, i64* %tmp3 + store i64 %tmp4, i64* %dst + store i64 %tmp1, i64* %dst2 + ret i64* %tmp3 +} + +declare i64 @llvm.ptrauth.auth.i64(i64, i32, i64) diff --git a/llvm/test/CodeGen/AArch64/arm64e-ptrauth-reloc.ll b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-reloc.ll new file mode 100644 index 00000000000000..4199b0f6ed5b1a --- /dev/null +++ b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-reloc.ll @@ -0,0 +1,343 @@ +; RUN: llc < %s -mtriple arm64e-apple-darwin -aarch64-ptrauth-global-dynamic-mat=1 | FileCheck %s --check-prefixes=CHECK,OPT,DYN-OPT +; RUN: llc < %s -mtriple arm64e-apple-darwin -O0 -aarch64-ptrauth-global-dynamic-mat=1 | FileCheck %s --check-prefixes=CHECK,O0,DYN-O0 +; RUN: llc < %s -mtriple arm64e-apple-darwin -aarch64-ptrauth-global-dynamic-mat=0 | FileCheck %s 
--check-prefixes=CHECK,OPT,LOAD-OPT +; RUN: llc < %s -mtriple arm64e-apple-darwin -O0 -aarch64-ptrauth-global-dynamic-mat=0 | FileCheck %s --check-prefixes=CHECK,O0,LOAD-O0 + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" + +; Check code references. + +; LOAD-LABEL: _test_global_zero_disc: +; LOAD-NEXT: ; %bb.0: +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: adrp x[[STUBPAGE:[0-9]+]], l_g$auth_ptr$ia$0@PAGE +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: ldr x0, [x[[STUBPAGE]], l_g$auth_ptr$ia$0@PAGEOFF] +; LOAD-NEXT: ret + +; DYN-O0-LABEL: _test_global_zero_disc: +; DYN-O0-NEXT: ; %bb.0: +; DYN-O0-NEXT: mov x8, #0 +; DYN-O0-NEXT: ; implicit-def: $x16 +; DYN-O0-NEXT: ; implicit-def: $x17 +; DYN-O0-NEXT: adrp x16, _g@GOTPAGE +; DYN-O0-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-O0-NEXT: pacia x16, x8 +; DYN-O0-NEXT: mov x0, x16 +; DYN-O0-NEXT: ret + +; DYN-OPT-LABEL: _test_global_zero_disc: +; DYN-OPT-NEXT: ; %bb.0: +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: adrp x16, _g@GOTPAGE +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-OPT-NEXT: paciza x16 +; DYN-OPT-NEXT: mov x0, x16 +; DYN-OPT-NEXT: ret + +define i8* @test_global_zero_disc() #0 { + %tmp0 = bitcast { i8*, i32, i64, i64 }* @g.ptrauth.ia.0 to i8* + ret i8* %tmp0 +} + +; LOAD-LABEL: _test_global_offset_zero_disc: +; LOAD-NEXT: ; %bb.0: +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: adrp x[[STUBPAGE:[0-9]+]], l_g$148$auth_ptr$da$0@PAGE +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: ldr x0, [x[[STUBPAGE]], l_g$148$auth_ptr$da$0@PAGEOFF] +; LOAD-NEXT: ret + +; DYN-O0-LABEL: _test_global_offset_zero_disc: +; DYN-O0-NEXT: ; %bb.0: +; DYN-O0-NEXT: mov x8, #0 +; DYN-O0-NEXT: ; implicit-def: $x16 +; DYN-O0-NEXT: ; implicit-def: $x17 +; DYN-O0-NEXT: adrp x16, _g@GOTPAGE +; DYN-O0-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-O0-NEXT: add x16, x16, #148 +; DYN-O0-NEXT: pacda x16, x8 +; DYN-O0-NEXT: mov x0, x16 +; DYN-O0-NEXT: ret + +; DYN-OPT-LABEL: _test_global_offset_zero_disc: +; DYN-OPT-NEXT: ; %bb.0: 
+; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: adrp x16, _g@GOTPAGE +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-OPT-NEXT: add x16, x16, #148 +; DYN-OPT-NEXT: pacdza x16 +; DYN-OPT-NEXT: mov x0, x16 +; DYN-OPT-NEXT: ret + +define i8* @test_global_offset_zero_disc() #0 { + %tmp0 = bitcast { i8*, i32, i64, i64 }* @g.offset.ptrauth.da.0 to i8* + ret i8* %tmp0 +} + +; For large offsets, materializing it can take up to 3 add instructions. +; We limit the offset to 32-bits. We theoretically could support up to +; 64 bit offsets, but 32 bits Ought To Be Enough For Anybody, and that's +; the limit for the relocation addend anyway. + +; LOAD-LABEL: _test_global_big_offset_zero_disc: +; LOAD-NEXT: ; %bb.0: +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: adrp x[[STUBPAGE:[0-9]+]], l_g$2147549185$auth_ptr$da$0@PAGE +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: ldr x0, [x[[STUBPAGE]], l_g$2147549185$auth_ptr$da$0@PAGEOFF] +; LOAD-NEXT: ret + +; DYN-O0-LABEL: _test_global_big_offset_zero_disc: +; DYN-O0-NEXT: ; %bb.0: +; DYN-O0-NEXT: mov x8, #0 +; DYN-O0-NEXT: ; implicit-def: $x16 +; DYN-O0-NEXT: ; implicit-def: $x17 +; DYN-O0-NEXT: adrp x16, _g@GOTPAGE +; DYN-O0-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-O0-NEXT: add x16, x16, #1 +; DYN-O0-NEXT: add x16, x16, #16, lsl #12 +; DYN-O0-NEXT: add x16, x16, #128, lsl #24 +; DYN-O0-NEXT: pacda x16, x8 +; DYN-O0-NEXT: mov x0, x16 +; DYN-O0-NEXT: ret + +; DYN-OPT-LABEL: _test_global_big_offset_zero_disc: +; DYN-OPT-NEXT: ; %bb.0: +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: adrp x16, _g@GOTPAGE +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-OPT-NEXT: add x16, x16, #1 +; DYN-OPT-NEXT: add x16, x16, #16, lsl #12 +; DYN-OPT-NEXT: add x16, x16, #128, lsl #24 +; DYN-OPT-NEXT: pacdza x16 +; DYN-OPT-NEXT: mov x0, x16 +; DYN-OPT-NEXT: ret + +define i8* @test_global_big_offset_zero_disc() #0 { + %tmp0 = bitcast { i8*, i32, i64, i64 }* @g.big_offset.ptrauth.da.0 to i8* + ret i8* %tmp0 +} 
+ +; LOAD-LABEL: _test_global_disc: +; LOAD-NEXT: ; %bb.0: +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: adrp x[[STUBPAGE:[0-9]+]], l_g$auth_ptr$ia$42@PAGE +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: ldr x0, [x[[STUBPAGE]], l_g$auth_ptr$ia$42@PAGEOFF] +; LOAD-NEXT: ret + +; DYN-O0-LABEL: _test_global_disc: +; DYN-O0-NEXT: ; %bb.0: +; DYN-O0-NEXT: mov x8, #0 +; DYN-O0-NEXT: ; implicit-def: $x16 +; DYN-O0-NEXT: ; implicit-def: $x17 +; DYN-O0-NEXT: adrp x16, _g@GOTPAGE +; DYN-O0-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-O0-NEXT: mov x17, #42 +; DYN-O0-NEXT: pacia x16, x17 +; DYN-O0-NEXT: mov x0, x16 +; DYN-O0-NEXT: ret + +; DYN-OPT-LABEL: _test_global_disc: +; DYN-OPT-NEXT: ; %bb.0: +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: adrp x16, _g@GOTPAGE +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-OPT-NEXT: mov x17, #42 +; DYN-OPT-NEXT: pacia x16, x17 +; DYN-OPT-NEXT: mov x0, x16 +; DYN-OPT-NEXT: ret + +define i8* @test_global_disc() #0 { + %tmp0 = bitcast { i8*, i32, i64, i64 }* @g.ptrauth.ia.42 to i8* + ret i8* %tmp0 +} + +; LOAD-LABEL: _test_global_addr_disc: +; LOAD-NEXT: ; %bb.0: +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: adrp x8, _g.ref.da.42.addr@PAGE +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: add x8, x8, _g.ref.da.42.addr@PAGEOFF +; LOAD-NEXT: movk x8, #42, lsl #48 +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: adrp x16, _g@GOTPAGE +; LOAD-NEXT: Lloh{{.*}}: +; LOAD-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; LOAD-NEXT: pacda x16, x8 +; LOAD-NEXT: mov x0, x16 +; LOAD-NEXT: ret + +; DYN-O0-LABEL: _test_global_addr_disc: +; DYN-O0-NEXT: ; %bb.0: +; DYN-O0-NEXT: adrp x8, _g.ref.da.42.addr@PAGE +; DYN-O0-NEXT: add x8, x8, _g.ref.da.42.addr@PAGEOFF +; DYN-O0-NEXT: movk x8, #42, lsl #48 +; DYN-O0-NEXT: ; implicit-def: $x16 +; DYN-O0-NEXT: ; implicit-def: $x17 +; DYN-O0-NEXT: adrp x16, _g@GOTPAGE +; DYN-O0-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-O0-NEXT: pacda x16, x8 +; DYN-O0-NEXT: mov x0, x16 +; DYN-O0-NEXT: ret + +; DYN-OPT-LABEL: _test_global_addr_disc: 
+; DYN-OPT-NEXT: ; %bb.0: +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: adrp x8, _g.ref.da.42.addr@PAGE +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: add x8, x8, _g.ref.da.42.addr@PAGEOFF +; DYN-OPT-NEXT: movk x8, #42, lsl #48 +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: adrp x16, _g@GOTPAGE +; DYN-OPT-NEXT: Lloh{{.*}}: +; DYN-OPT-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; DYN-OPT-NEXT: pacda x16, x8 +; DYN-OPT-NEXT: mov x0, x16 +; DYN-OPT-NEXT: ret + +define i8* @test_global_addr_disc() #0 { + %tmp0 = bitcast { i8*, i32, i64, i64 }* @g.ptrauth.da.42.addr to i8* + ret i8* %tmp0 +} + +; Process-specific keys can't use __DATA,__auth_ptr + +; O0-LABEL: _test_global_process_specific: +; O0-NEXT: ; %bb.0: +; O0-NEXT: mov x8, #0 +; O0-NEXT: ; implicit-def: $x16 +; O0-NEXT: ; implicit-def: $x17 +; O0-NEXT: adrp x16, _g@GOTPAGE +; O0-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; O0-NEXT: pacib x16, x8 +; O0-NEXT: mov x0, x16 +; O0-NEXT: ret + +; OPT-LABEL: _test_global_process_specific: +; OPT-NEXT: ; %bb.0: +; OPT-NEXT: Lloh{{.*}}: +; OPT-NEXT: adrp x16, _g@GOTPAGE +; OPT-NEXT: Lloh{{.*}}: +; OPT-NEXT: ldr x16, [x16, _g@GOTPAGEOFF] +; OPT-NEXT: pacizb x16 +; OPT-NEXT: mov x0, x16 +; OPT-NEXT: ret + +define i8* @test_global_process_specific() #0 { + %tmp0 = bitcast { i8*, i32, i64, i64 }* @g.ptrauth.ib.0 to i8* + ret i8* %tmp0 +} + +; weak symbols can't be assumed to be non-nil. Use __DATA,__auth_ptr always. +; The alternative is to emit a null-check here, but that'd be redundant with +; whatever null-check follows in user code. 
+ +; O0-LABEL: _test_global_weak: +; O0-NEXT: ; %bb.0: +; O0-NEXT: adrp x[[STUBPAGE:[0-9]+]], l_g_weak$auth_ptr$ia$42@PAGE +; O0-NEXT: ldr x8, [x[[STUBPAGE]], l_g_weak$auth_ptr$ia$42@PAGEOFF] +; O0-NEXT: mov x0, x8 +; O0-NEXT: ret + +; OPT-LABEL: _test_global_weak: +; OPT-NEXT: ; %bb.0: +; OPT-NEXT: Lloh{{.*}}: +; OPT-NEXT: adrp x[[STUBPAGE:[0-9]+]], l_g_weak$auth_ptr$ia$42@PAGE +; OPT-NEXT: Lloh{{.*}}: +; OPT-NEXT: ldr x0, [x[[STUBPAGE]], l_g_weak$auth_ptr$ia$42@PAGEOFF] +; OPT-NEXT: ret + +define i8* @test_global_weak() #0 { + %tmp0 = bitcast { i8*, i32, i64, i64 }* @g_weak.ptrauth.ia.42 to i8* + ret i8* %tmp0 +} + +attributes #0 = { nounwind } + +; Check global references. + +@g = external global i32 + +@g_weak = extern_weak global i32 + +; CHECK-LABEL: .section __DATA,__const +; CHECK-NEXT: .globl _g.ref.ia.0 +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: _g.ref.ia.0: +; CHECK-NEXT: .quad 5 +; CHECK-NEXT: .quad _g@AUTH(ia,0) +; CHECK-NEXT: .quad 6 + +@g.ptrauth.ia.0 = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @g to i8*), i32 0, i64 0, i64 0 }, section "llvm.ptrauth" + +@g.ref.ia.0 = constant { i64, i8*, i64 } { i64 5, i8* bitcast ({ i8*, i32, i64, i64 }* @g.ptrauth.ia.0 to i8*), i64 6 } + +; CHECK-LABEL: .globl _g.ref.ia.42 +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: _g.ref.ia.42: +; CHECK-NEXT: .quad _g@AUTH(ia,42) + +@g.ptrauth.ia.42 = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @g to i8*), i32 0, i64 0, i64 42 }, section "llvm.ptrauth" + +@g.ref.ia.42 = constant i8* bitcast ({ i8*, i32, i64, i64 }* @g.ptrauth.ia.42 to i8*) + +; CHECK-LABEL: .globl _g.ref.ib.0 +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: _g.ref.ib.0: +; CHECK-NEXT: .quad 5 +; CHECK-NEXT: .quad _g@AUTH(ib,0) +; CHECK-NEXT: .quad 6 + +@g.ptrauth.ib.0 = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @g to i8*), i32 1, i64 0, i64 0 }, section "llvm.ptrauth" + +@g.ref.ib.0 = constant { i64, i8*, i64 } { i64 5, i8* bitcast ({ i8*, i32, i64, i64 }* @g.ptrauth.ib.0 
to i8*), i64 6 } + + +; CHECK-LABEL: .globl _g.ref.da.42.addr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: _g.ref.da.42.addr: +; CHECK-NEXT: .quad _g@AUTH(da,42,addr) + +@g.ptrauth.da.42.addr = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @g to i8*), i32 2, i64 ptrtoint (i8** @g.ref.da.42.addr to i64), i64 42 }, section "llvm.ptrauth" + +@g.ref.da.42.addr = constant i8* bitcast ({ i8*, i32, i64, i64 }* @g.ptrauth.da.42.addr to i8*) + +; CHECK-LABEL: .globl _g.offset.ref.da.0 +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: _g.offset.ref.da.0: +; CHECK-NEXT: .quad (_g+148)@AUTH(da,0) + +@g.offset.ptrauth.da.0 = private constant { i8*, i32, i64, i64 } { i8* getelementptr inbounds (i8, i8* bitcast (i32* @g to i8*), i64 148), i32 2, i64 0, i64 0 }, section "llvm.ptrauth" + +@g.offset.ref.da.0 = constant i8* bitcast ({ i8*, i32, i64, i64 }* @g.offset.ptrauth.da.0 to i8*) + +; CHECK-LABEL: .globl _g.big_offset.ref.da.0 +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: _g.big_offset.ref.da.0: +; CHECK-NEXT: .quad (_g+2147549185)@AUTH(da,0) + +@g.big_offset.ptrauth.da.0 = private constant { i8*, i32, i64, i64 } { i8* getelementptr inbounds (i8, i8* bitcast (i32* @g to i8*), i64 add (i64 2147483648, i64 65537)), i32 2, i64 0, i64 0 }, section "llvm.ptrauth" + +@g.big_offset.ref.da.0 = constant i8* bitcast ({ i8*, i32, i64, i64 }* @g.big_offset.ptrauth.da.0 to i8*) + +; CHECK-LABEL: .globl _g.weird_ref.da.0 +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: _g.weird_ref.da.0: +; CHECK-NEXT: .quad (_g+148)@AUTH(da,0) + +@g.weird_ref.da.0 = constant i64 ptrtoint (i8* bitcast (i64* inttoptr (i64 ptrtoint (i8* bitcast ({ i8*, i32, i64, i64 }* @g.offset.ptrauth.da.0 to i8*) to i64) to i64*) to i8*) to i64) + +; CHECK-LABEL: .globl _g_weak.ref.ia.42 +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: _g_weak.ref.ia.42: +; CHECK-NEXT: .quad _g_weak@AUTH(ia,42) + +@g_weak.ptrauth.ia.42 = private constant { i8*, i32, i64, i64 } { i8* bitcast (i32* @g_weak to i8*), i32 0, i64 0, i64 42 }, section "llvm.ptrauth" 
+ +@g_weak.ref.ia.42 = constant i8* bitcast ({ i8*, i32, i64, i64 }* @g_weak.ptrauth.ia.42 to i8*) diff --git a/llvm/test/CodeGen/AArch64/arm64e-ptrauth-ret.ll b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-ret.ll new file mode 100644 index 00000000000000..d93ef83f2a2fa0 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-ret.ll @@ -0,0 +1,221 @@ +; RUN: llc -mtriple arm64e-apple-darwin -asm-verbose=false -disable-post-ra -o - %s | FileCheck %s + +; CHECK-LABEL: _test: +; CHECK-NEXT: stp x20, x19, [sp, #-16]! +; CHECK-NEXT: ; InlineAsm Start +; CHECK-NEXT: ; InlineAsm End +; CHECK-NEXT: mov w0, #0 +; CHECK-NEXT: ldp x20, x19, [sp], #16 +; CHECK-NEXT: ret +define i32 @test() #0 { + call void asm sideeffect "", "~{x19}"() + ret i32 0 +} + +; CHECK-LABEL: _test_alloca: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: mov x8, sp +; CHECK-NEXT: ; InlineAsm Start +; CHECK-NEXT: ; InlineAsm End +; CHECK-NEXT: mov w0, #0 +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret +define i32 @test_alloca() #0 { + %p = alloca i8, i32 32 + call void asm sideeffect "", "r"(i8* %p) + ret i32 0 +} + +; CHECK-LABEL: _test_realign_alloca: +; CHECK-NEXT: pacibsp +; CHECK-NEXT: stp x29, x30, [sp, #-16]! +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub x9, sp, #112 +; CHECK-NEXT: and sp, x9, #0xffffffffffffff80 +; CHECK-NEXT: mov x8, sp +; CHECK-NEXT: ; InlineAsm Start +; CHECK-NEXT: ; InlineAsm End +; CHECK-NEXT: mov w0, #0 +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x29, x30, [sp], #16 +; CHECK-NEXT: retab +define i32 @test_realign_alloca() #0 { + %p = alloca i8, i32 32, align 128 + call void asm sideeffect "", "r"(i8* %p) + ret i32 0 +} + +; CHECK-LABEL: _test_big_alloca: +; CHECK-NEXT: stp x28, x27, [sp, #-16]! 
+; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: mov x8, sp +; CHECK-NEXT: ; InlineAsm Start +; CHECK-NEXT: ; InlineAsm End +; CHECK-NEXT: mov w0, #0 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: ldp x28, x27, [sp], #16 +; CHECK-NEXT: ret +define i32 @test_big_alloca() #0 { + %p = alloca i8, i32 1024 + call void asm sideeffect "", "r"(i8* %p) + ret i32 0 +} + +; CHECK-LABEL: _test_var_alloca: +; CHECK-NEXT: pacibsp +; CHECK-NEXT: stp x29, x30, [sp, #-16]! +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: mov x8, sp +; CHECK-NEXT: mov w9, w0 +; CHECK-NEXT: add x9, x9, #15 +; CHECK-NEXT: and x9, x9, #0x1fffffff0 +; CHECK-NEXT: sub x8, x8, x9 +; CHECK-NEXT: mov sp, x8 +; CHECK-NEXT: ; InlineAsm Start +; CHECK-NEXT: ; InlineAsm End +; CHECK-NEXT: mov w0, #0 +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x29, x30, [sp], #16 +; CHECK-NEXT: retab +define i32 @test_var_alloca(i32 %s) #0 { + %p = alloca i8, i32 %s + call void asm sideeffect "", "r"(i8* %p) + ret i32 0 +} + +; CHECK-LABEL: _test_noframe_saved: +; CHECK-NEXT: pacibsp +; CHECK-NEXT: stp x28, x27, [sp, #-96]! 
+; CHECK-NEXT: stp x26, x25, [sp, #16] +; CHECK-NEXT: stp x24, x23, [sp, #32] +; CHECK-NEXT: stp x22, x21, [sp, #48] +; CHECK-NEXT: stp x20, x19, [sp, #64] +; CHECK-NEXT: stp x29, x30, [sp, #80] +; CHECK-NEXT: ldr w30, [x0] +; CHECK-NEXT: ; InlineAsm Start +; CHECK-NEXT: ; InlineAsm End +; CHECK-NEXT: mov x0, x30 +; CHECK-NEXT: ldp x29, x30, [sp, #80] +; CHECK-NEXT: ldp x20, x19, [sp, #64] +; CHECK-NEXT: ldp x22, x21, [sp, #48] +; CHECK-NEXT: ldp x24, x23, [sp, #32] +; CHECK-NEXT: ldp x26, x25, [sp, #16] +; CHECK-NEXT: ldp x28, x27, [sp], #96 +; CHECK-NEXT: retab +define i32 @test_noframe_saved(i32* %p) #0 { + %v = load i32, i32* %p + call void asm sideeffect "", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28}"() + ret i32 %v +} + +; CHECK-LABEL: _test_noframe: +; CHECK-NEXT: ret +define void @test_noframe() #0 { + ret void +} + +; CHECK-LABEL: _test_returnaddress_0: +; CHECK-NEXT: mov x0, x30 +; CHECK-NEXT: xpaci x0 +; CHECK-NEXT: ret +define i8* @test_returnaddress_0() #0 { + %r = call i8* @llvm.returnaddress(i32 0) + ret i8* %r +} + +; CHECK-LABEL: _test_returnaddress_1: +; CHECK-NEXT: pacibsp +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
+; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: ldr x8, [x29] +; CHECK-NEXT: ldr x8, [x8, #8] +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: xpaci x0 +; CHECK-NEXT: ldp x29, x30, [sp], #16 +; CHECK-NEXT: retab +define i8* @test_returnaddress_1() #0 { + %r = call i8* @llvm.returnaddress(i32 1) + ret i8* %r +} + +; CHECK-LABEL: _test_noframe_alloca: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: add x8, sp, #15 +; CHECK-NEXT: ; InlineAsm Start +; CHECK-NEXT: ; InlineAsm End +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +define void @test_noframe_alloca() #0 { + %p = alloca i8, i32 1 + call void asm sideeffect "", "r"(i8* %p) + ret void +} + +; CHECK-LABEL: _test_call: +; CHECK-NEXT: pacibsp +; CHECK-NEXT: stp x29, x30, [sp, #-16]! +; CHECK-NEXT: bl _bar +; CHECK-NEXT: ldp x29, x30, [sp], #16 +; CHECK-NEXT: retab +define void @test_call() #0 { + call i32 @bar() + ret void +} + +; CHECK-LABEL: _test_call_alloca: +; CHECK-NEXT: pacibsp +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: stp x29, x30, [sp, #16] +; CHECK-NEXT: bl _bar +; CHECK-NEXT: ldp x29, x30, [sp, #16] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: retab +define void @test_call_alloca() #0 { + alloca i8 + call i32 @bar() + ret void +} + +; CHECK-LABEL: _test_call_shrinkwrapping: +; CHECK-NEXT: tbz w0, #0, [[RETBB:LBB[0-9_]+]] +; CHECK-NEXT: pacibsp +; CHECK-NEXT: stp x29, x30, [sp, #-16]! +; CHECK-NEXT: bl _bar +; CHECK-NEXT: ldp x29, x30, [sp], #16 +; CHECK-NEXT: autibsp +; CHECK-NEXT: [[RETBB]]: +; CHECK-NEXT: ret +define void @test_call_shrinkwrapping(i1 %c) #0 { + br i1 %c, label %tbb, label %fbb +tbb: + call i32 @bar() + br label %fbb +fbb: + ret void +} + +; CHECK-LABEL: _test_tailcall: +; CHECK-NEXT: pacibsp +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
+; CHECK-NEXT: bl _bar +; CHECK-NEXT: ldp x29, x30, [sp], #16 +; CHECK-NEXT: autibsp +; CHECK-NEXT: b _bar +define i32 @test_tailcall() #0 { + call i32 @bar() + %c = tail call i32 @bar() + ret i32 %c +} + +; CHECK-LABEL: _test_tailcall_noframe: +; CHECK-NEXT: b _bar +define i32 @test_tailcall_noframe() #0 { + %c = tail call i32 @bar() + ret i32 %c +} + +declare i32 @bar() + +declare i8* @llvm.returnaddress(i32) + +attributes #0 = { nounwind "ptrauth-returns" } diff --git a/llvm/test/CodeGen/AArch64/arm64e-ptrauth-tls.ll b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-tls.ll new file mode 100644 index 00000000000000..05904005563be2 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/arm64e-ptrauth-tls.ll @@ -0,0 +1,17 @@ +; RUN: llc -mtriple=arm64e-apple-ios %s -o - | FileCheck %s + +@var = thread_local global i8 0 + +define i8 @get_var() #0 { +; CHECK-LABEL: get_var: +; CHECK: adrp x[[TLVPDESC_SLOT_HI:[0-9]+]], _var@TLVPPAGE +; CHECK: ldr x0, [x[[TLVPDESC_SLOT_HI]], _var@TLVPPAGEOFF] +; CHECK: ldr [[TLV_GET_ADDR:x[0-9]+]], [x0] +; CHECK: blraaz [[TLV_GET_ADDR]] +; CHECK: ldrb w0, [x0] + + %val = load i8, i8* @var, align 1 + ret i8 %val +} + +attributes #0 = { nounwind "ptrauth-calls" } diff --git a/llvm/test/CodeGen/AArch64/branch-target-enforcment.mir b/llvm/test/CodeGen/AArch64/branch-target-enforcment.mir index 5db503ddcee95c..e0eff1e2de8b26 100644 --- a/llvm/test/CodeGen/AArch64/branch-target-enforcment.mir +++ b/llvm/test/CodeGen/AArch64/branch-target-enforcment.mir @@ -142,7 +142,7 @@ body: | INLINEASM &"", 1, 12, implicit-def dead early-clobber $lr $w0 = ORRWrs $wzr, $wzr, 0 early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load 8 from %stack.0) - RETAA implicit killed $w0 + RETAA implicit $sp, implicit $lr, implicit killed $w0 --- # Function starts with PACIBSP, which implicitly acts as BTI JC, so no change @@ -166,7 +166,7 @@ body: | INLINEASM &"", 1, 12, implicit-def dead early-clobber $lr $w0 = ORRWrs $wzr, $wzr, 0 early-clobber $sp, $lr = frame-destroy 
LDRXpost $sp, 16 :: (load 8 from %stack.0) - RETAB implicit killed $w0 + RETAB implicit $sp, implicit $lr, implicit killed $w0 --- # Function contains a jump table, so every target of the jump table must start diff --git a/llvm/test/DebugInfo/AArch64/ptrauth.ll b/llvm/test/DebugInfo/AArch64/ptrauth.ll new file mode 100644 index 00000000000000..beee254309eb4a --- /dev/null +++ b/llvm/test/DebugInfo/AArch64/ptrauth.ll @@ -0,0 +1,27 @@ +; RUN: llc %s -filetype=obj -mtriple arm64e-apple-darwin -o - \ +; RUN: | llvm-dwarfdump - | FileCheck %s + +; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "*__ptrauth(4, 1, 0x04d2)") +; CHECK: 0x{{0+}}[[TY]]: DW_TAG_APPLE_ptrauth_type +; CHECK-NEXT: DW_AT_type {{.*}}"*" +; CHECK-NEXT: DW_AT_APPLE_ptrauth_key (0x04) +; CHECK-NEXT: DW_AT_APPLE_ptrauth_address_discriminated (true) +; CHECK-NEXT: DW_AT_APPLE_ptrauth_extra_discriminator (0x04d2) + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" + +@p = common global i8* null, align 8, !dbg !0 + +!llvm.dbg.cu = !{!2} +!llvm.module.flags = !{!7, !8} + +!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression()) +!1 = distinct !DIGlobalVariable(name: "p", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true) +!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, emissionKind: FullDebug, globals: !5) +!3 = !DIFile(filename: "/tmp/p.c", directory: "/") +!4 = !{} +!5 = !{!0} +!6 = !DIDerivedType(tag: DW_TAG_APPLE_ptrauth_type, baseType: !9, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234) +!7 = !{i32 2, !"Dwarf Version", i32 4} +!8 = !{i32 2, !"Debug Info Version", i32 3} +!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null) diff --git a/llvm/test/Instrumentation/AddressSanitizer/do-not-instrument-ptrauth-globals.ll b/llvm/test/Instrumentation/AddressSanitizer/do-not-instrument-ptrauth-globals.ll new file mode 100644 index 00000000000000..a2e3330a73e17d --- /dev/null +++ 
b/llvm/test/Instrumentation/AddressSanitizer/do-not-instrument-ptrauth-globals.ll @@ -0,0 +1,11 @@ +; This test checks that we are not instrumenting llvm.ptrauth globals. +; RUN: opt < %s -asan -asan-module -S | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-darwin" + +declare void @f() + +@f.ptrauth.ia.42 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @f to i8*), i32 0, i64 0, i64 42 }, section "llvm.ptrauth" + +; CHECK: @f.ptrauth.ia.42 = private constant { i8*, i32, i64, i64 } { i8* bitcast (void ()* @f to i8*), i32 0, i64 0, i64 42 }, section "llvm.ptrauth" diff --git a/llvm/test/MC/AArch64/arm64e-ptrauth-reloc.s b/llvm/test/MC/AArch64/arm64e-ptrauth-reloc.s new file mode 100644 index 00000000000000..b70c5ba8de3e8f --- /dev/null +++ b/llvm/test/MC/AArch64/arm64e-ptrauth-reloc.s @@ -0,0 +1,118 @@ +// RUN: llvm-mc -triple=arm64-apple-ios < %s | \ +// RUN: FileCheck %s --check-prefix=ASM + +// RUN: llvm-mc -triple=arm64-apple-ios -filetype=obj < %s | \ +// RUN: llvm-readobj --expand-relocs -sections -section-relocations -section-data | \ +// RUN: FileCheck %s --check-prefix=RELOC + + + +// RELOC: Sections [ +// RELOC-LABEL: Section { +// RELOC-LABEL: Section { +// RELOC-NEXT: Index: 1 +// RELOC-NEXT: Name: __const (5F 5F 63 6F 6E 73 74 00 00 00 00 00 00 00 00 00) +// RELOC-NEXT: Segment: __DATA (5F 5F 44 41 54 41 00 00 00 00 00 00 00 00 00 00) + +.section __DATA,__const +.p2align 3 + +// RELOC-LABEL: Relocations [ +// RELOC-NEXT: Relocation { +// RELOC-NEXT: Offset: 0x70 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g 7 +// RELOC-NEXT: } +// RELOC-NEXT: Relocation { +// RELOC-NEXT: Offset: 0x60 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g 6 +// RELOC-NEXT: } +// RELOC-NEXT: Relocation { +// RELOC-NEXT: 
Offset: 0x50 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g5 +// RELOC-NEXT: } +// RELOC-NEXT: Relocation { +// RELOC-NEXT: Offset: 0x40 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g4 +// RELOC-NEXT: } +// RELOC-NEXT: Relocation { +// RELOC-NEXT: Offset: 0x30 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g3 +// RELOC-NEXT: } +// RELOC-NEXT: Relocation { +// RELOC-NEXT: Offset: 0x20 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g2 +// RELOC-NEXT: } +// RELOC-NEXT: Relocation { +// RELOC-NEXT: Offset: 0x10 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g1 +// RELOC-NEXT: } +// RELOC-NEXT: Relocation { +// RELOC-NEXT: Offset: 0x0 +// RELOC-NEXT: PCRel: 0 +// RELOC-NEXT: Length: 3 +// RELOC-NEXT: Type: ARM64_RELOC_AUTHENTICATED_POINTER (11) +// RELOC-NEXT: Symbol: _g0 +// RELOC-NEXT: } +// RELOC-NEXT: ] +// RELOC-NEXT: SectionData ( + +// RELOC-NEXT: 0000: 00000000 2A000080 +// ASM: .quad _g0@AUTH(ia,42) +.quad _g0@AUTH(ia,42) +.quad 0 + +// RELOC-NEXT: 0010: 00000000 00000280 +// ASM: .quad _g1@AUTH(ib,0) +.quad _g1@AUTH(ib,0) +.quad 0 + +// RELOC-NEXT: 0020: 00000000 05000580 +// ASM: .quad _g2@AUTH(da,5,addr) +.quad _g2@AUTH(da,5,addr) +.quad 0 + +// RELOC-NEXT: 0030: 00000000 FFFF0780 +// ASM: .quad _g3@AUTH(db,65535,addr) +.quad _g3@AUTH(db,0xffff,addr) +.quad 0 + +// RELOC-NEXT: 0040: 07000000 00000080 +// ASM: .quad (_g4+7)@AUTH(ia,0) +.quad (_g4 + 7)@AUTH(ia,0) +.quad 0 + +// RELOC-NEXT: 0050: FDFFFFFF 00DE0280 +// ASM: .quad (_g5-3)@AUTH(ib,56832) +.quad (_g5 - 3)@AUTH(ib,0xde00) +.quad 0 + +// RELOC-NEXT: 0060: 
00000000 FF000780 +// ASM: .quad "_g 6"@AUTH(db,255,addr) +.quad "_g 6"@AUTH(db,0xff,addr) +.quad 0 + +// RELOC-NEXT: 0070: 07000000 10000080 +// ASM: .quad ("_g 7"+7)@AUTH(ia,16) +.quad ("_g 7" + 7)@AUTH(ia,16) +.quad 0 diff --git a/llvm/test/MC/AArch64/arm64e-subtype.s b/llvm/test/MC/AArch64/arm64e-subtype.s new file mode 100644 index 00000000000000..03c5c8d30287a9 --- /dev/null +++ b/llvm/test/MC/AArch64/arm64e-subtype.s @@ -0,0 +1,12 @@ +; RUN: llvm-mc -triple=arm64e-apple-ios -filetype=obj %s -o - | llvm-objdump -macho -d -p - | FileCheck %s + +; CHECK: _foo: +; CHECK: 0: c0 03 5f d6 ret + +; CHECK: Mach header +; CHECK: magic cputype cpusubtype caps filetype ncmds sizeofcmds flags +; CHECK: MH_MAGIC_64 ARM64 E 0x00 OBJECT 3 256 0x00000000 + +.globl _foo +_foo: + ret diff --git a/llvm/test/MC/AArch64/arm64e.s b/llvm/test/MC/AArch64/arm64e.s new file mode 100644 index 00000000000000..d034f9196aca18 --- /dev/null +++ b/llvm/test/MC/AArch64/arm64e.s @@ -0,0 +1,9 @@ +// RUN: not llvm-mc -triple arm64-- -show-encoding < %s 2> %t +// RUN: FileCheck --check-prefix=CHECK-GENERIC < %t %s + +// RUN: llvm-mc -triple arm64e-- -show-encoding < %s |\ +// RUN: FileCheck %s --check-prefix=CHECK-ARM64E + +// CHECK-GENERIC: error: instruction requires: pa +// CHECK-ARM64E: pacia x0, x1 // encoding: [0x20,0x00,0xc1,0xda] + pacia x0, x1 diff --git a/llvm/test/MC/AArch64/armv8.3a-signed-pointer.s b/llvm/test/MC/AArch64/armv8.3a-signed-pointer.s index fe34002680e3c3..2ca15fceccc8f0 100644 --- a/llvm/test/MC/AArch64/armv8.3a-signed-pointer.s +++ b/llvm/test/MC/AArch64/armv8.3a-signed-pointer.s @@ -306,3 +306,11 @@ // CHECK-NEXT: ldrab x0, [x1] // encoding: [0x20,0x04,0xa0,0xf8] // CHECK-REQ: error: instruction requires: pa // CHECK-REQ-NEXT: ldrab x0, [x1] + ldraa x0, [x1]! +// CHECK-NEXT: ldraa x0, [x1]! // encoding: [0x20,0x0c,0x20,0xf8] +// CHECK-REQ: error: instruction requires: pa +// CHECK-REQ-NEXT: ldraa x0, [x1]! + ldrab x0, [x1]! +// CHECK-NEXT: ldrab x0, [x1]! 
// encoding: [0x20,0x0c,0xa0,0xf8] +// CHECK-REQ: error: instruction requires: pa +// CHECK-REQ-NEXT: ldrab x0, [x1]! diff --git a/llvm/test/MC/Disassembler/AArch64/armv8.3a-signed-pointer.txt b/llvm/test/MC/Disassembler/AArch64/armv8.3a-signed-pointer.txt index 18b376631c4b23..d11056044fa481 100644 --- a/llvm/test/MC/Disassembler/AArch64/armv8.3a-signed-pointer.txt +++ b/llvm/test/MC/Disassembler/AArch64/armv8.3a-signed-pointer.txt @@ -83,14 +83,6 @@ # CHECK: retab # CHECK: eretaa # CHECK: eretab -# CHECK: ldraa x0, [x1, #4088] -# CHECK: ldraa x0, [x1, #-4096] -# CHECK: ldrab x0, [x1, #4088] -# CHECK: ldrab x0, [x1, #-4096] -# CHECK: ldraa x0, [x1, #4088]! -# CHECK: ldraa x0, [x1, #-4096]! -# CHECK: ldrab x0, [x1, #4088]! -# CHECK: ldrab x0, [x1, #-4096]! [0x1f,0x08,0x1f,0xd6] [0x1f,0x0c,0x1f,0xd6] [0x1f,0x08,0x3f,0xd6] @@ -99,6 +91,15 @@ [0xff,0x0f,0x5f,0xd6] [0xff,0x0b,0x9f,0xd6] [0xff,0x0f,0x9f,0xd6] + +# CHECK: ldraa x0, [x1, #4088] +# CHECK: ldraa x0, [x1, #-4096] +# CHECK: ldrab x0, [x1, #4088] +# CHECK: ldrab x0, [x1, #-4096] +# CHECK: ldraa x0, [x1, #4088]! +# CHECK: ldraa x0, [x1, #-4096]! +# CHECK: ldrab x0, [x1, #4088]! +# CHECK: ldrab x0, [x1, #-4096]! [0x20,0xf4,0x3f,0xf8] [0x20,0x04,0x60,0xf8] [0x20,0xf4,0xbf,0xf8] @@ -112,3 +113,8 @@ # CHECK: ldrab x0, [x1] [0x20,0x04,0x20,0xf8] [0x20,0x04,0xa0,0xf8] + +# CHECK: ldraa x0, [x1]! +# CHECK: ldrab x0, [x1]! 
+[0x20,0x0c,0x20,0xf8] +[0x20,0x0c,0xa0,0xf8] diff --git a/llvm/test/Object/AArch64/nm-trivial-object-arm64e.test b/llvm/test/Object/AArch64/nm-trivial-object-arm64e.test new file mode 100644 index 00000000000000..c44b1550ef2f0a --- /dev/null +++ b/llvm/test/Object/AArch64/nm-trivial-object-arm64e.test @@ -0,0 +1,4 @@ +RUN: llvm-nm -arch arm64e %p/../Inputs/trivial-object-test.macho-arm64e \ +RUN: | FileCheck %s + +CHECK: 00000000 t ltmp0 diff --git a/llvm/test/Object/Inputs/trivial-object-test.macho-arm64e b/llvm/test/Object/Inputs/trivial-object-test.macho-arm64e new file mode 100644 index 00000000000000..5813378c8b1320 Binary files /dev/null and b/llvm/test/Object/Inputs/trivial-object-test.macho-arm64e differ diff --git a/llvm/test/Transforms/InstCombine/ptrauth-call.ll b/llvm/test/Transforms/InstCombine/ptrauth-call.ll new file mode 100644 index 00000000000000..ce272e25529252 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/ptrauth-call.ll @@ -0,0 +1,82 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -instcombine -S | FileCheck %s + +define i32 @test_ptrauth_call_resign(i8* %p) { +; CHECK-LABEL: @test_ptrauth_call_resign( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[P:%.*]] to i32 ()* +; CHECK-NEXT: [[TMP3:%.*]] = call i32 [[TMP1]]() [ "ptrauth"(i32 0, i64 1234) ] +; CHECK-NEXT: ret i32 [[TMP3]] +; + %tmp0 = ptrtoint i8* %p to i64 + %tmp1 = call i64 @llvm.ptrauth.resign.i64(i64 %tmp0, i32 0, i64 1234, i32 2, i64 5678) + %tmp2 = inttoptr i64 %tmp1 to i32()* + %tmp3 = call i32 %tmp2() [ "ptrauth"(i32 2, i64 5678) ] + ret i32 %tmp3 +} + +define i32 @test_ptrauth_call_resign_blend(i8** %pp) { +; CHECK-LABEL: @test_ptrauth_call_resign_blend( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8** [[PP:%.*]] to i32 ()** +; CHECK-NEXT: [[TMP012:%.*]] = load i32 ()*, i32 ()** [[TMP1]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = call i32 [[TMP012]]() [ "ptrauth"(i32 0, i64 1234) ] +; CHECK-NEXT: ret i32 [[TMP6]] +; + %tmp0 = load i8*, 
i8** %pp, align 8 + %tmp1 = ptrtoint i8** %pp to i64 + %tmp2 = ptrtoint i8* %tmp0 to i64 + %tmp3 = call i64 @llvm.ptrauth.blend.i64(i64 %tmp1, i64 5678) + %tmp4 = call i64 @llvm.ptrauth.resign.i64(i64 %tmp2, i32 0, i64 1234, i32 1, i64 %tmp3) + %tmp5 = inttoptr i64 %tmp4 to i32()* + %tmp6 = call i32 %tmp5() [ "ptrauth"(i32 1, i64 %tmp3) ] + ret i32 %tmp6 +} + +define i32 @test_ptrauth_call_resign_blend_2(i8** %pp) { +; CHECK-LABEL: @test_ptrauth_call_resign_blend_2( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8** [[PP:%.*]] to i32 ()** +; CHECK-NEXT: [[TMP012:%.*]] = load i32 ()*, i32 ()** [[TMP1]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8** [[PP]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[TMP1]], i64 5678) +; CHECK-NEXT: [[TMP6:%.*]] = call i32 [[TMP012]]() [ "ptrauth"(i32 1, i64 [[TMP3]]) ] +; CHECK-NEXT: ret i32 [[TMP6]] +; + %tmp0 = load i8*, i8** %pp, align 8 + %tmp1 = ptrtoint i8** %pp to i64 + %tmp2 = ptrtoint i8* %tmp0 to i64 + %tmp3 = call i64 @llvm.ptrauth.blend.i64(i64 %tmp1, i64 5678) + %tmp4 = call i64 @llvm.ptrauth.resign.i64(i64 %tmp2, i32 1, i64 %tmp3, i32 0, i64 1234) + %tmp5 = inttoptr i64 %tmp4 to i32()* + %tmp6 = call i32 %tmp5() [ "ptrauth"(i32 0, i64 1234) ] + ret i32 %tmp6 +} + +define i32 @test_ptrauth_call_auth(i8* %p) { +; CHECK-LABEL: @test_ptrauth_call_auth( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[P:%.*]] to i32 ()* +; CHECK-NEXT: [[TMP3:%.*]] = call i32 [[TMP1]]() [ "ptrauth"(i32 2, i64 5678) ] +; CHECK-NEXT: ret i32 [[TMP3]] +; + %tmp0 = ptrtoint i8* %p to i64 + %tmp1 = call i64 @llvm.ptrauth.auth.i64(i64 %tmp0, i32 2, i64 5678) + %tmp2 = inttoptr i64 %tmp1 to i32()* + %tmp3 = call i32 %tmp2() + ret i32 %tmp3 +} + +define i32 @test_ptrauth_call_sign(i8* %p) { +; CHECK-LABEL: @test_ptrauth_call_sign( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[P:%.*]] to i32 ()* +; CHECK-NEXT: [[TMP3:%.*]] = call i32 [[TMP1]]() +; CHECK-NEXT: ret i32 [[TMP3]] +; + %tmp0 = ptrtoint i8* %p to i64 + %tmp1 = call 
i64 @llvm.ptrauth.sign.i64(i64 %tmp0, i32 2, i64 5678) + %tmp2 = inttoptr i64 %tmp1 to i32()* + %tmp3 = call i32 %tmp2() [ "ptrauth"(i32 2, i64 5678) ] + ret i32 %tmp3 +} + +declare i64 @llvm.ptrauth.auth.i64(i64, i32, i64) +declare i64 @llvm.ptrauth.sign.i64(i64, i32, i64) +declare i64 @llvm.ptrauth.resign.i64(i64, i32, i64, i32, i64) +declare i64 @llvm.ptrauth.blend.i64(i64, i64) diff --git a/llvm/test/Transforms/SoftPointerAuth/intrinsics.ll b/llvm/test/Transforms/SoftPointerAuth/intrinsics.ll new file mode 100644 index 00000000000000..4082a7463ac9fc --- /dev/null +++ b/llvm/test/Transforms/SoftPointerAuth/intrinsics.ll @@ -0,0 +1,35 @@ +; RUN: opt < %s -soft-ptrauth -S | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.12.0" + +%struct.__block_descriptor = type { i64, i64 } +%struct.__block_literal_generic = type { i8*, i32, i32, i8*, %struct.__block_descriptor* } + +@blockptr = common global void ()* null, align 8 + +define internal void @test1() { +entry: + %0 = load void ()*, void ()** @blockptr, align 8 + %block = bitcast void ()* %0 to %struct.__block_literal_generic* + %fnptr_addr = getelementptr inbounds %struct.__block_literal_generic, %struct.__block_literal_generic* %block, i32 0, i32 3 + %block_opaque = bitcast %struct.__block_literal_generic* %block to i8* + %1 = load i8*, i8** %fnptr_addr, align 8 + %fnptr = bitcast i8* %1 to void (i8*)* + %discriminator = ptrtoint i8** %fnptr_addr to i64 + call void %fnptr(i8* %block_opaque) [ "ptrauth"(i32 1, i64 %discriminator) ] + ret void +} + +; CHECK: define internal void @test1() { +; CHECK: %fnptr_addr = getelementptr inbounds %struct.__block_literal_generic, %struct.__block_literal_generic* %block, i32 0, i32 3 +; CHECK-NEXT: %block_opaque = bitcast %struct.__block_literal_generic* %block to i8* +; CHECK-NEXT: [[T0:%.*]] = load i8*, i8** %fnptr_addr, align 8 +; CHECK-NEXT: %fnptr = bitcast i8* [[T0]] to void (i8*)* +; CHECK-NEXT: 
%discriminator = ptrtoint i8** %fnptr_addr to i64 +; CHECK-NEXT: [[FNPTR_CAST:%.*]] = bitcast void (i8*)* %fnptr to i8* +; CHECK-NEXT: [[FNPTR_AUTH:%.*]] = call i8* @__ptrauth_auth(i8* [[FNPTR_CAST]], i32 1, i64 %discriminator) [[NOUNWIND:#[0-9]+]] +; CHECK-NEXT: [[FNPTR_AUTH_CAST:%.*]] = bitcast i8* [[FNPTR_AUTH]] to void (i8*)* +; CHECK-NEXT: call void [[FNPTR_AUTH_CAST]](i8* %block_opaque){{$}} + +; CHECK: attributes [[NOUNWIND]] = { nounwind } diff --git a/llvm/test/Transforms/SoftPointerAuth/relocs.ll b/llvm/test/Transforms/SoftPointerAuth/relocs.ll new file mode 100644 index 00000000000000..818af93ebdacff --- /dev/null +++ b/llvm/test/Transforms/SoftPointerAuth/relocs.ll @@ -0,0 +1,23 @@ +; RUN: opt < %s -soft-ptrauth -S | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.12.0" + +; CHECK-NOT: @test1_reloc +; CHECK: @test1 = internal global { i8**, i32, i32, i8* } { i8** null, i32 1342177280, i32 0, i8* null }, align 8 + +@test1_reloc = private constant { i8*, i32, i64, i64 } { i8* bitcast (void (i8*)* @test1_function to i8*), i32 1, i64 ptrtoint (i8** getelementptr inbounds ({ i8**, i32, i32, i8* }, { i8**, i32, i32, i8* }* @test1, i32 0, i32 3) to i64), i64 0 }, section "llvm.ptrauth", align 8 +@test1 = internal constant { i8**, i32, i32, i8* } { i8** null, i32 1342177280, i32 0, i8* bitcast ({ i8*, i32, i64, i64 }* @test1_reloc to i8*) }, align 8 + +define internal void @test1_function(i8*) { +entry: + ret void +} + +; CHECK: define private void @ptrauth_soft_init() { +; CHECK: [[T0:%.*]] = call i8* @__ptrauth_sign(i8* bitcast (void (i8*)* @test1_function to i8*), i32 1, i64 ptrtoint (i8** getelementptr inbounds ({ i8**, i32, i32, i8* }, { i8**, i32, i32, i8* }* @test1, i32 0, i32 3) to i64)) [[NOUNWIND:#[0-9]+]] +; CHECK: [[T1:%.*]] = bitcast i8* [[T0]] to { i8*, i32, i64, i64 }* +; CHECK: [[T2:%.*]] = bitcast { i8*, i32, i64, i64 }* [[T1]] to i8* +; CHECK: store i8* [[T2]], i8** 
getelementptr inbounds ({ i8**, i32, i32, i8* }, { i8**, i32, i32, i8* }* @test1, i32 0, i32 3) + +; CHECK: attributes [[NOUNWIND]] = { nounwind } diff --git a/llvm/test/Transforms/TailCallElim/ptrauth-bundle.ll b/llvm/test/Transforms/TailCallElim/ptrauth-bundle.ll new file mode 100644 index 00000000000000..578d4d500fa3ca --- /dev/null +++ b/llvm/test/Transforms/TailCallElim/ptrauth-bundle.ll @@ -0,0 +1,12 @@ +; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; Check that the "ptrauth" operand bundle doesn't prevent tail calls. + +declare i64 @f_0(i64 %x) + +define i64 @f_1(i64 %x) { +; CHECK-LABEL: @f_1( +entry: +; CHECK: tail call i64 @f_0(i64 %x) [ "ptrauth"(i32 42, i64 %x) ] + %tmp = call i64 @f_0(i64 %x) [ "ptrauth"(i32 42, i64 %x) ] + ret i64 0 +} diff --git a/llvm/test/Verifier/ptrauth-global.ll b/llvm/test/Verifier/ptrauth-global.ll new file mode 100644 index 00000000000000..ef2f86c8f8c07f --- /dev/null +++ b/llvm/test/Verifier/ptrauth-global.ll @@ -0,0 +1,21 @@ +; RUN: not opt -S -verify < %s 2>&1 | FileCheck %s + +; CHECK: invalid llvm.ptrauth global: global doesn't have an initializer +@no_init = external global { i8*, i32, i64, i64 }, section "llvm.ptrauth" + +; CHECK: invalid llvm.ptrauth global: global isn't a struct +@not_struct = constant i8* null, section "llvm.ptrauth" + +; CHECK: invalid llvm.ptrauth global: global doesn't have type '{ i8*, i32, i64, i64 }' +@bad_type = constant { i8*, i32, i32, i32 } zeroinitializer, section "llvm.ptrauth" + +; CHECK: invalid llvm.ptrauth global: key isn't a constant integer +@bad_key = constant { i8*, i32, i64, i64 } { i8* null, i32 ptrtoint (i32* @g to i32), i64 0, i64 0}, section "llvm.ptrauth" + +; CHECK: invalid llvm.ptrauth global: discriminator isn't a constant integer +@bad_disc = constant { i8*, i32, i64, i64 } { i8* null, i32 0, i64 0, i64 ptrtoint (i32* @g to i64)}, section "llvm.ptrauth" + +; CHECK-NOT: invalid +@valid = private constant { i8*, i32, i64, i64 } { i8* getelementptr 
inbounds (i8, i8* bitcast (i32* @g to i8*), i64 2), i32 3, i64 0, i64 0 }, section "llvm.ptrauth" + +@g = external global i32 diff --git a/llvm/test/Verifier/ptrauth-operand-bundles.ll b/llvm/test/Verifier/ptrauth-operand-bundles.ll new file mode 100644 index 00000000000000..311cea20922eeb --- /dev/null +++ b/llvm/test/Verifier/ptrauth-operand-bundles.ll @@ -0,0 +1,23 @@ +; RUN: not opt -verify < %s 2>&1 | FileCheck %s + +declare void @g() + +define void @f_deopt(i64 %arg0, i32 %arg1) { +; CHECK: Multiple ptrauth operand bundles +; CHECK-NEXT: call void @g() [ "ptrauth"(i32 42, i64 100), "ptrauth"(i32 42, i64 %arg0) ] +; CHECK: Ptrauth bundle key operand must be an i32 constant +; CHECK-NEXT: call void @g() [ "ptrauth"(i32 %arg1, i64 120) ] +; CHECK: Ptrauth bundle key operand must be an i32 constant +; CHECK-NEXT: call void @g() [ "ptrauth"(i64 42, i64 120) ] +; CHECK: Ptrauth bundle discriminator operand must be an i64 +; CHECK-NEXT: call void @g() [ "ptrauth"(i32 42, i32 120) ] +; CHECK-NOT: call void @g() [ "ptrauth"(i32 42, i64 120, i32 %x) ] + + entry: + call void @g() [ "ptrauth"(i32 42, i64 100), "ptrauth"(i32 42, i64 %arg0) ] + call void @g() [ "ptrauth"(i32 %arg1, i64 120) ] + call void @g() [ "ptrauth"(i64 42, i64 120) ] + call void @g() [ "ptrauth"(i32 42, i32 120) ] + call void @g() [ "ptrauth"(i32 42, i64 120) ] ;; The verifier should not complain about this one + ret void +} diff --git a/llvm/test/tools/llvm-dwarfdump/AArch64/arm64e.ll b/llvm/test/tools/llvm-dwarfdump/AArch64/arm64e.ll new file mode 100644 index 00000000000000..2626caa916bac9 --- /dev/null +++ b/llvm/test/tools/llvm-dwarfdump/AArch64/arm64e.ll @@ -0,0 +1,17 @@ +; RUN: llc -O0 %s -filetype=obj -o - \ +; RUN: | llvm-dwarfdump -arch arm64e - | FileCheck %s +; CHECK: file format Mach-O arm64e + +source_filename = "/tmp/empty.c" +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" +target triple = "arm64e-apple-ios" + +!llvm.module.flags = !{!1, !2, !3, !4} +!llvm.dbg.cu = !{!5} + +!1 
= !{i32 2, !"Dwarf Version", i32 4} +!2 = !{i32 2, !"Debug Info Version", i32 3} +!3 = !{i32 1, !"wchar_size", i32 4} +!4 = !{i32 7, !"PIC Level", i32 2} +!5 = distinct !DICompileUnit(language: DW_LANG_C99, file: !6, producer: "Apple clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug) +!6 = !DIFile(filename: "/tmp/empty.c", directory: "/Volumes/Data/llvm-project") diff --git a/llvm/test/tools/llvm-readobj/Inputs/trivial.obj.macho-arm64e b/llvm/test/tools/llvm-readobj/Inputs/trivial.obj.macho-arm64e new file mode 100644 index 00000000000000..5813378c8b1320 Binary files /dev/null and b/llvm/test/tools/llvm-readobj/Inputs/trivial.obj.macho-arm64e differ diff --git a/llvm/test/tools/llvm-readobj/macho-arm64e.test b/llvm/test/tools/llvm-readobj/macho-arm64e.test new file mode 100644 index 00000000000000..3e0ef2d6622e36 --- /dev/null +++ b/llvm/test/tools/llvm-readobj/macho-arm64e.test @@ -0,0 +1,5 @@ +RUN: llvm-readobj -h %p/Inputs/trivial.obj.macho-arm64e | FileCheck %s + +CHECK: Magic: Magic64 (0xFEEDFACF) +CHECK: CpuType: Arm64 (0x100000C) +CHECK: CpuSubType: CPU_SUBTYPE_ARM64E (0x2) diff --git a/llvm/unittests/ADT/TripleTest.cpp b/llvm/unittests/ADT/TripleTest.cpp index d8123bbbfdf7a6..b9e18d0bcea46e 100644 --- a/llvm/unittests/ADT/TripleTest.cpp +++ b/llvm/unittests/ADT/TripleTest.cpp @@ -1497,5 +1497,10 @@ TEST(TripleTest, ParseARMArch) { Triple T = Triple("aarch64_be"); EXPECT_EQ(Triple::aarch64_be, T.getArch()); } + { + Triple T = Triple("arm64e"); + EXPECT_EQ(Triple::aarch64, T.getArch()); + EXPECT_EQ(Triple::AArch64SubArch_E, T.getSubArch()); + } } } // end anonymous namespace diff --git a/llvm/unittests/IR/MetadataTest.cpp b/llvm/unittests/IR/MetadataTest.cpp index e6c7a50113957c..1d912487bd3360 100644 --- a/llvm/unittests/IR/MetadataTest.cpp +++ b/llvm/unittests/IR/MetadataTest.cpp @@ -103,7 +103,7 @@ class MetadataTest : public testing::Test { DIType *getDerivedType() { return DIDerivedType::getDistinct( Context, 
dwarf::DW_TAG_pointer_type, "", nullptr, 0, nullptr, - getBasicType("basictype"), 1, 2, 0, None, DINode::FlagZero); + getBasicType("basictype"), 1, 2, 0, None, {}, DINode::FlagZero); } Constant *getConstant() { return ConstantInt::get(Type::getInt32Ty(Context), Counter++); @@ -1292,13 +1292,16 @@ TEST_F(DIDerivedTypeTest, get) { DIType *BaseType = getBasicType("basic"); MDTuple *ExtraData = getTuple(); unsigned DWARFAddressSpace = 8; + DIDerivedType::PtrAuthData PtrAuthData(1, false, 1234); DINode::DIFlags Flags5 = static_cast(5); DINode::DIFlags Flags4 = static_cast(4); - auto *N = - DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, - 1, Scope, BaseType, 2, 3, 4, DWARFAddressSpace, Flags5, - ExtraData); + auto *N = DIDerivedType::get( + Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, + BaseType, 2, 3, 4, DWARFAddressSpace, None, Flags5, ExtraData); + auto *N1 = DIDerivedType::get(Context, dwarf::DW_TAG_APPLE_ptrauth_type, "", + File, 1, Scope, N, 2, 3, 4, DWARFAddressSpace, + PtrAuthData, Flags5, ExtraData); EXPECT_EQ(dwarf::DW_TAG_pointer_type, N->getTag()); EXPECT_EQ("something", N->getName()); EXPECT_EQ(File, N->getFile()); @@ -1309,53 +1312,71 @@ TEST_F(DIDerivedTypeTest, get) { EXPECT_EQ(3u, N->getAlignInBits()); EXPECT_EQ(4u, N->getOffsetInBits()); EXPECT_EQ(DWARFAddressSpace, N->getDWARFAddressSpace().getValue()); + EXPECT_EQ(None, N->getPtrAuthData()); + EXPECT_EQ(PtrAuthData, N1->getPtrAuthData()); EXPECT_EQ(5u, N->getFlags()); EXPECT_EQ(ExtraData, N->getExtraData()); EXPECT_EQ(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, BaseType, 2, 3, - 4, DWARFAddressSpace, Flags5, ExtraData)); + 4, DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_reference_type, "something", File, 1, Scope, BaseType, 2, 3, - 4, DWARFAddressSpace, Flags5, ExtraData)); + 4, DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, 
DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "else", - File, 1, Scope, BaseType, 2, 3, - 4, DWARFAddressSpace, Flags5, ExtraData)); + File, 1, Scope, BaseType, 2, 3, 4, + DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", getFile(), 1, Scope, BaseType, 2, - 3, 4, DWARFAddressSpace, Flags5, ExtraData)); + 3, 4, DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 2, Scope, BaseType, 2, 3, - 4, DWARFAddressSpace, Flags5, ExtraData)); + 4, DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, getSubprogram(), - BaseType, 2, 3, 4, DWARFAddressSpace, Flags5, - ExtraData)); - EXPECT_NE(N, DIDerivedType::get( - Context, dwarf::DW_TAG_pointer_type, "something", File, 1, - Scope, getBasicType("basic2"), 2, 3, 4, DWARFAddressSpace, - Flags5, ExtraData)); + BaseType, 2, 3, 4, DWARFAddressSpace, + None, Flags5, ExtraData)); + EXPECT_NE( + N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", + File, 1, Scope, getBasicType("basic2"), 2, 3, 4, + DWARFAddressSpace, None, Flags5, ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, BaseType, 3, 3, - 4, DWARFAddressSpace, Flags5, ExtraData)); + 4, DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, BaseType, 2, 2, - 4, DWARFAddressSpace, Flags5, ExtraData)); + 4, DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, BaseType, 2, 3, - 5, DWARFAddressSpace, Flags5, ExtraData)); + 5, DWARFAddressSpace, None, Flags5, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, 
Scope, BaseType, 2, 3, - 4, DWARFAddressSpace + 1, Flags5, ExtraData)); + 4, DWARFAddressSpace + 1, None, Flags5, + ExtraData)); + EXPECT_NE(N1, DIDerivedType::get(Context, dwarf::DW_TAG_APPLE_ptrauth_type, + "", File, 1, Scope, N, 2, 3, 4, + DWARFAddressSpace, None, Flags5, ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, BaseType, 2, 3, - 4, DWARFAddressSpace, Flags4, ExtraData)); + 4, DWARFAddressSpace, None, Flags4, + ExtraData)); EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, BaseType, 2, 3, - 4, DWARFAddressSpace, Flags5, getTuple())); + 4, DWARFAddressSpace, None, Flags5, + getTuple())); TempDIDerivedType Temp = N->clone(); EXPECT_EQ(N, MDNode::replaceWithUniqued(std::move(Temp))); + TempDIDerivedType Temp1 = N1->clone(); + EXPECT_EQ(N1, MDNode::replaceWithUniqued(std::move(Temp1))); } TEST_F(DIDerivedTypeTest, getWithLargeValues) { @@ -1365,14 +1386,22 @@ TEST_F(DIDerivedTypeTest, getWithLargeValues) { MDTuple *ExtraData = getTuple(); DINode::DIFlags Flags = static_cast(5); - auto *N = DIDerivedType::get( - Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope, - BaseType, UINT64_MAX, UINT32_MAX - 1, UINT64_MAX - 2, UINT32_MAX - 3, - Flags, ExtraData); + auto *N = DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", + File, 1, Scope, BaseType, UINT64_MAX, + UINT32_MAX - 1, UINT64_MAX - 2, UINT32_MAX - 3, + None, Flags, ExtraData); EXPECT_EQ(UINT64_MAX, N->getSizeInBits()); EXPECT_EQ(UINT32_MAX - 1, N->getAlignInBits()); EXPECT_EQ(UINT64_MAX - 2, N->getOffsetInBits()); - EXPECT_EQ(UINT32_MAX - 3, N->getDWARFAddressSpace().getValue()); + EXPECT_EQ(UINT32_MAX - 3, *N->getDWARFAddressSpace()); + + auto *N1 = DIDerivedType::get( + Context, dwarf::DW_TAG_APPLE_ptrauth_type, "", File, 1, Scope, N, + UINT64_MAX, UINT32_MAX - 1, UINT64_MAX - 2, UINT32_MAX - 3, + DIDerivedType::PtrAuthData(7, true, 0xffff), Flags, ExtraData); + 
EXPECT_EQ(7U, *N1->getPtrAuthKey()); + EXPECT_EQ(true, *N1->isPtrAuthAddressDiscriminated()); + EXPECT_EQ(0xffffU, *N1->getPtrAuthExtraDiscriminator()); } typedef MetadataTest DICompositeTypeTest; diff --git a/llvm/unittests/Support/TargetParserTest.cpp b/llvm/unittests/Support/TargetParserTest.cpp index 64052519226b32..b93e454ad6ef1b 100644 --- a/llvm/unittests/Support/TargetParserTest.cpp +++ b/llvm/unittests/Support/TargetParserTest.cpp @@ -829,6 +829,17 @@ TEST(TargetParserTest, testAArch64CPU) { EXPECT_TRUE(testAArch64CPU( "cyclone", "armv8-a", "crypto-neon-fp-armv8", AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD, "8-A")); + EXPECT_TRUE(testAArch64CPU( + "vortex", "armv8.3-a", "crypto-neon-fp-armv8", + AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | + AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_RAS | + AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_FP16, "8.3-A")); + EXPECT_TRUE(testAArch64CPU( + "lightning", "armv8.4-a", "crypto-neon-fp-armv8", + AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | + AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_RAS | + AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD | + AArch64::AEK_FP16 | AArch64::AEK_FP16FML, "8.4-A")); EXPECT_TRUE(testAArch64CPU( "exynos-m1", "armv8-a", "crypto-neon-fp-armv8", AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | @@ -908,7 +919,7 @@ TEST(TargetParserTest, testAArch64CPU) { "8.2-A")); } -static constexpr unsigned NumAArch64CPUArchs = 28; +static constexpr unsigned NumAArch64CPUArchs = 30; TEST(TargetParserTest, testAArch64CPUArchList) { SmallVector List; diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py index 81556d65802c9c..d999f3a41c6ca5 100644 --- a/llvm/utils/UpdateTestChecks/asm.py +++ b/llvm/utils/UpdateTestChecks/asm.py @@ -35,6 +35,14 @@ class string: r'.Lfunc_end[0-9]+:\n', flags=(re.M | re.S)) +ASM_FUNCTION_ARM64_RE = re.compile( + r'^_?(?P[^:]+):[ \t]*;[ \t]*@(?P=func)\n' + r'(?:[ 
\t]+.cfi_startproc\n)?' + r'(?P.*?)\n' + # Darwin doesn't rely on function end labels, so we look for cfi instead. + r'^[ \t]+.cfi_endproc\n', + flags=(re.M | re.S)) + ASM_FUNCTION_AMDGPU_RE = re.compile( r'^_?(?P[^:]+):[ \t]*;+[ \t]*@(?P=func)\n[^:]*?' r'(?P.*?)\n' # (body of the function) @@ -317,6 +325,8 @@ def build_function_body_dictionary_for_triple(args, raw_tool_output, triple, pre 'x86': (scrub_asm_x86, ASM_FUNCTION_X86_RE), 'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE), 'aarch64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE), + 'arm64': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM64_RE), + 'arm64e': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM64_RE), 'aarch64-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE), 'hexagon': (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE), 'r600': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),