Skip to content

Commit

Permalink
arm64: kasan: simplify and inline MTE functions
Browse files Browse the repository at this point in the history
This change provides a simpler implementation of mte_get_mem_tag(),
mte_get_random_tag(), and mte_set_mem_tag_range().

Simplifications include removing system_supports_mte() checks as these
functions are only called from the KASAN runtime, which has already checked
system_supports_mte().  Besides that, size and address alignment checks
are removed from mte_set_mem_tag_range(), as KASAN now does those.

This change also moves these functions into the asm/mte-kasan.h header and
implements mte_set_mem_tag_range() via inline assembly to avoid
unnecessary function calls.

[vincenzo.frascino@arm.com: fix warning in mte_get_random_tag()]
  Link: https://lkml.kernel.org/r/20210211152208.23811-1-vincenzo.frascino@arm.com

Link: https://lkml.kernel.org/r/a26121b294fdf76e369cb7a74351d1c03a908930.1612546384.git.andreyknvl@google.com
Co-developed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Marco Elver <elver@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
xairy authored and torvalds committed Feb 26, 2021
1 parent cde8a7e commit 2cb3427
Show file tree
Hide file tree
Showing 7 changed files with 60 additions and 73 deletions.
1 change: 0 additions & 1 deletion arch/arm64/include/asm/cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
#define __ASM_CACHE_H

#include <asm/cputype.h>
#include <asm/mte-kasan.h>

#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
Expand Down
1 change: 1 addition & 0 deletions arch/arm64/include/asm/kasan.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/mte-kasan.h>
#include <asm/pgtable-types.h>

#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
Expand Down
2 changes: 2 additions & 0 deletions arch/arm64/include/asm/mte-def.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,6 @@
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)

#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"

#endif /* __ASM_MTE_DEF_H */
65 changes: 57 additions & 8 deletions arch/arm64/include/asm/mte-kasan.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,14 @@

#include <linux/types.h>

#ifdef CONFIG_ARM64_MTE

/*
* The functions below are meant to be used only for the
* KASAN_HW_TAGS interface defined in asm/memory.h.
* These functions are meant to be used only by the KASAN runtime through
* the arch_*() interface defined in asm/memory.h.
* These functions don't include system_supports_mte() checks,
* as KASAN only calls them when MTE is supported and enabled.
*/
#ifdef CONFIG_ARM64_MTE

static inline u8 mte_get_ptr_tag(void *ptr)
{
Expand All @@ -25,9 +28,54 @@ static inline u8 mte_get_ptr_tag(void *ptr)
return tag;
}

u8 mte_get_mem_tag(void *addr);
u8 mte_get_random_tag(void);
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
/*
 * Get the allocation tag stored for the MTE granule containing @addr.
 *
 * LDG loads the granule's allocation tag into the tag bits of the address
 * register itself (hence the "+r" read-write constraint on @addr);
 * mte_get_ptr_tag() then extracts that tag.  No system_supports_mte()
 * check here — the KASAN callers guarantee MTE is enabled.
 */
static inline u8 mte_get_mem_tag(void *addr)
{
asm(__MTE_PREAMBLE "ldg %0, [%0]"
: "+r" (addr));

return mte_get_ptr_tag(addr);
}

/*
 * Generate a random allocation tag.
 *
 * IRG writes its destination register with a randomly generated tag
 * inserted; only the tag bits of the result are used, so @addr needs no
 * initialization and a write-only "=r" constraint is deliberately used
 * (a "+r" constraint would make the compiler warn about reading an
 * uninitialized value).
 */
static inline u8 mte_get_random_tag(void)
{
void *addr;

asm(__MTE_PREAMBLE "irg %0, %0"
: "=r" (addr));

return mte_get_ptr_tag(addr);
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be non-zero and MTE_GRANULE_SIZE aligned.
 * (KASAN performs those alignment checks before calling this.)
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
u64 curr, end;

if (!size)
return;

/* STG stores the tag carried in its address operand, so embed @tag. */
curr = (u64)__tag_set(addr, tag);
end = curr + size;

/* size != 0 here, so a do-while tags at least one granule. */
do {
/*
 * 'asm volatile' is required to prevent the compiler from moving
 * the statement outside of the loop.
 */
asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
:
: "r" (curr)
: "memory");

curr += MTE_GRANULE_SIZE;
} while (curr != end);
}

void mte_enable_kernel(void);
void mte_init_tags(u64 max_tag);
Expand All @@ -46,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr)
{
return 0xFF;
}

/* !CONFIG_ARM64_MTE stub: 0xFF is the default tag when MTE is compiled out. */
static inline u8 mte_get_random_tag(void)
{
return 0xFF;
}
static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)

/* !CONFIG_ARM64_MTE stub: no tag memory exists, so tagging is a no-op. */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
}

static inline void mte_enable_kernel(void)
Expand Down
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/mte.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@
#include <asm/compiler.h>
#include <asm/mte-def.h>

#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
Expand Down
46 changes: 0 additions & 46 deletions arch/arm64/kernel/mte.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

Expand Down Expand Up @@ -88,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret;
}

/*
 * Get the allocation tag stored for the MTE granule containing @addr.
 * Returns 0xFF (the default tag) when MTE is unsupported; otherwise LDG
 * loads the granule's tag into the address register ("+r" read-write)
 * and mte_get_ptr_tag() extracts it.
 */
u8 mte_get_mem_tag(void *addr)
{
if (!system_supports_mte())
return 0xFF;

asm(__MTE_PREAMBLE "ldg %0, [%0]"
: "+r" (addr));

return mte_get_ptr_tag(addr);
}

/*
 * Generate a random allocation tag.
 * Returns 0xFF (the default tag) when MTE is unsupported.
 *
 * IRG only needs to write its destination; use a write-only "=r"
 * constraint so the compiler neither warns about nor relies on 'addr'
 * being read while uninitialized, as the previous "+r" read-write
 * constraint implied.  Only the tag bits of the result are consumed.
 */
u8 mte_get_random_tag(void)
{
	void *addr;

	if (!system_supports_mte())
		return 0xFF;

	asm(__MTE_PREAMBLE "irg %0, %0"
	    : "=r" (addr));

	return mte_get_ptr_tag(addr);
}

/*
 * Assign allocation tags for a region of memory based on @tag.
 * Returns the tagged pointer for the region (or @addr unchanged when
 * MTE is unsupported or @size is zero).  Both @addr and @size are
 * expected to be MTE_GRANULE_SIZE aligned; misalignment only warns.
 */
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
void *ptr = addr;

if ((!system_supports_mte()) || (size == 0))
return addr;

/* Make sure that size is MTE granule aligned. */
WARN_ON(size & (MTE_GRANULE_SIZE - 1));

/* Make sure that the address is MTE granule aligned. */
WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));

/* Widen the 4-bit tag to a full top-byte value before inserting it. */
tag = 0xF0 | tag;
ptr = (void *)__tag_set(ptr, tag);

/* Tag every granule in [ptr, ptr + size) with the pointer's tag. */
mte_assign_mem_tag_range(ptr, size);

return ptr;
}

void mte_init_tags(u64 max_tag)
{
static bool gcr_kernel_excl_initialized;
Expand Down
16 changes: 0 additions & 16 deletions arch/arm64/lib/mte.S
Original file line number Diff line number Diff line change
Expand Up @@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags)

ret
SYM_FUNC_END(mte_restore_page_tags)

/*
* Assign allocation tags for a region of memory based on the pointer tag
* x0 - source pointer
* x1 - size
*
* Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
* size must be non-zero and MTE_GRANULE_SIZE aligned.
*/
SYM_FUNC_START(mte_assign_mem_tag_range)
1: stg x0, [x0]			// store x0's tag for the granule at [x0]
add x0, x0, #MTE_GRANULE_SIZE	// advance to the next granule
subs x1, x1, #MTE_GRANULE_SIZE	// decrement remaining size, set flags
b.gt 1b				// loop while bytes remain (caller ensures x1 > 0)
ret
SYM_FUNC_END(mte_assign_mem_tag_range)

0 comments on commit 2cb3427

Please sign in to comment.