Commit 3bdf1a25 authored by Stephen Rothwell

Merge branch 'akpm/master'

parents d9927d46 626379c9
......@@ -955,16 +955,16 @@ config VMAP_STACK
default y
bool "Use a virtually-mapped stack"
depends on HAVE_ARCH_VMAP_STACK
depends on !KASAN || KASAN_VMALLOC
depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
help
Enable this if you want the use of virtually-mapped kernel stacks
with guard pages. This causes kernel stack overflows to be
caught immediately rather than causing difficult-to-diagnose
corruption.
To use this with KASAN, the architecture must support backing
virtual mappings with real shadow memory, and KASAN_VMALLOC must
be enabled.
To use this with software KASAN modes, the architecture must support
backing virtual mappings with real shadow memory, and KASAN_VMALLOC
must be enabled.
config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
......
......@@ -21,9 +21,9 @@
#undef memcpy
#undef memmove
#undef memset
void *__memcpy(void *__dest, __const void *__src, size_t __n) __alias(memcpy);
void *__memmove(void *__dest, __const void *__src, size_t count) __alias(memmove);
void *__memset(void *s, int c, size_t count) __alias(memset);
void *__memcpy(void *__dest, __const void *__src, size_t __n) __alias("memcpy");
void *__memmove(void *__dest, __const void *__src, size_t count) __alias("memmove");
void *__memset(void *s, int c, size_t count) __alias("memset");
#endif
void *memcpy(void *__dest, __const void *__src, size_t __n)
......
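For context on the hunk above: the GCC/Clang alias attribute ultimately takes its target as a string literal, so the quoted form is what the attribute needs whenever the wrapping __alias macro forwards its argument unchanged (whether a given tree's macro stringifies with #symbol instead is an assumption). A minimal standalone sketch of the attribute, with hypothetical macro and function names:

#include <stdio.h>

/* Hypothetical macro mirroring the quoted form used in the hunk. */
#define my_alias(name) __attribute__((alias(name)))

int real_impl(int x) { return x * 2; }

/* "wrapper" becomes another name for real_impl at link time. */
int wrapper(int x) my_alias("real_impl");

int main(void)
{
        printf("%d\n", wrapper(21));    /* prints 42 */
        return 0;
}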
......@@ -135,7 +135,8 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_SW_TAGS if (HAVE_ARCH_KASAN && !ARM64_MTE)
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
......@@ -333,7 +334,7 @@ config BROKEN_GAS_INST
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
depends on KASAN_GENERIC || KASAN_SW_TAGS
default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
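All of the offsets above satisfy the shadow mapping shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, which is also why the Makefile hunk further down picks scale shift 3 for generic KASAN (8-byte granules) and 4 for SW_TAGS (16-byte granules). A standalone sketch of that arithmetic; the example address and the generic-mode parameters are assumptions:

#include <inttypes.h>
#include <stdio.h>

/* Generic KASAN on a 48-bit-VA kernel: 8-byte granules, shift 3. */
#define KASAN_SHADOW_SCALE_SHIFT        3
#define KASAN_SHADOW_OFFSET             0xdfff800000000000ULL

/* Mirrors the kernel's kasan_mem_to_shadow() address arithmetic. */
static uint64_t mem_to_shadow(uint64_t addr)
{
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
        uint64_t kaddr = 0xffff000012345678ULL;

        printf("shadow of %#" PRIx64 " is %#" PRIx64 "\n",
               kaddr, mem_to_shadow(kaddr));
        return 0;
}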
......@@ -1591,6 +1592,9 @@ endmenu
menu "ARMv8.5 architectural features"
config AS_HAS_ARMV8_5
def_bool $(cc-option,-Wa$(comma)-march=armv8.5-a)
config ARM64_BTI
bool "Branch Target Identification support"
default y
......@@ -1665,6 +1669,7 @@ config ARM64_MTE
bool "Memory Tagging Extension support"
default y
depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
depends on AS_HAS_ARMV8_5
select ARCH_USES_HIGH_VMA_FLAGS
help
Memory Tagging (part of the ARMv8.5 Extensions) provides
......
......@@ -100,6 +100,11 @@ ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
asm-arch := armv8.4-a
endif
ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.5-a
endif
ifdef asm-arch
KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \
-DARM64_ASM_ARCH='"$(asm-arch)"'
......@@ -136,7 +141,7 @@ head-y := arch/arm64/kernel/head.o
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else
else ifeq ($(CONFIG_KASAN_GENERIC), y)
KASAN_SHADOW_SCALE_SHIFT := 3
endif
......
......@@ -473,7 +473,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#define NOKPROBE(x)
#endif
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
......
......@@ -6,6 +6,7 @@
#define __ASM_CACHE_H
#include <asm/cputype.h>
#include <asm/mte-kasan.h>
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
......@@ -51,6 +52,8 @@
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
#define ARCH_SLAB_MINALIGN MTE_GRANULE_SIZE
#endif
#ifndef __ASSEMBLY__
......
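The raised minimum alignment in the cache.h hunk matters because one MTE tag covers a whole 16-byte granule: two slab objects packed into the same granule could not carry distinct tags, so HW_TAGS aligns objects to MTE_GRANULE_SIZE. A quick illustration of the granule arithmetic (addresses are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define MTE_GRANULE_SIZE        16UL

int main(void)
{
        /* Two 8-byte objects packed without the raised alignment... */
        uint64_t a = 0x1000, b = 0x1008;

        /* ...fall into the same granule and must share one tag. */
        printf("same granule: %d\n",
               (a / MTE_GRANULE_SIZE) == (b / MTE_GRANULE_SIZE)); /* 1 */
        return 0;
}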
......@@ -105,6 +105,7 @@
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
#define ESR_ELx_FSC_MTE (0x11)
#define ESR_ELx_FSC_SERROR (0x11)
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
......
......@@ -12,7 +12,9 @@
#define arch_kasan_reset_tag(addr) __tag_reset(addr)
#define arch_kasan_get_tag(addr) __tag_get(addr)
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_init(void);
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
......@@ -33,7 +35,6 @@
#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual)
void kasan_init(void);
void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void);
......
......@@ -72,7 +72,7 @@
* address space for the shadow region respectively. They can bloat the stack
* significantly, so double the (minimum) stack size when they are in use.
*/
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
+ KASAN_SHADOW_OFFSET)
......@@ -214,7 +214,7 @@ static inline unsigned long kaslr_offset(void)
(__force __typeof__(addr))__addr; \
})
#ifdef CONFIG_KASAN_SW_TAGS
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define __tag_shifted(tag) ((u64)(tag) << 56)
#define __tag_reset(addr) __untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
......@@ -222,7 +222,7 @@ static inline unsigned long kaslr_offset(void)
#define __tag_shifted(tag) 0UL
#define __tag_reset(addr) (addr)
#define __tag_get(addr) 0
#endif /* CONFIG_KASAN_SW_TAGS */
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline const void *__tag_set(const void *addr, u8 tag)
{
......@@ -230,6 +230,15 @@ static inline const void *__tag_set(const void *addr, u8 tag)
return (const void *)(__addr | __tag_shifted(tag));
}
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging() mte_enable_kernel()
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag) \
mte_set_mem_tag_range((addr), (size), (tag))
#endif /* CONFIG_KASAN_HW_TAGS */
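These arch_* macros are the whole surface the generic KASAN_HW_TAGS code sees; the mte_* helpers behind them are declared in the new asm/mte-kasan.h further down. A toy userspace model of the tag-assign/tag-check cycle they enable (the 4-bit tag in pointer bits 59:56 matches MTE; the names and the fixed tag value are illustrative only):

#include <assert.h>
#include <stdint.h>

/* Toy model: a 4-bit tag stored in pointer bits 59:56, as MTE does. */
static uint8_t tag_of(uint64_t addr)
{
        return (addr >> 56) & 0xf;
}

static uint64_t set_tag(uint64_t addr, uint8_t tag)
{
        return (addr & ~(0xfULL << 56)) | ((uint64_t)tag << 56);
}

int main(void)
{
        uint64_t obj = 0x0000ffff00001000ULL;

        /* arch_get_random_tag() analogue: pick a tag for an allocation. */
        uint64_t tagged = set_tag(obj, 0x7);

        /* arch_get_mem_tag() analogue: an access is valid only if the
         * pointer tag matches the tag of the granule it touches. */
        assert(tag_of(tagged) == 0x7);
        return 0;
}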
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 ARM Ltd.
*/
#ifndef __ASM_MTE_DEF_H
#define __ASM_MTE_DEF_H
#define MTE_GRANULE_SIZE UL(16)
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
#define MTE_TAG_SHIFT 56
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
#endif /* __ASM_MTE_DEF_H */
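With MTE_TAG_SHIFT 56 and MTE_TAG_SIZE 4, MTE_TAG_MASK resolves to GENMASK(59, 56), i.e. bits 59:56 of a pointer. A standalone check of that arithmetic, with the kernel's GENMASK open-coded:

#include <stdint.h>
#include <stdio.h>

#define MTE_TAG_SHIFT   56
#define MTE_TAG_SIZE    4

/* Open-coded equivalent of the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
        uint64_t mask = GENMASK_ULL(MTE_TAG_SHIFT + MTE_TAG_SIZE - 1,
                                    MTE_TAG_SHIFT);

        printf("MTE_TAG_MASK = %#llx\n", (unsigned long long)mask);
        /* prints 0xf00000000000000, i.e. bits 59:56 */
        return 0;
}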
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 ARM Ltd.
*/
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H
#include <asm/mte-def.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* The functions below are meant to be used only for the
* KASAN_HW_TAGS interface defined in asm/memory.h.
*/
#ifdef CONFIG_ARM64_MTE
static inline u8 mte_get_ptr_tag(void *ptr)
{
/* Note: The format of KASAN tags is 0xF<x> */
u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
return tag;
}
u8 mte_get_mem_tag(void *addr);
u8 mte_get_random_tag(void);
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
void mte_enable_kernel(void);
void mte_init_tags(u64 max_tag);
#else /* CONFIG_ARM64_MTE */
static inline u8 mte_get_ptr_tag(void *ptr)
{
return 0xFF;
}
static inline u8 mte_get_mem_tag(void *addr)
{
return 0xFF;
}
static inline u8 mte_get_random_tag(void)
{
return 0xFF;
}
static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
return addr;
}
static inline void mte_enable_kernel(void)
{
}
static inline void mte_init_tags(u64 max_tag)
{
}
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_KASAN_H */
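mte_get_ptr_tag() ORs 0xF0 into the pointer's top byte so that a 4-bit MTE tag comes back in KASAN's 0xF<x> byte format (the 0xFF fallbacks above correspond to the default kernel tag). A standalone rendering of the conversion; the example pointer value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define MTE_TAG_SHIFT   56

/* Mirrors the mte_get_ptr_tag() inline above, minus kernel types. */
static uint8_t get_ptr_tag(uint64_t ptr)
{
        return (uint8_t)(0xF0 | (uint8_t)(ptr >> MTE_TAG_SHIFT));
}

int main(void)
{
        uint64_t tagged = 0x0700ffff00001000ULL;  /* MTE tag 0x7 in 59:56 */

        printf("KASAN tag: %#x\n", get_ptr_tag(tagged));  /* 0xf7 */
        return 0;
}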
......@@ -5,17 +5,21 @@
#ifndef __ASM_MTE_H
#define __ASM_MTE_H
#define MTE_GRANULE_SIZE UL(16)
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
#define MTE_TAG_SHIFT 56
#define MTE_TAG_SIZE 4
#include <asm/compiler.h>
#include <asm/mte-def.h>
#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
#include <linux/page-flags.h>
#include <linux/types.h>
#include <asm/pgtable-types.h>
extern u64 gcr_kernel_excl;
void mte_clear_page_tags(void *addr);
unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
unsigned long n);
......@@ -45,7 +49,9 @@ long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
#else
void mte_assign_mem_tag_range(void *addr, size_t size);
#else /* CONFIG_ARM64_MTE */
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged 0
......@@ -80,7 +86,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
return -EIO;
}
#endif
static inline void mte_assign_mem_tag_range(void *addr, size_t size)
{
}
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */
......@@ -154,7 +154,7 @@ struct thread_struct {
#endif
#ifdef CONFIG_ARM64_MTE
u64 sctlr_tcf0;
u64 gcr_user_incl;
u64 gcr_user_excl;
#endif
};
......
......@@ -5,7 +5,7 @@
#ifndef __ASM_STRING_H
#define __ASM_STRING_H
#ifndef CONFIG_KASAN
#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
#define __HAVE_ARCH_STRRCHR
extern char *strrchr(const char *, int c);
......@@ -48,7 +48,8 @@ extern void *__memset(void *, int, __kernel_size_t);
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(__SANITIZE_ADDRESS__)
/*
* For files that are not instrumented (e.g. mm/slub.c) we
......
......@@ -200,13 +200,36 @@ do { \
CONFIG_ARM64_PAN)); \
} while (0)
/*
* The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
* affects EL0 and TCF affects EL1 irrespective of which TTBR is
* used.
* The kernel usually accesses TTBR0 with LDTR/STTR instructions
* when UAO is available, so these act as EL0 accesses using
* TCF0.
* However, the futex.h code uses exclusives, which are executed
* at EL1; these can cause a tag check fault even if the user
* has disabled TCF0.
*
* To address the problem we set the PSTATE.TCO bit in uaccess_enable()
* and reset it in uaccess_disable().
*
* The Tag Check Override (TCO) bit temporarily disables tag
* checking, preventing the issue.
*/
static inline void uaccess_disable(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
__uaccess_disable(ARM64_HAS_PAN);
}
static inline void uaccess_enable(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
__uaccess_enable(ARM64_HAS_PAN);
}
......
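The net effect of the hunk above is that every uaccess window runs with tag checks suspended. A toy userspace model of the enable/access/disable pattern, with PSTATE.TCO reduced to a thread-local flag (purely illustrative, not kernel code):

#include <assert.h>
#include <stdbool.h>

/* Toy stand-in for PSTATE.TCO: tag-check suppression as a flag. */
static _Thread_local bool tco;

static void toy_uaccess_enable(void)  { tco = true;  }  /* SET_PSTATE_TCO(1) */
static void toy_uaccess_disable(void) { tco = false; }  /* SET_PSTATE_TCO(0) */

/* A "checked" load: with TCO set, a tag mismatch is ignored. */
static int toy_load(const int *p, bool tags_match)
{
        assert(tags_match || tco);
        return *p;
}

int main(void)
{
        int x = 42;

        toy_uaccess_enable();
        /* Mismatched tags inside the uaccess window do not fault. */
        int v = toy_load(&x, false);
        toy_uaccess_disable();

        assert(v == 42);
        return 0;
}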
......@@ -47,6 +47,9 @@ int main(void)
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
DEFINE(THREAD_GCR_EL1_USER, offsetof(struct task_struct, thread.gcr_user_excl));
#endif
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
......
......@@ -70,6 +70,7 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
......@@ -1713,6 +1714,8 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
cleared_zero_page = true;
mte_clear_page_tags(lm_alias(empty_zero_page));
}
kasan_init_hw_tags_cpu();
}
#endif /* CONFIG_ARM64_MTE */
......
......@@ -173,6 +173,43 @@ alternative_else_nop_endif
#endif
.endm
.macro mte_set_gcr, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
/*
* Calculate and set the exclude mask preserving
* the RRND (bit[16]) setting.
*/
mrs_s \tmp2, SYS_GCR_EL1
bfi \tmp2, \tmp, #0, #16
msr_s SYS_GCR_EL1, \tmp2
isb
#endif
.endm
.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
ldr_l \tmp, gcr_kernel_excl
mte_set_gcr \tmp, \tmp2
1:
#endif
.endm
.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
ldr \tmp, [\tsk, #THREAD_GCR_EL1_USER]
mte_set_gcr \tmp, \tmp2
1:
#endif
.endm
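In mte_set_gcr, "bfi \tmp2, \tmp, #0, #16" inserts the low 16 bits of the exclude mask into GCR_EL1[15:0] while leaving RRND (bit 16) and everything above untouched. The C equivalent of that bitfield insert, with arbitrary example values:

#include <stdint.h>
#include <stdio.h>

/* C equivalent of "bfi dst, src, #0, #16". */
static uint64_t bfi16(uint64_t dst, uint64_t src)
{
        return (dst & ~0xffffULL) | (src & 0xffffULL);
}

int main(void)
{
        uint64_t gcr = 1ULL << 16;      /* RRND set, exclude mask clear */

        gcr = bfi16(gcr, 0xfffe);       /* install mask, RRND preserved */
        printf("GCR_EL1 = %#llx\n", (unsigned long long)gcr); /* 0x1fffe */
        return 0;
}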
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
......@@ -212,6 +249,8 @@ alternative_else_nop_endif
ptrauth_keys_install_kernel tsk, x20, x22, x23
mte_set_kernel_gcr x22, x23
scs_load tsk, x20
.else
add x21, sp, #S_FRAME_SIZE
......@@ -330,6 +369,8 @@ alternative_else_nop_endif
/* No kernel C function calls after this as user keys are set. */
ptrauth_keys_install_user tsk, x0, x1, x2
mte_set_user_gcr tsk, x0, x1
apply_ssbd 0, x0, x1
.endif
......
......@@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
bl __pi_memset
dsb ishst // Make zero page visible to PTW
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bl kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
......
......@@ -371,6 +371,11 @@ static void swsusp_mte_restore_tags(void)
unsigned long pfn = xa_state.xa_index;
struct page *page = pfn_to_online_page(pfn);
/*
* It is not required to invoke page_kasan_tag_reset(page)
* at this point since the tags stored in page->flags are
* already restored.
*/
mte_restore_page_tags(page_address(page), tags);
mte_free_tag_storage(tags);
......
......@@ -37,7 +37,7 @@ __efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
__efistub___memcpy = __pi_memcpy;
__efistub___memmove = __pi_memmove;
__efistub___memset = __pi_memset;
......
......@@ -151,7 +151,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
if (IS_ENABLED(CONFIG_KASAN))
if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/*
* KASAN does not expect the module region to intersect the
* vmalloc region, since shadow memory is allocated for each
......
......@@ -30,7 +30,8 @@ void *module_alloc(unsigned long size)
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
gfp_mask |= __GFP_NOWARN;
if (IS_ENABLED(CONFIG_KASAN))
if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/* don't exceed the static module region - see below */
module_alloc_end = MODULES_END;
......@@ -39,7 +40,8 @@ void *module_alloc(unsigned long size)
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
!IS_ENABLED(CONFIG_KASAN))
!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
!IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/*
* KASAN can only deal with module allocations being served
* from the reserved module region, since the remainder of
......
......@@ -13,13 +13,18 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
u64 gcr_kernel_excl __ro_after_init;
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
......@@ -31,6 +36,15 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
return;
}
page_kasan_tag_reset(page);
/*
* We need smp_wmb() between setting the flags and clearing the
* tags because if another thread reads page->flags and builds a
* tagged address out of it, there is an address dependency ordering
* its memory access; on the current thread, however, nothing
* guarantees that the new page->flags are visible before the tags
* are updated.
*/
smp_wmb();
mte_clear_page_tags(page_address(page));
}
......@@ -72,6 +86,78 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret;
}
u8 mte_get_mem_tag(void *addr)
{
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "ldg %0, [%0]"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
u8 mte_get_random_tag(void)
{
void *addr;
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "irg %0, %0"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
void *ptr = addr;
if ((!system_supports_mte()) || (size == 0))
return addr;
/* Make sure that size is MTE granule aligned. */
WARN_ON(size & (MTE_GRANULE_SIZE - 1));
/* Make sure that the address is MTE granule aligned. */
WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
tag = 0xF0 | tag;
ptr = (void *)__tag_set(ptr, tag);
mte_assign_mem_tag_range(ptr, size);
return ptr;
}
void mte_init_tags(u64 max_tag)
{
static bool gcr_kernel_excl_initialized;
if (!gcr_kernel_excl_initialized) {
/*
* KASAN tags are a full byte in the 0xF<x> format, while MTE tags
* are the 4-bit <x>; this conversion extracts the MTE tag from a
* KASAN tag.
*/
u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
max_tag), 0);
gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
gcr_kernel_excl_initialized = true;
}
/* Enable the kernel exclude mask for random tag generation. */
write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}
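Worked through, assuming KASAN passes max_tag = 0xFD (KASAN_TAG_MAX, keeping 0xFE and 0xFF reserved): the low nibble is 0xD, so incl = GENMASK(13, 0) = 0x3fff and gcr_kernel_excl = 0xc000, meaning IRG never generates tags 14 or 15. A standalone check of that arithmetic, with FIELD_GET and GENMASK open-coded:

#include <stdint.h>
#include <stdio.h>

#define MTE_TAG_SHIFT           56
#define MTE_TAG_MASK            (0xfULL << MTE_TAG_SHIFT)
#define SYS_GCR_EL1_EXCL_MASK   0xffffULL

int main(void)
{
        uint64_t max_tag = 0xfd;        /* assumption: KASAN_TAG_MAX */

        /* FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT, max_tag): low nibble. */
        uint64_t top  = max_tag & (MTE_TAG_MASK >> MTE_TAG_SHIFT);
        uint64_t incl = (1ULL << (top + 1)) - 1;        /* GENMASK(top, 0) */
        uint64_t excl = ~incl & SYS_GCR_EL1_EXCL_MASK;

        printf("incl = %#llx, excl = %#llx\n",
               (unsigned long long)incl, (unsigned long long)excl);
        /* incl = 0x3fff, excl = 0xc000: tags 14 and 15 are excluded */
        return 0;
}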
void mte_enable_kernel(void)
{
/* Enable MTE Sync Mode for EL1. */
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
isb();
}
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
......@@ -92,23 +178,26 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
preempt_enable();
}
static void update_gcr_el1_excl(u64 incl)
static void update_gcr_el1_excl(u64 excl)
{
u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
/*
* Note that 'incl' is an include mask (controlled by the user via
* prctl()) while GCR_EL1 accepts an exclude mask.
* Note that the mask controlled by the user via prctl() is an
* include mask, while GCR_EL1 accepts an exclude mask.