Diffstat (limited to 'arch/arm64/include')
-rw-r--r--   arch/arm64/include/asm/brk-imm.h           |   2
-rw-r--r--   arch/arm64/include/asm/dma-mapping.h       |   8
-rw-r--r--   arch/arm64/include/asm/futex.h             |   2
-rw-r--r--   arch/arm64/include/asm/kasan.h             |   8
-rw-r--r--   arch/arm64/include/asm/memory.h            |  56
-rw-r--r--   arch/arm64/include/asm/pgalloc.h           |   4
-rw-r--r--   arch/arm64/include/asm/pgtable-hwdef.h     |   1
-rw-r--r--   arch/arm64/include/asm/smp.h               |   8
-rw-r--r--   arch/arm64/include/asm/uaccess.h           |  15
-rw-r--r--   arch/arm64/include/asm/unistd.h            |   7
-rw-r--r--   arch/arm64/include/asm/unistd32.h          |   2
-rw-r--r--   arch/arm64/include/uapi/asm/ptrace.h       |  39
-rw-r--r--   arch/arm64/include/uapi/asm/sigcontext.h   |  56
-rw-r--r--   arch/arm64/include/uapi/asm/sve_context.h  |  53
14 files changed, 165 insertions(+), 96 deletions(-)
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index ed693c5bcec0..2945fe6cd863 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -16,10 +16,12 @@
  * 0x400: for dynamic BRK instruction
  * 0x401: for compile time BRK instruction
  * 0x800: kernel-mode BUG() and WARN() traps
+ * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
  */
 #define FAULT_BRK_IMM			0x100
 #define KGDB_DYN_DBG_BRK_IMM		0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401
 #define BUG_BRK_IMM			0x800
+#define KASAN_BRK_IMM			0x900
 
 #endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index c41f3fb1446c..95dbf3ef735a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,15 +24,9 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-extern const struct dma_map_ops dummy_dma_ops;
-
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	/*
-	 * We expect no ISA devices, and all other DMA masters are expected to
-	 * have someone call arch_setup_dma_ops at device creation time.
-	 */
-	return &dummy_dma_ops;
+	return NULL;
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 07fe2479d310..cccb83ad7fa8 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -96,7 +96,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 	u32 val, tmp;
 	u32 __user *uaddr;
 
-	if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
+	if (!access_ok(_uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	uaddr = __uaccess_mask_ptr(_uaddr);
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 8758bb008436..b52aacd2c526 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -4,12 +4,16 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_KASAN
-
 #include <linux/linkage.h>
 #include <asm/memory.h>
 #include <asm/pgtable-types.h>
 
+#define arch_kasan_set_tag(addr, tag)	__tag_set(addr, tag)
+#define arch_kasan_reset_tag(addr)	__tag_reset(addr)
+#define arch_kasan_get_tag(addr)	__tag_get(addr)
+
+#ifdef CONFIG_KASAN
+
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
  * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
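Note that the brk-imm.h hunk reserves a whole 0x9xx window rather than a single immediate, so tag-based KASAN can encode report details in the low byte of the BRK immediate. A minimal standalone sketch of matching that window; the helper name is illustrative, not kernel API:

#include <stdbool.h>
#include <stdint.h>

#define BUG_BRK_IMM	0x800
#define KASAN_BRK_IMM	0x900
#define KASAN_BRK_MASK	0xff	/* 0x900 - 0x9ff all decode to KASAN */

/* Hypothetical classifier: does a trapped BRK immediate fall in the
 * tag-based KASAN window reserved above? */
static bool is_kasan_brk(uint16_t imm)
{
	return (imm & ~(uint16_t)KASAN_BRK_MASK) == KASAN_BRK_IMM;
}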
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a0ee78c208c3..e1ec947e7c0c 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -53,8 +53,11 @@
 #define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
 	(UL(1) << (VA_BITS - 1)) + 1)
 #define KIMAGE_VADDR		(MODULES_END)
+#define BPF_JIT_REGION_START	(VA_START + KASAN_SHADOW_SIZE)
+#define BPF_JIT_REGION_SIZE	(SZ_128M)
+#define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VADDR		(BPF_JIT_REGION_END)
 #define MODULES_VSIZE		(SZ_128M)
 #define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
 #define PCI_IO_END		(VMEMMAP_START - SZ_2M)
@@ -71,13 +74,11 @@
 #endif
 
 /*
- * KASAN requires 1/8th of the kernel virtual address space for the shadow
- * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
- * on.
+ * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
+ * address space for the shadow region respectively. They can bloat the stack
+ * significantly, so double the (minimum) stack size when they are in use.
  */
 #ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SCALE_SHIFT 3
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
 #ifdef CONFIG_KASAN_EXTRA
 #define KASAN_THREAD_SHIFT	2
@@ -170,14 +171,6 @@
 #define IOREMAP_MAX_ORDER	(PMD_SHIFT)
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-#define __early_init_dt_declare_initrd(__start, __end)			\
-	do {								\
-		initrd_start = (__start);				\
-		initrd_end = (__end);					\
-	} while (0)
-#endif
-
 #ifndef __ASSEMBLY__
 
 #include <linux/bitops.h>
@@ -218,6 +211,26 @@ extern u64			vabits_user;
 #define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
 
 /*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define untagged_addr(addr)	\
+	((__typeof__(addr))sign_extend64((u64)(addr), 55))
+
+#ifdef CONFIG_KASAN_SW_TAGS
+#define __tag_shifted(tag)	((u64)(tag) << 56)
+#define __tag_set(addr, tag)	(__typeof__(addr))( \
+		((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
+#define __tag_reset(addr)	untagged_addr(addr)
+#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
+#else
+#define __tag_set(addr, tag)	(addr)
+#define __tag_reset(addr)	(addr)
+#define __tag_get(addr)		0
+#endif
+
+/*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
@@ -300,7 +313,13 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
-#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page)	({					\
+	unsigned long __addr =						\
+		((__page_to_voff(page)) | PAGE_OFFSET);			\
+	__addr = __tag_set(__addr, page_kasan_tag(page));		\
+	((void *)__addr);						\
+})
+
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
 
 #define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
@@ -308,9 +327,10 @@ static inline void *phys_to_virt(phys_addr_t x)
 #endif
 #endif
 
-#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
-					 _virt_addr_valid(kaddr))
+#define _virt_addr_is_linear(kaddr)	\
+	(__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr)		\
+	(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 2e05bcd944c8..52fa47c73bf0 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -91,13 +91,13 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
 
 static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	return (pte_t *)__get_free_page(PGALLOC_GFP);
 }
 
 static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one(struct mm_struct *mm)
 {
 	struct page *pte;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 22bb3ae514f5..e9b0a7d75184 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -299,6 +299,7 @@
 #define TCR_A1			(UL(1) << 22)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
+#define TCR_TBI1		(UL(1) << 38)
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
 #define TCR_NFD1		(UL(1) << 54)
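To see the pointer-tagging arithmetic from the memory.h hunk in isolation: the tag lives in bits 63:56, and untagged_addr() sign-extends from bit 55 so that untagging restores the all-zeroes (user) or all-ones (kernel) top byte. A standalone userspace sketch using plain stdint types; sign_extend64() is re-implemented here only so the example compiles on its own, the kernel definitions above remain authoritative:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's sign_extend64(): replicate bit 'index'
 * into all higher bits. */
static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

#define __tag_shifted(tag)	((uint64_t)(tag) << 56)
#define __tag_set(addr, tag) \
	((((uint64_t)(addr)) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
#define __tag_get(addr)		((uint8_t)((uint64_t)(addr) >> 56))
#define untagged_addr(addr)	((uint64_t)sign_extend64((uint64_t)(addr), 55))

int main(void)
{
	uint64_t addr = 0x0000ffffdeadbeefULL;	/* a userspace-style address */
	uint64_t tagged = __tag_set(addr, 0x2a);

	printf("tag      = 0x%02x\n", __tag_get(tagged));	/* 0x2a */
	printf("untagged = 0x%016llx\n",			/* addr again */
	       (unsigned long long)untagged_addr(tagged));
	return 0;
}

The TCR_TBI1 bit added in pgtable-hwdef.h is what makes this worthwhile: with Top Byte Ignore enabled for TTBR1, the hardware disregards bits 63:56 on loads and stores, so tagged kernel pointers can be dereferenced directly.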
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 1895561839a9..18553f399e08 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -16,9 +16,11 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <linux/const.h>
+
 /* Values for secondary_data.status */
 #define CPU_STUCK_REASON_SHIFT		(8)
-#define CPU_BOOT_STATUS_MASK		((1U << CPU_STUCK_REASON_SHIFT) - 1)
+#define CPU_BOOT_STATUS_MASK		((UL(1) << CPU_STUCK_REASON_SHIFT) - 1)
 
 #define CPU_MMU_OFF			(-1)
 #define CPU_BOOT_SUCCESS		(0)
@@ -29,8 +31,8 @@
 /* Fatal system error detected by secondary CPU, crash the system */
 #define CPU_PANIC_KERNEL		(3)
 
-#define CPU_STUCK_REASON_52_BIT_VA	(1U << CPU_STUCK_REASON_SHIFT)
-#define CPU_STUCK_REASON_NO_GRAN	(2U << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fad33f5fde47..547d7a0c9d05 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -95,14 +95,7 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 	return ret;
 }
 
-/*
- * When dealing with data aborts, watchpoints, or instruction traps we may end
- * up with a tagged userland pointer. Clear the tag to get a sane pointer to
- * pass on to access_ok(), for instance.
- */
-#define untagged_addr(addr)		sign_extend64(addr, 55)
-
-#define access_ok(type, addr, size)	__range_ok(addr, size)
+#define access_ok(addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
 
 #define _ASM_EXTABLE(from, to)						\
@@ -308,7 +301,7 @@ do {									\
 ({									\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
-	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
+	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
 		__get_user_err((x), __p, (err));			\
 	} else {							\
@@ -377,7 +370,7 @@ do {									\
 ({									\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
-	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
+	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
 		__put_user_err((x), __p, (err));			\
 	} else {							\
@@ -425,7 +418,7 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
+	if (access_ok(to, n))
 		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
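The uaccess.h hunk tracks the treewide removal of access_ok()'s VERIFY_READ/VERIFY_WRITE argument (the untagged_addr() definition moves to memory.h, as shown above). A hypothetical kernel-style caller after this change; the function is illustrative, not from this patch:

#include <linux/uaccess.h>

/* Illustrative only: the type argument is gone, so callers pass just the
 * user pointer and size before dereferencing via get_user(). */
static int read_user_flag(u32 __user *uptr, u32 *out)
{
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;

	return get_user(*out, uptr);
}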
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index b13ca091f833..a7b1fc58ffdf 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -40,10 +40,11 @@
  * The following SVCs are ARM private.
  */
 #define __ARM_NR_COMPAT_BASE		0x0f0000
-#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
+#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE + 2)
+#define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE + 5)
+#define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls		399
+#define __NR_compat_syscalls		400
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 2cd6dcf8d246..04ee190b90fe 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -819,6 +819,8 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
 __SYSCALL(__NR_statx, sys_statx)
 #define __NR_rseq 398
 __SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_io_pgetevents 399
+__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
 
 /*
  * Please add new compat syscalls above this comment and update
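The unistd.h/unistd32.h pair shows the invariant behind the __NR_compat_syscalls bump: the count must always sit one past the highest allocated compat syscall number. A standalone compile-time restatement of that relationship (not present in the kernel source; values copied from the hunks above):

/* C11 static assertion tying the two constants together. */
#define __NR_io_pgetevents	399
#define __NR_compat_syscalls	400

_Static_assert(__NR_compat_syscalls == __NR_io_pgetevents + 1,
	       "__NR_compat_syscalls must be one past the last syscall");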
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index c2f249bcd829..28d77c9ed531 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -23,7 +23,7 @@
 #include <linux/types.h>
 
 #include <asm/hwcap.h>
-#include <asm/sigcontext.h>
+#include <asm/sve_context.h>
 
 
 /*
@@ -130,9 +130,9 @@ struct user_sve_header {
  */
 
 /* Offset from the start of struct user_sve_header to the register data */
-#define SVE_PT_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_REGS_OFFSET						\
+	((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 /*
  * The register data content and layout depends on the value of the
@@ -178,39 +178,36 @@ struct user_sve_header {
  * Additional data might be appended in the future.
  */
 
-#define SVE_PT_SVE_ZREG_SIZE(vq)	SVE_SIG_ZREG_SIZE(vq)
-#define SVE_PT_SVE_PREG_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
-#define SVE_PT_SVE_FFR_SIZE(vq)	SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)
 #define SVE_PT_SVE_FPSR_SIZE	sizeof(__u32)
 #define SVE_PT_SVE_FPCR_SIZE	sizeof(__u32)
 
-#define __SVE_SIG_TO_PT(offset) \
-	((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
-
 #define SVE_PT_SVE_OFFSET	SVE_PT_REGS_OFFSET
 
 #define SVE_PT_SVE_ZREGS_OFFSET \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+	(SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
 #define SVE_PT_SVE_ZREGS_SIZE(vq) \
-	(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+	(SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
 
 #define SVE_PT_SVE_PREGS_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_PT_SVE_PREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
 #define SVE_PT_SVE_PREGS_SIZE(vq) \
-	(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
+	(SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \
 		SVE_PT_SVE_PREGS_OFFSET(vq))
 
 #define SVE_PT_SVE_FFR_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_PT_SVE_FPSR_OFFSET(vq)				\
 	((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) +	\
-			(SVE_VQ_BYTES - 1))			\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+			(__SVE_VQ_BYTES - 1))			\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 #define SVE_PT_SVE_FPCR_OFFSET(vq) \
 	(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
 
@@ -221,8 +218,8 @@ struct user_sve_header {
 
 #define SVE_PT_SVE_SIZE(vq, flags)					\
 	((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE		\
-		- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1))		\
-	/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+		- SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1))		\
+	/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 #define SVE_PT_SIZE(vq, flags)						\
 	 (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?		\
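SVE_PT_REGS_OFFSET and the other offset macros above repeatedly use the (x + (N - 1)) / N * N idiom to round a size up to the next multiple of __SVE_VQ_BYTES. A tiny standalone check of the arithmetic:

#include <assert.h>

/* Round x up to a multiple of n -- the same arithmetic the macros above
 * apply with n == __SVE_VQ_BYTES (16). */
#define ROUND_UP_TO(x, n)	(((x) + ((n) - 1)) / (n) * (n))

int main(void)
{
	assert(ROUND_UP_TO(16, 16) == 16);	/* 16-byte header: no padding */
	assert(ROUND_UP_TO(20, 16) == 32);	/* 17..32 all round up to 32 */
	return 0;
}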
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index dca8f8b5168b..5f3c0cec5af9 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -130,6 +130,8 @@ struct sve_context {
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/sve_context.h>
+
 /*
  * The SVE architecture leaves space for future expansion of the
  * vector length beyond its initial architectural limit of 2048 bits
@@ -138,21 +140,20 @@ struct sve_context {
  * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
  * terminology.
  */
-#define SVE_VQ_BYTES		16	/* number of bytes per quadword */
+#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */
 
-#define SVE_VQ_MIN		1
-#define SVE_VQ_MAX		512
+#define SVE_VQ_MIN		__SVE_VQ_MIN
+#define SVE_VQ_MAX		__SVE_VQ_MAX
 
-#define SVE_VL_MIN		(SVE_VQ_MIN * SVE_VQ_BYTES)
-#define SVE_VL_MAX		(SVE_VQ_MAX * SVE_VQ_BYTES)
+#define SVE_VL_MIN		__SVE_VL_MIN
+#define SVE_VL_MAX		__SVE_VL_MAX
 
-#define SVE_NUM_ZREGS		32
-#define SVE_NUM_PREGS		16
+#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
+#define SVE_NUM_PREGS		__SVE_NUM_PREGS
 
-#define sve_vl_valid(vl) \
-	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
-#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)
-#define sve_vl_from_vq(vq)	((vq) * SVE_VQ_BYTES)
+#define sve_vl_valid(vl)	__sve_vl_valid(vl)
+#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
+#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)
 
 /*
  * If the SVE registers are currently live for the thread at signal delivery,
@@ -205,34 +206,33 @@ struct sve_context {
  * Additional data might be appended in the future.
  */
 
-#define SVE_SIG_ZREG_SIZE(vq)	((__u32)(vq) * SVE_VQ_BYTES)
-#define SVE_SIG_PREG_SIZE(vq)	((__u32)(vq) * (SVE_VQ_BYTES / 8))
-#define SVE_SIG_FFR_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
+#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)
 
 #define SVE_SIG_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
-#define SVE_SIG_ZREGS_OFFSET	SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREGS_OFFSET \
+	(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_SIG_ZREG_OFFSET(vq, n) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
-#define SVE_SIG_ZREGS_SIZE(vq) \
-	(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+	(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
+#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)
 
 #define SVE_SIG_PREGS_OFFSET(vq) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+	(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_SIG_PREG_OFFSET(vq, n) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
-#define SVE_SIG_PREGS_SIZE(vq) \
-	(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+	(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
+#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)
 
 #define SVE_SIG_FFR_OFFSET(vq) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+	(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_SIG_REGS_SIZE(vq) \
-	(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
-
-#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+	(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))
+#define SVE_SIG_CONTEXT_SIZE(vq) \
+	(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/arch/arm64/include/uapi/asm/sve_context.h
new file mode 100644
index 000000000000..754ab751b523
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/sve_context.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017-2018 ARM Limited */
+
+/*
+ * For use by other UAPI headers only.
+ * Do not make direct use of header or its definitions.
+ */
+
+#ifndef _UAPI__ASM_SVE_CONTEXT_H
+#define _UAPI__ASM_SVE_CONTEXT_H
+
+#include <linux/types.h>
+
+#define __SVE_VQ_BYTES		16	/* number of bytes per quadword */
+
+#define __SVE_VQ_MIN		1
+#define __SVE_VQ_MAX		512
+
+#define __SVE_VL_MIN		(__SVE_VQ_MIN * __SVE_VQ_BYTES)
+#define __SVE_VL_MAX		(__SVE_VQ_MAX * __SVE_VQ_BYTES)
+
+#define __SVE_NUM_ZREGS		32
+#define __SVE_NUM_PREGS		16
+
+#define __sve_vl_valid(vl)			\
+	((vl) % __SVE_VQ_BYTES == 0 &&		\
+	 (vl) >= __SVE_VL_MIN &&		\
+	 (vl) <= __SVE_VL_MAX)
+
+#define __sve_vq_from_vl(vl)	((vl) / __SVE_VQ_BYTES)
+#define __sve_vl_from_vq(vq)	((vq) * __SVE_VQ_BYTES)
+
+#define __SVE_ZREG_SIZE(vq)	((__u32)(vq) * __SVE_VQ_BYTES)
+#define __SVE_PREG_SIZE(vq)	((__u32)(vq) * (__SVE_VQ_BYTES / 8))
+#define __SVE_FFR_SIZE(vq)	__SVE_PREG_SIZE(vq)
+
+#define __SVE_ZREGS_OFFSET	0
+#define __SVE_ZREG_OFFSET(vq, n) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
+#define __SVE_ZREGS_SIZE(vq) \
+	(__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
+
+#define __SVE_PREGS_OFFSET(vq) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
+#define __SVE_PREG_OFFSET(vq, n) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n))
+#define __SVE_PREGS_SIZE(vq) \
+	(__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq))
+
+#define __SVE_FFR_OFFSET(vq) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))
+
+#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */
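As a sanity check on the new shared macros, this standalone program recomputes the register-block layout for a 256-bit vector length (vq = 2): each of the 32 Z registers takes vq * 16 bytes, while the 16 P registers and the FFR take vq * 2 bytes each. The simplified macros below collapse the sve_context.h definitions to the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define __SVE_VQ_BYTES		16
#define __SVE_NUM_ZREGS		32
#define __SVE_NUM_PREGS		16

#define __SVE_ZREG_SIZE(vq)	((uint32_t)(vq) * __SVE_VQ_BYTES)
#define __SVE_PREG_SIZE(vq)	((uint32_t)(vq) * (__SVE_VQ_BYTES / 8))
#define __SVE_ZREGS_SIZE(vq)	(__SVE_ZREG_SIZE(vq) * __SVE_NUM_ZREGS)
#define __SVE_PREGS_OFFSET(vq)	(__SVE_ZREGS_SIZE(vq))
#define __SVE_PREGS_SIZE(vq)	(__SVE_PREG_SIZE(vq) * __SVE_NUM_PREGS)
#define __SVE_FFR_OFFSET(vq)	(__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))

int main(void)
{
	unsigned int vq = 2;	/* 256-bit vectors: VL = 32 bytes */

	printf("Z0..Z31: %u bytes starting at offset 0\n",
	       __SVE_ZREGS_SIZE(vq));				/* 1024 */
	printf("P0..P15: %u bytes at offset %u\n",
	       __SVE_PREGS_SIZE(vq), __SVE_PREGS_OFFSET(vq));	/* 64 @ 1024 */
	printf("FFR:     %u bytes at offset %u\n",
	       __SVE_PREG_SIZE(vq), __SVE_FFR_OFFSET(vq));	/* 4 @ 1088 */
	return 0;
}

Because both sigcontext.h and ptrace.h now derive their layouts from these underscore-prefixed primitives, the two UAPI views of the SVE register block can no longer drift apart, and ptrace.h no longer needs to include sigcontext.h at all.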