 Documentation/features/sched/membarrier-sync-core/arch-support.txt | 4
 arch/s390/Kconfig | 3
 arch/s390/appldata/appldata_base.c | 32
 arch/s390/boot/Makefile | 2
 arch/s390/boot/boot.h | 39
 arch/s390/boot/ipl_report.c | 106
 arch/s390/boot/kaslr.c | 115
 arch/s390/boot/mem_detect.c | 191
 arch/s390/boot/pgm_check_info.c | 5
 arch/s390/boot/physmem_info.c | 323
 arch/s390/boot/startup.c | 96
 arch/s390/boot/vmem.c | 258
 arch/s390/boot/vmlinux.lds.S | 2
 arch/s390/include/asm/ap.h | 152
 arch/s390/include/asm/kasan.h | 31
 arch/s390/include/asm/linkage.h | 2
 arch/s390/include/asm/mem_detect.h | 117
 arch/s390/include/asm/nospec-insn.h | 3
 arch/s390/include/asm/perf_event.h | 2
 arch/s390/include/asm/physmem_info.h | 171
 arch/s390/include/asm/processor.h | 4
 arch/s390/include/asm/setup.h | 13
 arch/s390/include/asm/thread_info.h | 2
 arch/s390/kernel/debug.c | 12
 arch/s390/kernel/dumpstack.c | 8
 arch/s390/kernel/early.c | 23
 arch/s390/kernel/entry.S | 53
 arch/s390/kernel/ftrace.c | 20
 arch/s390/kernel/head64.S | 3
 arch/s390/kernel/ipl.c | 1
 arch/s390/kernel/mcount.S | 16
 arch/s390/kernel/perf_cpum_sf.c | 14
 arch/s390/kernel/process.c | 10
 arch/s390/kernel/processor.c | 18
 arch/s390/kernel/setup.c | 96
 arch/s390/kernel/smp.c | 2
 arch/s390/kernel/topology.c | 12
 arch/s390/kernel/vdso32/vdso_user_wrapper.S | 3
 arch/s390/kernel/vdso64/vdso_user_wrapper.S | 5
 arch/s390/kernel/vmlinux.lds.S | 7
 arch/s390/mm/Makefile | 3
 arch/s390/mm/cmm.c | 12
 arch/s390/mm/kasan_init.c | 301
 arch/s390/mm/pageattr.c | 2
 arch/s390/mm/pgalloc.c | 20
 arch/s390/mm/vmem.c | 11
 arch/s390/pci/pci.c | 23
 arch/s390/pci/pci_bus.c | 11
 drivers/s390/char/sclp_early_core.c | 8
 drivers/s390/crypto/ap_bus.c | 254
 drivers/s390/crypto/ap_bus.h | 70
 drivers/s390/crypto/ap_card.c | 23
 drivers/s390/crypto/ap_queue.c | 410
 drivers/s390/crypto/vfio_ap_drv.c | 6
 drivers/s390/crypto/vfio_ap_ops.c | 16
 drivers/s390/crypto/zcrypt_api.c | 60
 drivers/s390/crypto/zcrypt_card.c | 6
 drivers/s390/crypto/zcrypt_cex2c.c | 66
 drivers/s390/crypto/zcrypt_cex4.c | 141
 drivers/s390/crypto/zcrypt_msgtype50.c | 15
 drivers/s390/crypto/zcrypt_msgtype6.c | 63
 drivers/s390/crypto/zcrypt_queue.c | 4
 lib/Kconfig.debug | 2
 63 files changed, 1850 insertions(+), 1653 deletions(-)
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index 1e51614c136e..23260ca44946 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -5,7 +5,7 @@
#
# Architecture requirements
#
-# * arm/arm64/powerpc
+# * arm/arm64/powerpc/s390
#
# Rely on implicit context synchronization as a result of exception return
# when returning from IPI handler, and when returning to user-space.
@@ -45,7 +45,7 @@
| parisc: | TODO |
| powerpc: | ok |
| riscv: | TODO |
- | s390: | TODO |
+ | s390: | ok |
| sh: | TODO |
| sparc: | TODO |
| um: | TODO |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9809c74e1240..d610f911677e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -76,6 +76,7 @@ config S390
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_PTE_SPECIAL
@@ -131,6 +132,8 @@ config S390
select CLONE_BACKWARDS2
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
+ select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
+ select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC
select GCC12_NO_ARRAY_BOUNDS
select GENERIC_ALLOCATOR
select GENERIC_CPU_AUTOPROBE
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index c0fd29133f27..b07b0610950e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -66,16 +66,6 @@ static struct ctl_table appldata_table[] = {
{ },
};
-static struct ctl_table appldata_dir_table[] = {
- {
- .procname = appldata_proc_name,
- .maxlen = 0,
- .mode = S_IRUGO | S_IXUGO,
- .child = appldata_table,
- },
- { },
-};
-
/*
* Timer
*/
@@ -291,7 +281,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
tmp_ops = list_entry(lh, struct appldata_ops, list);
- if (&tmp_ops->ctl_table[2] == ctl) {
+ if (&tmp_ops->ctl_table[0] == ctl) {
found = 1;
}
}
@@ -361,7 +351,8 @@ int appldata_register_ops(struct appldata_ops *ops)
if (ops->size > APPLDATA_MAX_REC_SIZE)
return -EINVAL;
- ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL);
+ /* The last entry must be an empty one */
+ ops->ctl_table = kcalloc(2, sizeof(struct ctl_table), GFP_KERNEL);
if (!ops->ctl_table)
return -ENOMEM;
@@ -369,17 +360,12 @@ int appldata_register_ops(struct appldata_ops *ops)
list_add(&ops->list, &appldata_ops_list);
mutex_unlock(&appldata_ops_mutex);
- ops->ctl_table[0].procname = appldata_proc_name;
- ops->ctl_table[0].maxlen = 0;
- ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
- ops->ctl_table[0].child = &ops->ctl_table[2];
-
- ops->ctl_table[2].procname = ops->name;
- ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
- ops->ctl_table[2].proc_handler = appldata_generic_handler;
- ops->ctl_table[2].data = ops;
+ ops->ctl_table[0].procname = ops->name;
+ ops->ctl_table[0].mode = S_IRUGO | S_IWUSR;
+ ops->ctl_table[0].proc_handler = appldata_generic_handler;
+ ops->ctl_table[0].data = ops;
- ops->sysctl_header = register_sysctl_table(ops->ctl_table);
+ ops->sysctl_header = register_sysctl(appldata_proc_name, ops->ctl_table);
if (!ops->sysctl_header)
goto out;
return 0;
@@ -422,7 +408,7 @@ static int __init appldata_init(void)
appldata_wq = alloc_ordered_workqueue("appldata", 0);
if (!appldata_wq)
return -ENOMEM;
- appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
+ appldata_sysctl_header = register_sysctl(appldata_proc_name, appldata_table);
return 0;
}
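
The conversion above drops the intermediate directory table: with register_sysctl() the directory is named by a path string, so the dummy ctl_table entry with a .child pointer disappears and only the leaf table (plus its empty sentinel) remains. A minimal standalone sketch of the new-style registration, not taken from this patch (example_value, example_table and the "appldata" path are illustrative assumptions):

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },	/* table must end with an empty sentinel entry */
};

static struct ctl_table_header *example_header;

static int __init example_init(void)
{
	/* creates /proc/sys/appldata/example; no .child directory entry needed */
	example_header = register_sysctl("appldata", example_table);
	return example_header ? 0 : -ENOMEM;
}
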
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index cebd4ca16916..c7c81e5f9218 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -35,7 +35,7 @@ endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
-obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o vmem.o
+obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 58ce701d6110..872963c8a0ab 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -8,6 +8,8 @@
#ifndef __ASSEMBLY__
+#include <asm/physmem_info.h>
+
struct machine_info {
unsigned char has_edat1 : 1;
unsigned char has_edat2 : 1;
@@ -30,24 +32,44 @@ struct vmlinux_info {
unsigned long init_mm_off;
unsigned long swapper_pg_dir_off;
unsigned long invalid_pg_dir_off;
+#ifdef CONFIG_KASAN
+ unsigned long kasan_early_shadow_page_off;
+ unsigned long kasan_early_shadow_pte_off;
+ unsigned long kasan_early_shadow_pmd_off;
+ unsigned long kasan_early_shadow_pud_off;
+ unsigned long kasan_early_shadow_p4d_off;
+#endif
};
void startup_kernel(void);
-unsigned long detect_memory(unsigned long *safe_addr);
-void mem_detect_set_usable_limit(unsigned long limit);
+unsigned long detect_max_physmem_end(void);
+void detect_physmem_online_ranges(unsigned long max_physmem_end);
+void physmem_set_usable_limit(unsigned long limit);
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
+void physmem_free(enum reserved_range_type type);
+/* for continuous/multiple allocations per type */
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+ unsigned long align);
+/* for single allocations, 1 per type */
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+ unsigned long align, unsigned long min, unsigned long max,
+ bool die_on_oom);
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+ unsigned long *intersection_start);
bool is_ipl_block_dump(void);
void store_ipl_parmblock(void);
-unsigned long read_ipl_report(unsigned long safe_addr);
+int read_ipl_report(void);
+void save_ipl_cert_comp_list(void);
void setup_boot_command_line(void);
void parse_boot_command_line(void);
void verify_facilities(void);
void print_missing_facilities(void);
void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
-unsigned long get_random_base(unsigned long safe_addr);
+unsigned long get_random_base(void);
void setup_vmem(unsigned long asce_limit);
-unsigned long vmem_estimate_memory_needs(unsigned long online_mem_total);
void __printf(1, 2) decompressor_printk(const char *fmt, ...);
+void print_stacktrace(unsigned long sp);
void error(char *m);
extern struct machine_info machine;
@@ -62,7 +84,7 @@ extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
extern char _decompressor_syms_start[], _decompressor_syms_end[];
extern char _stack_start[], _stack_end[];
-extern char _end[];
+extern char _end[], _decompressor_end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
extern struct vmlinux_info _vmlinux_info;
@@ -70,5 +92,10 @@ extern struct vmlinux_info _vmlinux_info;
#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
+static inline bool intersects(unsigned long addr0, unsigned long size0,
+ unsigned long addr1, unsigned long size1)
+{
+ return addr0 + size0 > addr1 && addr1 + size1 > addr0;
+}
#endif /* __ASSEMBLY__ */
#endif /* BOOT_BOOT_H */
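
The intersects() helper moved into this header treats both ranges as half-open intervals [addr, addr + size). Two worked cases with illustrative values:

/* [0x1000, 0x2000) vs [0x1800, 0x2800): overlap -> true */
intersects(0x1000, 0x1000, 0x1800, 0x1000);
/* [0x1000, 0x2000) vs [0x2000, 0x3000): ranges merely touch -> false */
intersects(0x1000, 0x1000, 0x2000, 0x1000);
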
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
index 74b5cd264862..1803035e68d2 100644
--- a/arch/s390/boot/ipl_report.c
+++ b/arch/s390/boot/ipl_report.c
@@ -5,6 +5,7 @@
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
+#include <asm/physmem_info.h>
#include <uapi/asm/ipl.h>
#include "boot.h"
@@ -16,20 +17,16 @@ unsigned long __bootdata_preserved(ipl_cert_list_size);
unsigned long __bootdata(early_ipl_comp_list_addr);
unsigned long __bootdata(early_ipl_comp_list_size);
+static struct ipl_rb_certificates *certs;
+static struct ipl_rb_components *comps;
+static bool ipl_report_needs_saving;
+
#define for_each_rb_entry(entry, rb) \
for (entry = rb->entries; \
(void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
entry++)
-static inline bool intersects(unsigned long addr0, unsigned long size0,
- unsigned long addr1, unsigned long size1)
-{
- return addr0 + size0 > addr1 && addr1 + size1 > addr0;
-}
-
-static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
- struct ipl_rb_certificates *certs,
- unsigned long safe_addr)
+static unsigned long get_cert_comp_list_size(void)
{
struct ipl_rb_certificate_entry *cert;
struct ipl_rb_component_entry *comp;
@@ -44,44 +41,27 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
ipl_cert_list_size = 0;
for_each_rb_entry(cert, certs)
ipl_cert_list_size += sizeof(unsigned int) + cert->len;
- size = ipl_cert_list_size + early_ipl_comp_list_size;
+ return ipl_cert_list_size + early_ipl_comp_list_size;
+}
- /*
- * Start from safe_addr to find a free memory area large
- * enough for the IPL report boot data. This area is used
- * for ipl_cert_list_addr/ipl_cert_list_size and
- * early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
- * not overlap with any component or any certificate.
- */
-repeat:
- if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
- intersects(initrd_data.start, initrd_data.size, safe_addr, size))
- safe_addr = initrd_data.start + initrd_data.size;
- if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
- safe_addr = (unsigned long)comps + comps->len;
- goto repeat;
- }
- for_each_rb_entry(comp, comps)
- if (intersects(safe_addr, size, comp->addr, comp->len)) {
- safe_addr = comp->addr + comp->len;
- goto repeat;
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+ unsigned long *intersection_start)
+{
+ struct ipl_rb_certificate_entry *cert;
+
+ if (!ipl_report_needs_saving)
+ return false;
+
+ for_each_rb_entry(cert, certs) {
+ if (intersects(addr, size, cert->addr, cert->len)) {
+ *intersection_start = cert->addr;
+ return true;
}
- if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
- safe_addr = (unsigned long)certs + certs->len;
- goto repeat;
}
- for_each_rb_entry(cert, certs)
- if (intersects(safe_addr, size, cert->addr, cert->len)) {
- safe_addr = cert->addr + cert->len;
- goto repeat;
- }
- early_ipl_comp_list_addr = safe_addr;
- ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
-
- return safe_addr + size;
+ return false;
}
-static void copy_components_bootdata(struct ipl_rb_components *comps)
+static void copy_components_bootdata(void)
{
struct ipl_rb_component_entry *comp, *ptr;
@@ -90,7 +70,7 @@ static void copy_components_bootdata(struct ipl_rb_components *comps)
memcpy(ptr++, comp, sizeof(*ptr));
}
-static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
+static void copy_certificates_bootdata(void)
{
struct ipl_rb_certificate_entry *cert;
void *ptr;
@@ -104,10 +84,8 @@ static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
}
}
-unsigned long read_ipl_report(unsigned long safe_addr)
+int read_ipl_report(void)
{
- struct ipl_rb_certificates *certs;
- struct ipl_rb_components *comps;
struct ipl_pl_hdr *pl_hdr;
struct ipl_rl_hdr *rl_hdr;
struct ipl_rb_hdr *rb_hdr;
@@ -120,7 +98,7 @@ unsigned long read_ipl_report(unsigned long safe_addr)
*/
if (!ipl_block_valid ||
!(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
- return safe_addr;
+ return -1;
ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
/*
* There is an IPL report, to find it load the pointer to the
@@ -158,16 +136,30 @@ unsigned long read_ipl_report(unsigned long safe_addr)
* With either the component list or the certificate list
* missing the kernel will stay ignorant of secure IPL.
*/
- if (!comps || !certs)
- return safe_addr;
+ if (!comps || !certs) {
+ certs = NULL;
+ return -1;
+ }
- /*
- * Copy component and certificate list to a safe area
- * where the decompressed kernel can find them.
- */
- safe_addr = find_bootdata_space(comps, certs, safe_addr);
- copy_components_bootdata(comps);
- copy_certificates_bootdata(certs);
+ ipl_report_needs_saving = true;
+ physmem_reserve(RR_IPLREPORT, (unsigned long)pl_hdr,
+ (unsigned long)rl_end - (unsigned long)pl_hdr);
+ return 0;
+}
+
+void save_ipl_cert_comp_list(void)
+{
+ unsigned long size;
+
+ if (!ipl_report_needs_saving)
+ return;
+
+ size = get_cert_comp_list_size();
+ early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
+ ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
- return safe_addr;
+ copy_components_bootdata();
+ copy_certificates_bootdata();
+ physmem_free(RR_IPLREPORT);
+ ipl_report_needs_saving = false;
}
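
The reworked flow splits IPL report handling into two phases around allocator availability. A simplified call-order sketch, mirroring the startup_kernel() wiring further down (error handling omitted):

/* phase 1: before the allocator is set up - just pin the report in place */
read_ipl_report();			/* physmem_reserve(RR_IPLREPORT, pl_hdr, ...) */

/* ... ident_map_size is computed ... */
physmem_set_usable_limit(ident_map_size);	/* allocator becomes usable */
detect_physmem_online_ranges(max_physmem_end);

/* phase 2: copy certificates/components out, then release the pin */
save_ipl_cert_comp_list();		/* alloc RR_CERT_COMP_LIST, copy, physmem_free(RR_IPLREPORT) */
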
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 3e3d846400b4..71f75f03f800 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -3,7 +3,7 @@
* Copyright IBM Corp. 2019
*/
#include <linux/pgtable.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
@@ -91,113 +91,16 @@ static int get_random(unsigned long limit, unsigned long *value)
return 0;
}
-/*
- * To randomize kernel base address we have to consider several facts:
- * 1. physical online memory might not be continuous and have holes. mem_detect
- * info contains list of online memory ranges we should consider.
- * 2. we have several memory regions which are occupied and we should not
- * overlap and destroy them. Currently safe_addr tells us the border below
- * which all those occupied regions are. We are safe to use anything above
- * safe_addr.
- * 3. the upper limit might apply as well, even if memory above that limit is
- * online. Currently those limitations are:
- * 3.1. Limit set by "mem=" kernel command line option
- * 3.2. memory reserved at the end for kasan initialization.
- * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size).
- * Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages
- * (16 pages when the kernel is built with kasan enabled)
- * Assumptions:
- * 1. kernel size (including .bss size) and upper memory limit are page aligned.
- * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
- * aligned (in practice memory configurations granularity on z/VM and LPAR
- * is 1mb).
- *
- * To guarantee uniform distribution of kernel base address among all suitable
- * addresses we generate random value just once. For that we need to build a
- * continuous range in which every value would be suitable. We can build this
- * range by simply counting all suitable addresses (let's call them positions)
- * which would be valid as kernel base address. To count positions we iterate
- * over online memory ranges. For each range which is big enough for the
- * kernel image we count all suitable addresses we can put the kernel image at
- * that is
- * (end - start - kernel_size) / THREAD_SIZE + 1
- * Two functions count_valid_kernel_positions and position_to_address help
- * to count positions in memory range given and then convert position back
- * to address.
- */
-static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
- unsigned long _min,
- unsigned long _max)
-{
- unsigned long start, end, pos = 0;
- int i;
-
- for_each_mem_detect_usable_block(i, &start, &end) {
- if (_min >= end)
- continue;
- if (start >= _max)
- break;
- start = max(_min, start);
- end = min(_max, end);
- if (end - start < kernel_size)
- continue;
- pos += (end - start - kernel_size) / THREAD_SIZE + 1;
- }
-
- return pos;
-}
-
-static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
- unsigned long _min, unsigned long _max)
-{
- unsigned long start, end;
- int i;
-
- for_each_mem_detect_usable_block(i, &start, &end) {
- if (_min >= end)
- continue;
- if (start >= _max)
- break;
- start = max(_min, start);
- end = min(_max, end);
- if (end - start < kernel_size)
- continue;
- if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
- return start + (pos - 1) * THREAD_SIZE;
- pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
- }
-
- return 0;
-}
-
-unsigned long get_random_base(unsigned long safe_addr)
+unsigned long get_random_base(void)
{
- unsigned long usable_total = get_mem_detect_usable_total();
- unsigned long memory_limit = get_mem_detect_end();
- unsigned long base_pos, max_pos, kernel_size;
- int i;
+ unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
+ unsigned long minimal_pos = vmlinux.default_lma + vmlinux_size;
+ unsigned long random;
- /*
- * Avoid putting kernel in the end of physical memory
- * which vmem and kasan code will use for shadow memory and
- * pgtable mapping allocations.
- */
- memory_limit -= kasan_estimate_memory_needs(usable_total);
- memory_limit -= vmem_estimate_memory_needs(usable_total);
-
- safe_addr = ALIGN(safe_addr, THREAD_SIZE);
- kernel_size = vmlinux.image_size + vmlinux.bss_size;
- if (safe_addr + kernel_size > memory_limit)
+ /* [vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size : physmem_info.usable] */
+ if (get_random(physmem_info.usable - minimal_pos, &random))
return 0;
- max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
- if (!max_pos) {
- sclp_early_printk("KASLR disabled: not enough memory\n");
- return 0;
- }
-
- /* we need a value in the range [1, base_pos] inclusive */
- if (get_random(max_pos, &base_pos))
- return 0;
- return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
+ return physmem_alloc_range(RR_VMLINUX, vmlinux_size, THREAD_SIZE,
+ vmlinux.default_lma, minimal_pos + random, false);
}
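
The rewritten get_random_base() leans on the physmem allocator instead of counting candidate positions: one random value caps the search range, and physmem_alloc_range() then picks the highest THREAD_SIZE-aligned address below that cap which avoids every reserved range. A worked sketch with assumed numbers (not from the patch):

/* assume: default_lma = 0x100000 (1 MB), image + bss = 0x4000000 (64 MB),
 * physmem_info.usable = 0x100000000 (4 GB) */
minimal_pos = 0x100000 + 0x4000000;		/* 0x4100000 */
/* get_random(0x100000000 - 0x4100000, &random), say random = 0x20000000 */
/* the image is then allocated top-down in [0x100000, 0x4100000 + 0x20000000),
 * THREAD_SIZE aligned, skipping RR_DECOMPRESSOR, RR_INITRD, RR_IPLREPORT, ... */
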
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
deleted file mode 100644
index 35f4ba11f7fd..000000000000
--- a/arch/s390/boot/mem_detect.c
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <asm/setup.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-#include <asm/sections.h>
-#include <asm/mem_detect.h>
-#include <asm/sparsemem.h>
-#include "decompressor.h"
-#include "boot.h"
-
-struct mem_detect_info __bootdata(mem_detect);
-
-/* up to 256 storage elements, 1020 subincrements each */
-#define ENTRIES_EXTENDED_MAX \
- (256 * (1020 / 2) * sizeof(struct mem_detect_block))
-
-static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
-{
- if (n < MEM_INLINED_ENTRIES)
- return &mem_detect.entries[n];
- return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
-}
-
-/*
- * sequential calls to add_mem_detect_block with adjacent memory areas
- * are merged together into single memory block.
- */
-void add_mem_detect_block(u64 start, u64 end)
-{
- struct mem_detect_block *block;
-
- if (mem_detect.count) {
- block = __get_mem_detect_block_ptr(mem_detect.count - 1);
- if (block->end == start) {
- block->end = end;
- return;
- }
- }
-
- block = __get_mem_detect_block_ptr(mem_detect.count);
- block->start = start;
- block->end = end;
- mem_detect.count++;
-}
-
-static int __diag260(unsigned long rx1, unsigned long rx2)
-{
- unsigned long reg1, reg2, ry;
- union register_pair rx;
- psw_t old;
- int rc;
-
- rx.even = rx1;
- rx.odd = rx2;
- ry = 0x10; /* storage configuration */
- rc = -1; /* fail */
- asm volatile(
- " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
- " epsw %[reg1],%[reg2]\n"
- " st %[reg1],0(%[psw_pgm])\n"
- " st %[reg2],4(%[psw_pgm])\n"
- " larl %[reg1],1f\n"
- " stg %[reg1],8(%[psw_pgm])\n"
- " diag %[rx],%[ry],0x260\n"
- " ipm %[rc]\n"
- " srl %[rc],28\n"
- "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
- : [reg1] "=&d" (reg1),
- [reg2] "=&a" (reg2),
- [rc] "+&d" (rc),
- [ry] "+&d" (ry),
- "+Q" (S390_lowcore.program_new_psw),
- "=Q" (old)
- : [rx] "d" (rx.pair),
- [psw_old] "a" (&old),
- [psw_pgm] "a" (&S390_lowcore.program_new_psw)
- : "cc", "memory");
- return rc == 0 ? ry : -1;
-}
-
-static int diag260(void)
-{
- int rc, i;
-
- struct {
- unsigned long start;
- unsigned long end;
- } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
-
- memset(storage_extents, 0, sizeof(storage_extents));
- rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
- if (rc == -1)
- return -1;
-
- for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
- add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
- return 0;
-}
-
-static int tprot(unsigned long addr)
-{
- unsigned long reg1, reg2;
- int rc = -EFAULT;
- psw_t old;
-
- asm volatile(
- " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
- " epsw %[reg1],%[reg2]\n"
- " st %[reg1],0(%[psw_pgm])\n"
- " st %[reg2],4(%[psw_pgm])\n"
- " larl %[reg1],1f\n"
- " stg %[reg1],8(%[psw_pgm])\n"
- " tprot 0(%[addr]),0\n"
- " ipm %[rc]\n"
- " srl %[rc],28\n"
- "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
- : [reg1] "=&d" (reg1),
- [reg2] "=&a" (reg2),
- [rc] "+&d" (rc),
- "=Q" (S390_lowcore.program_new_psw.addr),
- "=Q" (old)
- : [psw_old] "a" (&old),
- [psw_pgm] "a" (&S390_lowcore.program_new_psw),
- [addr] "a" (addr)
- : "cc", "memory");
- return rc;
-}
-
-static unsigned long search_mem_end(void)
-{
- unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
- unsigned long offset = 0;
- unsigned long pivot;
-
- while (range > 1) {
- range >>= 1;
- pivot = offset + range;
- if (!tprot(pivot << 20))
- offset = pivot;
- }
- return (offset + 1) << 20;
-}
-
-unsigned long detect_memory(unsigned long *safe_addr)
-{
- unsigned long max_physmem_end = 0;
-
- sclp_early_get_memsize(&max_physmem_end);
- mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
-
- if (!sclp_early_read_storage_info()) {
- mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
- } else if (!diag260()) {
- mem_detect.info_source = MEM_DETECT_DIAG260;
- max_physmem_end = max_physmem_end ?: get_mem_detect_end();
- } else if (max_physmem_end) {
- add_mem_detect_block(0, max_physmem_end);
- mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
- } else {
- max_physmem_end = search_mem_end();
- add_mem_detect_block(0, max_physmem_end);
- mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
- }
-
- if (mem_detect.count > MEM_INLINED_ENTRIES) {
- *safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
- sizeof(struct mem_detect_block);
- }
-
- return max_physmem_end;
-}
-
-void mem_detect_set_usable_limit(unsigned long limit)
-{
- struct mem_detect_block *block;
- int i;
-
- /* make sure mem_detect.usable ends up within online memory block */
- for (i = 0; i < mem_detect.count; i++) {
- block = __get_mem_detect_block_ptr(i);
- if (block->start >= limit)
- break;
- if (block->end >= limit) {
- mem_detect.usable = limit;
- break;
- }
- mem_detect.usable = block->end;
- }
-}
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index c2a1defc79da..0861e3c403f8 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -123,11 +123,10 @@ out:
sclp_early_printk(buf);
}
-static noinline void print_stacktrace(void)
+void print_stacktrace(unsigned long sp)
{
struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
(unsigned long)_stack_end };
- unsigned long sp = S390_lowcore.gpregs_save_area[15];
bool first = true;
decompressor_printk("Call Trace:\n");
@@ -173,7 +172,7 @@ void print_pgm_check_info(void)
gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
- print_stacktrace();
+ print_stacktrace(S390_lowcore.gpregs_save_area[15]);
decompressor_printk("Last Breaking-Event-Address:\n");
decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.pgm_last_break,
(void *)S390_lowcore.pgm_last_break);
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
new file mode 100644
index 000000000000..4ee9b7381142
--- /dev/null
+++ b/arch/s390/boot/physmem_info.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/processor.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/physmem_info.h>
+#include <asm/stacktrace.h>
+#include <asm/boot_data.h>
+#include <asm/sparsemem.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sclp.h>
+#include <asm/uv.h>
+#include "decompressor.h"
+#include "boot.h"
+
+struct physmem_info __bootdata(physmem_info);
+static unsigned int physmem_alloc_ranges;
+static unsigned long physmem_alloc_pos;
+
+/* up to 256 storage elements, 1020 subincrements each */
+#define ENTRIES_EXTENDED_MAX \
+ (256 * (1020 / 2) * sizeof(struct physmem_range))
+
+static struct physmem_range *__get_physmem_range_ptr(u32 n)
+{
+ if (n < MEM_INLINED_ENTRIES)
+ return &physmem_info.online[n];
+ if (unlikely(!physmem_info.online_extended)) {
+ physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
+ RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
+ physmem_alloc_pos, true);
+ }
+ return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
+}
+
+/*
+ * sequential calls to add_physmem_online_range with adjacent memory ranges
+ * are merged together into a single memory range.
+ */
+void add_physmem_online_range(u64 start, u64 end)
+{
+ struct physmem_range *range;
+
+ if (physmem_info.range_count) {
+ range = __get_physmem_range_ptr(physmem_info.range_count - 1);
+ if (range->end == start) {
+ range->end = end;
+ return;
+ }
+ }
+
+ range = __get_physmem_range_ptr(physmem_info.range_count);
+ range->start = start;
+ range->end = end;
+ physmem_info.range_count++;
+}
+
+static int __diag260(unsigned long rx1, unsigned long rx2)
+{
+ unsigned long reg1, reg2, ry;
+ union register_pair rx;
+ psw_t old;
+ int rc;
+
+ rx.even = rx1;
+ rx.odd = rx2;
+ ry = 0x10; /* storage configuration */
+ rc = -1; /* fail */
+ asm volatile(
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
+ " diag %[rx],%[ry],0x260\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ [ry] "+&d" (ry),
+ "+Q" (S390_lowcore.program_new_psw),
+ "=Q" (old)
+ : [rx] "d" (rx.pair),
+ [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw)
+ : "cc", "memory");
+ return rc == 0 ? ry : -1;
+}
+
+static int diag260(void)
+{
+ int rc, i;
+
+ struct {
+ unsigned long start;
+ unsigned long end;
+ } storage_extents[8] __aligned(16); /* VM supports up to 8 extents */
+
+ memset(storage_extents, 0, sizeof(storage_extents));
+ rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
+ if (rc == -1)
+ return -1;
+
+ for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
+ add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
+ return 0;
+}
+
+static int tprot(unsigned long addr)
+{
+ unsigned long reg1, reg2;
+ int rc = -EFAULT;
+ psw_t old;
+
+ asm volatile(
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
+ " tprot 0(%[addr]),0\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ "=Q" (S390_lowcore.program_new_psw.addr),
+ "=Q" (old)
+ : [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+ [addr] "a" (addr)
+ : "cc", "memory");
+ return rc;
+}
+
+static unsigned long search_mem_end(void)
+{
+ unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+ unsigned long offset = 0;
+ unsigned long pivot;
+
+ while (range > 1) {
+ range >>= 1;
+ pivot = offset + range;
+ if (!tprot(pivot << 20))
+ offset = pivot;
+ }
+ return (offset + 1) << 20;
+}
+
+unsigned long detect_max_physmem_end(void)
+{
+ unsigned long max_physmem_end = 0;
+
+ if (!sclp_early_get_memsize(&max_physmem_end)) {
+ physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
+ } else {
+ max_physmem_end = search_mem_end();
+ physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
+ }
+ return max_physmem_end;
+}
+
+void detect_physmem_online_ranges(unsigned long max_physmem_end)
+{
+ if (!sclp_early_read_storage_info()) {
+ physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
+ } else if (!diag260()) {
+ physmem_info.info_source = MEM_DETECT_DIAG260;
+ } else if (max_physmem_end) {
+ add_physmem_online_range(0, max_physmem_end);
+ }
+}
+
+void physmem_set_usable_limit(unsigned long limit)
+{
+ physmem_info.usable = limit;
+ physmem_alloc_pos = limit;
+}
+
+static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
+{
+ unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
+ struct reserved_range *range;
+ enum reserved_range_type t;
+ int i;
+
+ decompressor_printk("Linux version %s\n", kernel_version);
+ if (!is_prot_virt_guest() && early_command_line[0])
+ decompressor_printk("Kernel command line: %s\n", early_command_line);
+ decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
+ size, align, min, max);
+ decompressor_printk("Reserved memory ranges:\n");
+ for_each_physmem_reserved_range(t, range, &start, &end) {
+ decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
+ total_reserved_mem += end - start;
+ }
+ decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
+ get_physmem_info_source(), physmem_info.info_source);
+ for_each_physmem_usable_range(i, &start, &end) {
+ decompressor_printk("%016lx %016lx\n", start, end);
+ total_mem += end - start;
+ }
+ decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
+ total_mem, total_reserved_mem,
+ total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
+ print_stacktrace(current_frame_address());
+ sclp_early_printk("\n\n -- System halted\n");
+ disabled_wait();
+}
+
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
+{
+ physmem_info.reserved[type].start = addr;
+ physmem_info.reserved[type].end = addr + size;
+}
+
+void physmem_free(enum reserved_range_type type)
+{
+ physmem_info.reserved[type].start = 0;
+ physmem_info.reserved[type].end = 0;
+}
+
+static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
+ unsigned long *intersection_start)
+{
+ unsigned long res_addr, res_size;
+ int t;
+
+ for (t = 0; t < RR_MAX; t++) {
+ if (!get_physmem_reserved(t, &res_addr, &res_size))
+ continue;
+ if (intersects(addr, size, res_addr, res_size)) {
+ *intersection_start = res_addr;
+ return true;
+ }
+ }
+ return ipl_report_certs_intersects(addr, size, intersection_start);
+}
+
+static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
+ unsigned long min, unsigned long max,
+ unsigned int from_ranges, unsigned int *ranges_left,
+ bool die_on_oom)
+{
+ unsigned int nranges = from_ranges ?: physmem_info.range_count;
+ unsigned long range_start, range_end;
+ unsigned long intersection_start;
+ unsigned long addr, pos = max;
+
+ align = max(align, 8UL);
+ while (nranges) {
+ __get_physmem_range(nranges - 1, &range_start, &range_end, false);
+ pos = min(range_end, pos);
+
+ if (round_up(min, align) + size > pos)
+ break;
+ addr = round_down(pos - size, align);
+ if (range_start > addr) {
+ nranges--;
+ continue;
+ }
+ if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
+ pos = intersection_start;
+ continue;
+ }
+
+ if (ranges_left)
+ *ranges_left = nranges;
+ return addr;
+ }
+ if (die_on_oom)
+ die_oom(size, align, min, max);
+ return 0;
+}
+
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+ unsigned long align, unsigned long min, unsigned long max,
+ bool die_on_oom)
+{
+ unsigned long addr;
+
+ max = min(max, physmem_alloc_pos);
+ addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
+ if (addr)
+ physmem_reserve(type, addr, size);
+ return addr;
+}
+
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+ unsigned long align)
+{
+ struct reserved_range *range = &physmem_info.reserved[type];
+ struct reserved_range *new_range;
+ unsigned int ranges_left;
+ unsigned long addr;
+
+ addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
+ &ranges_left, true);
+ /* if not a consecutive allocation of the same type or first allocation */
+ if (range->start != addr + size) {
+ if (range->end) {
+ physmem_alloc_pos = __physmem_alloc_range(
+ sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
+ physmem_alloc_ranges, &ranges_left, true);
+ new_range = (struct reserved_range *)physmem_alloc_pos;
+ *new_range = *range;
+ range->chain = new_range;
+ addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
+ ranges_left, &ranges_left, true);
+ }
+ range->end = addr + size;
+ }
+ range->start = addr;
+ physmem_alloc_pos = addr;
+ physmem_alloc_ranges = ranges_left;
+ return addr;
+}
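
search_mem_end() above is a plain binary search for the highest accessible 1 MB block, relying on tprot() failing for every address past the end of storage. A standalone userspace model of the same loop (probe() and the constants are assumptions standing in for tprot() and the s390 configuration):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PHYSMEM_BITS 46			/* assumed; s390 sparsemem default */

static unsigned long mem_end_mb = 1536;		/* pretend 1.5 GB are accessible */

static bool probe(unsigned long addr)		/* models tprot(addr) == 0 */
{
	return (addr >> 20) < mem_end_mb;
}

int main(void)
{
	unsigned long range = 1UL << (MAX_PHYSMEM_BITS - 20);	/* in 1 MB blocks */
	unsigned long offset = 0, pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (probe(pivot << 20))
			offset = pivot;
	}
	printf("detected end: %lu MB\n", offset + 1);	/* prints 1536 */
	return 0;
}
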
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 11413f0baabc..bdf305a93987 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -12,7 +12,7 @@
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"
@@ -21,7 +21,6 @@ unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
-unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
@@ -29,8 +28,6 @@ unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
-int __bootdata(is_full_image) = 1;
-struct initrd_data __bootdata(initrd_data);
u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
@@ -76,17 +73,20 @@ unsigned long mem_safe_offset(void)
}
#endif
-static unsigned long rescue_initrd(unsigned long safe_addr)
+static void rescue_initrd(unsigned long min, unsigned long max)
{
+ unsigned long old_addr, addr, size;
+
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
- return safe_addr;
- if (!initrd_data.start || !initrd_data.size)
- return safe_addr;
- if (initrd_data.start < safe_addr) {
- memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
- initrd_data.start = safe_addr;
- }
- return initrd_data.start + initrd_data.size;
+ return;
+ if (!get_physmem_reserved(RR_INITRD, &addr, &size))
+ return;
+ if (addr >= min && addr + size <= max)
+ return;
+ old_addr = addr;
+ physmem_free(RR_INITRD);
+ addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+ memmove((void *)addr, (void *)old_addr, size);
}
static void copy_bootdata(void)
@@ -140,7 +140,7 @@ static void handle_relocs(unsigned long offset)
*
* Consider the following factors:
* 1. max_physmem_end - end of physical memory online or standby.
- * Always <= end of the last online memory block (get_mem_detect_end()).
+ * Always >= end of the last online memory range (get_physmem_online_end()).
* 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
* kernel is able to support.
* 3. "mem=" kernel command line option which limits physical memory usage.
@@ -266,48 +266,57 @@ static void offset_vmlinux_info(unsigned long offset)
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
-}
-
-static unsigned long reserve_amode31(unsigned long safe_addr)
-{
- __amode31_base = PAGE_ALIGN(safe_addr);
- return __amode31_base + vmlinux.amode31_size;
+#ifdef CONFIG_KASAN
+ vmlinux.kasan_early_shadow_page_off += offset;
+ vmlinux.kasan_early_shadow_pte_off += offset;
+ vmlinux.kasan_early_shadow_pmd_off += offset;
+ vmlinux.kasan_early_shadow_pud_off += offset;
+ vmlinux.kasan_early_shadow_p4d_off += offset;
+#endif
}
void startup_kernel(void)
{
unsigned long max_physmem_end;
unsigned long random_lma;
- unsigned long safe_addr;
unsigned long asce_limit;
+ unsigned long safe_addr;
void *img;
psw_t psw;
- initrd_data.start = parmarea.initrd_start;
- initrd_data.size = parmarea.initrd_size;
+ setup_lpp();
+ safe_addr = mem_safe_offset();
+ /*
+ * reserve decompressor memory together with decompression heap, buffer and
+ * memory which might be occupied by uncompressed kernel at default 1Mb
+ * position (if KASLR is off or failed).
+ */
+ physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
+ physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
oldmem_data.start = parmarea.oldmem_base;
oldmem_data.size = parmarea.oldmem_size;
- setup_lpp();
store_ipl_parmblock();
- safe_addr = mem_safe_offset();
- safe_addr = reserve_amode31(safe_addr);
- safe_addr = read_ipl_report(safe_addr);
+ read_ipl_report();
uv_query_info();
- safe_addr = rescue_initrd(safe_addr);
sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
detect_facilities();
sanitize_prot_virt_host();
- max_physmem_end = detect_memory(&safe_addr);
+ max_physmem_end = detect_max_physmem_end();
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
asce_limit = setup_kernel_memory_layout();
- mem_detect_set_usable_limit(ident_map_size);
+ /* got final ident_map_size, physmem allocations could be performed now */
+ physmem_set_usable_limit(ident_map_size);
+ detect_physmem_online_ranges(max_physmem_end);
+ save_ipl_cert_comp_list();
+ rescue_initrd(safe_addr, ident_map_size);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
- random_lma = get_random_base(safe_addr);
+ random_lma = get_random_base();
if (random_lma) {
__kaslr_offset = random_lma - vmlinux.default_lma;
img = (void *)vmlinux.default_lma;
@@ -318,8 +327,16 @@ void startup_kernel(void)
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
- } else if (__kaslr_offset)
+ } else if (__kaslr_offset) {
memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+ memset(img, 0, vmlinux.image_size);
+ }
+
+ /* vmlinux decompression is done, shrink reserved low memory */
+ physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
+ if (!__kaslr_offset)
+ physmem_reserve(RR_VMLINUX, vmlinux.default_lma, vmlinux.image_size + vmlinux.bss_size);
+ physmem_alloc_range(RR_AMODE31, vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G, true);
/*
* The order of the following operations is important:
@@ -339,16 +356,11 @@ void startup_kernel(void)
setup_vmem(asce_limit);
copy_bootdata();
- if (__kaslr_offset) {
- /*
- * Save KASLR offset for early dumps, before vmcore_info is set.
- * Mark as uneven to distinguish from real vmcore_info pointer.
- */
- S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
- /* Clear non-relocated kernel */
- if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
- memset(img, 0, vmlinux.image_size);
- }
+ /*
+ * Save KASLR offset for early dumps, before vmcore_info is set.
+ * Mark as uneven to distinguish from real vmcore_info pointer.
+ */
+ S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
/*
* Jump to the decompressed kernel entry point and switch DAT mode on.
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 4d1d0d8e99cb..b01ea2abda03 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -1,81 +1,213 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
+#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
+unsigned long __bootdata_preserved(s390_invalid_asce);
+
#define init_mm (*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir vmlinux.swapper_pg_dir_off
#define invalid_pg_dir vmlinux.invalid_pg_dir_off
-/*
- * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
- */
-static inline pte_t *__virt_to_kpte(unsigned long va)
-{
- return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
-}
-
-unsigned long __bootdata_preserved(s390_invalid_asce);
-unsigned long __bootdata(pgalloc_pos);
-unsigned long __bootdata(pgalloc_end);
-unsigned long __bootdata(pgalloc_low);
-
enum populate_mode {
POPULATE_NONE,
POPULATE_ONE2ONE,
POPULATE_ABS_LOWCORE,
+#ifdef CONFIG_KASAN
+ POPULATE_KASAN_MAP_SHADOW,
+ POPULATE_KASAN_ZERO_SHADOW,
+ POPULATE_KASAN_SHALLOW
+#endif
};
-static void boot_check_oom(void)
+static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
+
+#ifdef CONFIG_KASAN
+
+#define kasan_early_shadow_page vmlinux.kasan_early_shadow_page_off
+#define kasan_early_shadow_pte ((pte_t *)vmlinux.kasan_early_shadow_pte_off)
+#define kasan_early_shadow_pmd ((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
+#define kasan_early_shadow_pud ((pud_t *)vmlinux.kasan_early_shadow_pud_off)
+#define kasan_early_shadow_p4d ((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static pte_t pte_z;
+
+static void kasan_populate_shadow(void)
+{
+ pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
+ pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
+ p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
+ unsigned long untracked_end;
+ unsigned long start, end;
+ int i;
+
+ pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
+ if (!machine.has_nx)
+ pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
+ crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
+ crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
+ crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
+ memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
+
+ /*
+ * Current memory layout:
+ * +- 0 -------------+ +- shadow start -+
+ * |1:1 ident mapping| /|1/8 of ident map|
+ * | | / | |
+ * +-end of ident map+ / +----------------+
+ * | ... gap ... | / | kasan |
+ * | | / | zero page |
+ * +- vmalloc area -+ / | mapping |
+ * | vmalloc_size | / | (untracked) |
+ * +- modules vaddr -+ / +----------------+
+ * | 2Gb |/ | unmapped | allocated per module
+ * +- shadow start -+ +----------------+
+ * | 1/8 addr space | | zero pg mapping| (untracked)
+ * +- shadow end ----+---------+- shadow end ---+
+ *
+ * Current memory layout (KASAN_VMALLOC):
+ * +- 0 -------------+ +- shadow start -+
+ * |1:1 ident mapping| /|1/8 of ident map|
+ * | | / | |
+ * +-end of ident map+ / +----------------+
+ * | ... gap ... | / | kasan zero page| (untracked)
+ * | | / | mapping |
+ * +- vmalloc area -+ / +----------------+
+ * | vmalloc_size | / |shallow populate|
+ * +- modules vaddr -+ / +----------------+
+ * | 2Gb |/ |shallow populate|
+ * +- shadow start -+ +----------------+
+ * | 1/8 addr space | | zero pg mapping| (untracked)
+ * +- shadow end ----+---------+- shadow end ---+
+ */
+
+ for_each_physmem_usable_range(i, &start, &end)
+ pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ untracked_end = VMALLOC_START;
+ /* shallowly populate kasan shadow for vmalloc and modules */
+ pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
+ } else {
+ untracked_end = MODULES_VADDR;
+ }
+ /* populate kasan shadow for untracked memory */
+ pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
+ pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
+}
+
+static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+ pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
+ return true;
+ }
+ return false;
+}
+
+static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
+ p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
+ return true;
+ }
+ return false;
+}
+
+static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+ pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
+ return true;
+ }
+ return false;
+}
+
+static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
{
- if (pgalloc_pos < pgalloc_low)
- error("out of memory on boot\n");
+ if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+ IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+ pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
+ return true;
+ }
+ return false;
}
-static void pgtable_populate_init(void)
+static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
- unsigned long initrd_end;
- unsigned long kernel_end;
-
- kernel_end = vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
- pgalloc_low = round_up(kernel_end, PAGE_SIZE);
- if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
- initrd_end = round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
- pgalloc_low = max(pgalloc_low, initrd_end);
+ pte_t entry;
+
+ if (mode == POPULATE_KASAN_ZERO_SHADOW) {
+ set_pte(pte, pte_z);
+ return true;
}
+ return false;
+}
+#else
- pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
- pgalloc_pos = pgalloc_end;
+static inline void kasan_populate_shadow(void) {}
- boot_check_oom();
+static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ return false;
}
-static void *boot_alloc_pages(unsigned int order)
+static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
{
- unsigned long size = PAGE_SIZE << order;
+ return false;
+}
- pgalloc_pos -= size;
- pgalloc_pos = round_down(pgalloc_pos, size);
+static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ return false;
+}
- boot_check_oom();
+static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
+ unsigned long end, enum populate_mode mode)
+{
+ return false;
+}
+
+static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
+{
+ return false;
+}
+
+#endif
- return (void *)pgalloc_pos;
+/*
+ * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
+ */
+static inline pte_t *__virt_to_kpte(unsigned long va)
+{
+ return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}
static void *boot_crst_alloc(unsigned long val)
{
+ unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
unsigned long *table;
- table = boot_alloc_pages(CRST_ALLOC_ORDER);
- if (table)
- crst_table_init(table, val);
+ table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
+ crst_table_init(table, val);
return table;
}
@@ -84,20 +216,23 @@ static pte_t *boot_pte_alloc(void)
static void *pte_leftover;
pte_t *pte;
- BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
-
+ /*
+ * handling pte_leftovers this way helps to avoid memory fragmentation
+ * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
+ */
if (!pte_leftover) {
- pte_leftover = boot_alloc_pages(0);
+ pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
pte = pte_leftover + _PAGE_TABLE_SIZE;
} else {
pte = pte_leftover;
pte_leftover = NULL;
}
+
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
-static unsigned long _pa(unsigned long addr, enum populate_mode mode)
+static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
switch (mode) {
case POPULATE_NONE:
@@ -106,6 +241,12 @@ static unsigned long _pa(unsigned long addr, enum populate_mode mode)
return addr;
case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr);
+#ifdef CONFIG_KASAN
+ case POPULATE_KASAN_MAP_SHADOW:
+ addr = physmem_alloc_top_down(RR_VMEM, size, size);
+ memset((void *)addr, 0, size);
+ return addr;
+#endif
default:
return -1;
}
@@ -126,13 +267,14 @@ static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
- unsigned long next;
pte_t *pte, entry;
pte = pte_offset_kernel(pmd, addr);
for (; addr < end; addr += PAGE_SIZE, pte++) {
if (pte_none(*pte)) {
- entry = __pte(_pa(addr, mode));
+ if (kasan_pte_populate_zero_shadow(pte, mode))
+ continue;
+ entry = __pte(_pa(addr, PAGE_SIZE, mode));
entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
set_pte(pte, entry);
}
@@ -150,8 +292,10 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
for (; addr < end; addr = next, pmd++) {
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd)) {
+ if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
+ continue;
if (can_large_pmd(pmd, addr, next)) {
- entry = __pmd(_pa(addr, mode));
+ entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
set_pmd(pmd, entry);
continue;
@@ -176,8 +320,10 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
for (; addr < end; addr = next, pud++) {
next = pud_addr_end(addr, end);
if (pud_none(*pud)) {
+ if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
+ continue;
if (can_large_pud(pud, addr, next)) {
- entry = __pud(_pa(addr, mode));
+ entry = __pud(_pa(addr, _REGION3_SIZE, mode));
entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
set_pud(pud, entry);
continue;
@@ -202,6 +348,8 @@ static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long e
for (; addr < end; addr = next, p4d++) {
next = p4d_addr_end(addr, end);
if (p4d_none(*p4d)) {
+ if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
+ continue;
pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
p4d_populate(&init_mm, p4d, pud);
}
@@ -219,9 +367,15 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
for (; addr < end; addr = next, pgd++) {
next = pgd_addr_end(addr, end);
if (pgd_none(*pgd)) {
+ if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
+ continue;
p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
pgd_populate(&init_mm, pgd, p4d);
}
+#ifdef CONFIG_KASAN
+ if (mode == POPULATE_KASAN_SHALLOW)
+ continue;
+#endif
pgtable_p4d_populate(pgd, addr, next, mode);
}
}
@@ -250,9 +404,8 @@ void setup_vmem(unsigned long asce_limit)
* To prevent creation of a large page at address 0 first map
* the lowcore and create the identity mapping only afterwards.
*/
- pgtable_populate_init();
pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
- for_each_mem_detect_usable_block(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end)
pgtable_populate(start, end, POPULATE_ONE2ONE);
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
POPULATE_ABS_LOWCORE);
@@ -260,6 +413,8 @@ void setup_vmem(unsigned long asce_limit)
POPULATE_NONE);
memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
+ kasan_populate_shadow();
+
S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
@@ -269,10 +424,3 @@ void setup_vmem(unsigned long asce_limit)
init_mm.context.asce = S390_lowcore.kernel_asce;
}
-
-unsigned long vmem_estimate_memory_needs(unsigned long online_mem_total)
-{
- unsigned long pages = DIV_ROUND_UP(online_mem_total, PAGE_SIZE);
-
- return DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
-}
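
boot_pte_alloc() above relies on _PAGE_TABLE_SIZE * 2 == PAGE_SIZE (2 KB page tables in a 4 KB page on s390): every second call reuses the remembered half of the previously allocated page instead of allocating anew. A standalone sketch of that pairing, with alloc_page() standing in for physmem_alloc_top_down(RR_VMEM, ...):

#include <string.h>

#define PAGE_SIZE	4096UL
#define TABLE_SIZE	(PAGE_SIZE / 2)		/* _PAGE_TABLE_SIZE on s390 */

void *alloc_page(void);				/* assumed page allocator */

void *pte_alloc(void)
{
	static void *leftover;
	void *pte;

	if (!leftover) {
		/* allocate a full page, hand out its upper half first ... */
		leftover = alloc_page();
		pte = (char *)leftover + TABLE_SIZE;
	} else {
		/* ... the next call consumes the remembered lower half */
		pte = leftover;
		leftover = NULL;
	}
	memset(pte, 0, TABLE_SIZE);	/* the real code fills _PAGE_INVALID entries here */
	return pte;
}
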
diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index fa9d33b01b85..389df0e0d9e5 100644
--- a/arch/s390/boot/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -93,6 +93,8 @@ SECTIONS
_decompressor_syms_end = .;
}
+ _decompressor_end = .;
+
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = 0x100000;
#else
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c699f251a464..d5d967166bac 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -43,10 +43,11 @@ struct ap_queue_status {
unsigned int queue_empty : 1;
unsigned int replies_waiting : 1;
unsigned int queue_full : 1;
- unsigned int _pad1 : 4;
+ unsigned int : 3;
+ unsigned int async : 1;
unsigned int irq_enabled : 1;
unsigned int response_code : 8;
- unsigned int _pad2 : 16;
+ unsigned int : 16;
};
/*
@@ -86,6 +87,42 @@ static inline bool ap_instructions_available(void)
return reg1 != 0;
}
+/* TAPQ register GR2 response struct */
+struct ap_tapq_gr2 {
+ union {
+ unsigned long value;
+ struct {
+ unsigned int fac : 32; /* facility bits */
+ unsigned int apinfo : 32; /* ap type, ... */
+ };
+ struct {
+ unsigned int s : 1; /* APSC */
+ unsigned int m : 1; /* AP4KM */
+ unsigned int c : 1; /* AP4KC */
+ unsigned int mode : 3;
+ unsigned int n : 1; /* APXA */
+ unsigned int : 1;
+ unsigned int class : 8;
+ unsigned int bs : 2; /* SE bind/assoc */
+ unsigned int : 14;
+ unsigned int at : 8; /* ap type */
+ unsigned int nd : 8; /* nr of domains */
+ unsigned int : 4;
+ unsigned int ml : 4; /* apxl ml */
+ unsigned int : 4;
+ unsigned int qd : 4; /* queue depth */
+ };
+ };
+};
+
+/*
+ * Convenience defines to be used with the bs field from struct ap_tapq_gr2
+ */
+#define AP_BS_Q_USABLE 0
+#define AP_BS_Q_USABLE_NO_SECURE_KEY 1
+#define AP_BS_Q_AVAIL_FOR_BINDING 2
+#define AP_BS_Q_UNUSABLE 3
+
/**
* ap_tapq(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -93,7 +130,7 @@ static inline bool ap_instructions_available(void)
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, struct ap_tapq_gr2 *info)
{
union ap_queue_status_reg reg1;
unsigned long reg2;
@@ -108,7 +145,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
: [qid] "d" (qid)
: "cc", "0", "1", "2");
if (info)
- *info = reg2;
+ info->value = reg2;
return reg1.status;
}
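
A minimal caller sketch (not part of the patch), assuming a valid qid; with the typed GR2 response, named fields replace open-coded shifts and masks on a raw unsigned long:

	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_tapq(qid, &info);
	if (!status.response_code) {
		/* fields of the TAPQ GR2 response are now self-describing */
		pr_debug("ap type %u, %u domains, queue depth %u, bs %u\n",
			 info.at, info.nd, info.qd, info.bs);
	}
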
@@ -116,13 +153,12 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
+ * @info: Ptr to tapq gr2 struct
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info)
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,
+ struct ap_tapq_gr2 *info)
{
if (tbit)
	qid |= 1UL << 23; /* set T bit */
@@ -132,14 +168,18 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
/**
* ap_pqap_rapq(): Reset adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */
union ap_queue_status_reg reg1;
+ if (fbit)
+ reg0 |= 1UL << 22;
+
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
@@ -153,14 +193,18 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
/**
* ap_pqap_zapq(): Reset and zeroize adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */
union ap_queue_status_reg reg1;
+ if (fbit)
+ reg0 |= 1UL << 22;
+
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
@@ -180,15 +224,16 @@ struct ap_config_info {
unsigned int apxa : 1; /* N bit */
unsigned int qact : 1; /* C bit */
unsigned int rc8a : 1; /* R bit */
- unsigned char _reserved1 : 4;
- unsigned char _reserved2[3];
- unsigned char Na; /* max # of APs - 1 */
- unsigned char Nd; /* max # of Domains - 1 */
- unsigned char _reserved3[10];
+ unsigned int : 4;
+ unsigned int apsb : 1; /* B bit */
+ unsigned int : 23;
+ unsigned char na; /* max # of APs - 1 */
+ unsigned char nd; /* max # of Domains - 1 */
+ unsigned char _reserved0[10];
unsigned int apm[8]; /* AP ID mask */
unsigned int aqm[8]; /* AP (usage) queue mask */
unsigned int adm[8]; /* AP (control) domain mask */
- unsigned char _reserved4[16];
+ unsigned char _reserved1[16];
} __aligned(8);
/**
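
A hedged sketch of how the new B bit might be consumed, assuming qci_info is filled via the existing ap_qci() helper (illustration only, not part of the patch):

	struct ap_config_info qci_info;

	if (!ap_qci(&qci_info) && qci_info.apsb)
		pr_info("SE AP bind facility available: %u APs, %u domains\n",
			qci_info.na + 1, qci_info.nd + 1);
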
@@ -318,6 +363,59 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
return reg1.status;
}
+/*
+ * ap_bapq(): SE bind AP queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may cause a specification exception.
+ */
+static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
+{
+ unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1");
+
+ return reg1.status;
+}
+
+/*
+ * ap_aapq(): SE associate AP queue.
+ * @qid: The AP queue number
+ * @sec_idx: The secret index
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may cause a specification exception.
+ */
+static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
+{
+ unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */
+ unsigned long reg2 = sec_idx;
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " lgr 2,%[reg2]\n" /* secret index into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0), [reg2] "d" (reg2)
+ : "cc", "0", "1", "2");
+
+ return reg1.status;
+}
+
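+/*
+ * One plausible SE bind/associate sequence using the new helpers together
+ * with the bs field from TAPQ GR2 (hedged sketch, not part of the patch:
+ * the exact state transitions are architecture-defined, and sec_idx is a
+ * placeholder for a real secret index):
+ *
+ *	status = ap_test_queue(qid, 1, &info);
+ *	if (status.response_code)
+ *		return;
+ *	switch (info.bs) {
+ *	case AP_BS_Q_AVAIL_FOR_BINDING:
+ *		status = ap_bapq(qid);		// bind queue to this SE guest
+ *		break;
+ *	case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ *		status = ap_aapq(qid, sec_idx);	// associate a secret
+ *		break;
+ *	}
+ */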
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
@@ -359,10 +457,11 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* ap_dqap(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- * @reslength: Resitual length on return
- * @resgr0: input: gr0 value (only used if != 0), output: resitual gr0 content
+ * @msg: Pointer to message buffer
+ * @msglen: Message buffer size
+ * @length: Pointer to length of actually written bytes
+ * @reslength: Residual length on return
+ * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
@@ -386,8 +485,9 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* *resgr0 is to be used instead of qid to further process this entry.
*/
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
- unsigned long long *psmid,
- void *msg, size_t length,
+ unsigned long *psmid,
+ void *msg, size_t msglen,
+ size_t *length,
size_t *reslength,
unsigned long *resgr0)
{
@@ -399,7 +499,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
rp1.even = 0UL;
rp1.odd = 0UL;
rp2.even = (unsigned long)msg;
- rp2.odd = (unsigned long)length;
+ rp2.odd = (unsigned long)msglen;
asm volatile(
" lgr 0,%[reg0]\n" /* qid param into gr0 */
@@ -429,11 +529,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
if (resgr0)
*resgr0 = reg0;
} else {
- *psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd;
+ *psmid = (rp1.even << 32) + rp1.odd;
if (resgr0)
*resgr0 = 0;
}
+ /* update *length with the nr of bytes stored into the msg buffer */
+ if (length)
+ *length = msglen - rp2.odd;
+
return reg1.status;
}
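
The reworked ap_dqap() separates the buffer size (msglen) from the byte count actually stored (*length). A hedged calling sketch; buf and qid are assumed:

	unsigned long psmid, resgr0 = 0;
	size_t written = 0, residual = 0;
	struct ap_queue_status status;

	status = ap_dqap(qid, &psmid, buf, sizeof(buf),
			 &written, &residual, &resgr0);
	if (!status.response_code)
		pr_debug("psmid %016lx: %zu bytes stored, %zu residual\n",
			 psmid, written, residual);
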
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index e5cfc81d5b61..0cffead0f2f2 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#include <asm/pgtable.h>
+#include <linux/const.h>
#ifdef CONFIG_KASAN
@@ -13,35 +13,6 @@
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-extern void kasan_early_init(void);
-
-/*
- * Estimate kasan memory requirements, which it will reserve
- * at the very end of available physical memory. To estimate
- * that, we take into account that kasan would require
- * 1/8 of available physical memory (for shadow memory) +
- * creating page tables for the shadow memory region.
- * To keep page tables estimates simple take the double of
- * combined ptes size.
- *
- * physmem parameter has to be already adjusted if not entire physical memory
- * would be used (e.g. due to effect of "mem=" option).
- */
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
-{
- unsigned long kasan_needs;
- unsigned long pages;
- /* for shadow memory */
- kasan_needs = round_up(physmem / 8, PAGE_SIZE);
- /* for paging structures */
- pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
- kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
-
- return kasan_needs;
-}
-#else
-static inline void kasan_early_init(void) { }
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
#endif
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index c76777b15fec..df3fb7d8227b 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -4,7 +4,7 @@
#include <linux/stringify.h>
-#define __ALIGN .align 16, 0x07
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
#endif
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
deleted file mode 100644
index f9e7354036d2..000000000000
--- a/arch/s390/include/asm/mem_detect.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_MEM_DETECT_H
-#define _ASM_S390_MEM_DETECT_H
-
-#include <linux/types.h>
-
-enum mem_info_source {
- MEM_DETECT_NONE = 0,
- MEM_DETECT_SCLP_STOR_INFO,
- MEM_DETECT_DIAG260,
- MEM_DETECT_SCLP_READ_INFO,
- MEM_DETECT_BIN_SEARCH
-};
-
-struct mem_detect_block {
- u64 start;
- u64 end;
-};
-
-/*
- * Storage element id is defined as 1 byte (up to 256 storage elements).
- * In practise only storage element id 0 and 1 are used).
- * According to architecture one storage element could have as much as
- * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
- * If more mem_detect_blocks are required, a block of memory from already
- * known mem_detect_block is taken (entries_extended points to it).
- */
-#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
-
-struct mem_detect_info {
- u32 count;
- u8 info_source;
- unsigned long usable;
- struct mem_detect_block entries[MEM_INLINED_ENTRIES];
- struct mem_detect_block *entries_extended;
-};
-extern struct mem_detect_info mem_detect;
-
-void add_mem_detect_block(u64 start, u64 end);
-
-static inline int __get_mem_detect_block(u32 n, unsigned long *start,
- unsigned long *end, bool respect_usable_limit)
-{
- if (n >= mem_detect.count) {
- *start = 0;
- *end = 0;
- return -1;
- }
-
- if (n < MEM_INLINED_ENTRIES) {
- *start = (unsigned long)mem_detect.entries[n].start;
- *end = (unsigned long)mem_detect.entries[n].end;
- } else {
- *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
- *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
- }
-
- if (respect_usable_limit && mem_detect.usable) {
- if (*start >= mem_detect.usable)
- return -1;
- if (*end > mem_detect.usable)
- *end = mem_detect.usable;
- }
- return 0;
-}
-
-/**
- * for_each_mem_detect_usable_block - early online memory range iterator
- * @i: an integer used as loop variable
- * @p_start: ptr to unsigned long for start address of the range
- * @p_end: ptr to unsigned long for end address of the range
- *
- * Walks over detected online memory ranges below usable limit.
- */
-#define for_each_mem_detect_usable_block(i, p_start, p_end) \
- for (i = 0; !__get_mem_detect_block(i, p_start, p_end, true); i++)
-
-/* Walks over all detected online memory ranges disregarding usable limit. */
-#define for_each_mem_detect_block(i, p_start, p_end) \
- for (i = 0; !__get_mem_detect_block(i, p_start, p_end, false); i++)
-
-static inline unsigned long get_mem_detect_usable_total(void)
-{
- unsigned long start, end, total = 0;
- int i;
-
- for_each_mem_detect_usable_block(i, &start, &end)
- total += end - start;
-
- return total;
-}
-
-static inline void get_mem_detect_reserved(unsigned long *start,
- unsigned long *size)
-{
- *start = (unsigned long)mem_detect.entries_extended;
- if (mem_detect.count > MEM_INLINED_ENTRIES)
- *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
- else
- *size = 0;
-}
-
-static inline unsigned long get_mem_detect_end(void)
-{
- unsigned long start;
- unsigned long end;
-
- if (mem_detect.usable)
- return mem_detect.usable;
- if (mem_detect.count) {
- __get_mem_detect_block(mem_detect.count - 1, &start, &end, false);
- return end;
- }
- return 0;
-}
-
-#endif
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 7e9e99523e95..7a946c42ad13 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_NOSPEC_ASM_H
#define _ASM_S390_NOSPEC_ASM_H
+#include <linux/linkage.h>
#include <asm/dwarf.h>
#ifdef __ASSEMBLY__
@@ -16,7 +17,7 @@
.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
.pushsection .text,"ax",@progbits
- .align 16,0x07
+ __ALIGN
#else
.pushsection .text.\name,"axG",@progbits,\name,comdat
#endif
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index b9da71632827..9917e2717b2b 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -60,7 +60,6 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define REG_NONE 0
@@ -71,7 +70,6 @@ struct perf_sf_sde_regs {
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
new file mode 100644
index 000000000000..8e9c582592b3
--- /dev/null
+++ b/arch/s390/include/asm/physmem_info.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+
+enum physmem_info_source {
+ MEM_DETECT_NONE = 0,
+ MEM_DETECT_SCLP_STOR_INFO,
+ MEM_DETECT_DIAG260,
+ MEM_DETECT_SCLP_READ_INFO,
+ MEM_DETECT_BIN_SEARCH
+};
+
+struct physmem_range {
+ u64 start;
+ u64 end;
+};
+
+enum reserved_range_type {
+ RR_DECOMPRESSOR,
+ RR_INITRD,
+ RR_VMLINUX,
+ RR_AMODE31,
+ RR_IPLREPORT,
+ RR_CERT_COMP_LIST,
+ RR_MEM_DETECT_EXTENDED,
+ RR_VMEM,
+ RR_MAX
+};
+
+struct reserved_range {
+ unsigned long start;
+ unsigned long end;
+ struct reserved_range *chain;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practice only storage element ids 0 and 1 are used.
+ * According to the architecture one storage element can have as many as
+ * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
+ * If more physmem_ranges are required, a block of memory from already
+ * known physmem_range is taken (online_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct physmem_info {
+ u32 range_count;
+ u8 info_source;
+ unsigned long usable;
+ struct reserved_range reserved[RR_MAX];
+ struct physmem_range online[MEM_INLINED_ENTRIES];
+ struct physmem_range *online_extended;
+};
+
+extern struct physmem_info physmem_info;
+
+void add_physmem_online_range(u64 start, u64 end);
+
+static inline int __get_physmem_range(u32 n, unsigned long *start,
+ unsigned long *end, bool respect_usable_limit)
+{
+ if (n >= physmem_info.range_count) {
+ *start = 0;
+ *end = 0;
+ return -1;
+ }
+
+ if (n < MEM_INLINED_ENTRIES) {
+ *start = (unsigned long)physmem_info.online[n].start;
+ *end = (unsigned long)physmem_info.online[n].end;
+ } else {
+ *start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
+ *end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
+ }
+
+ if (respect_usable_limit && physmem_info.usable) {
+ if (*start >= physmem_info.usable)
+ return -1;
+ if (*end > physmem_info.usable)
+ *end = physmem_info.usable;
+ }
+ return 0;
+}
+
+/**
+ * for_each_physmem_usable_range - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges below usable limit.
+ */
+#define for_each_physmem_usable_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)
+
+/* Walks over all detected online memory ranges disregarding usable limit. */
+#define for_each_physmem_online_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
+
+static inline const char *get_physmem_info_source(void)
+{
+ switch (physmem_info.info_source) {
+ case MEM_DETECT_SCLP_STOR_INFO:
+ return "sclp storage info";
+ case MEM_DETECT_DIAG260:
+ return "diag260";
+ case MEM_DETECT_SCLP_READ_INFO:
+ return "sclp read info";
+ case MEM_DETECT_BIN_SEARCH:
+ return "binary search";
+ }
+ return "none";
+}
+
+#define RR_TYPE_NAME(t) case RR_ ## t: return #t
+static inline const char *get_rr_type_name(enum reserved_range_type t)
+{
+ switch (t) {
+ RR_TYPE_NAME(DECOMPRESSOR);
+ RR_TYPE_NAME(INITRD);
+ RR_TYPE_NAME(VMLINUX);
+ RR_TYPE_NAME(AMODE31);
+ RR_TYPE_NAME(IPLREPORT);
+ RR_TYPE_NAME(CERT_COMP_LIST);
+ RR_TYPE_NAME(MEM_DETECT_EXTENDED);
+ RR_TYPE_NAME(VMEM);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+#define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
+ for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
+ range && range->end; range = range->chain, \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
+ struct reserved_range *range)
+{
+ if (!range) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ if (range->chain)
+ return range->chain;
+ while (++*t < RR_MAX) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ return NULL;
+}
+
+#define for_each_physmem_reserved_range(t, range, p_start, p_end) \
+ for (t = 0, range = __physmem_reserved_next(&t, NULL), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0; \
+ range; range = __physmem_reserved_next(&t, range), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
+ unsigned long *addr, unsigned long *size)
+{
+ *addr = physmem_info.reserved[type].start;
+ *size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
+ return *size;
+}
+
+#endif
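
A hedged usage sketch of the new iterators (illustration only, not part of the patch):

	struct reserved_range *range;
	enum reserved_range_type t;
	unsigned long start, end;
	int i;

	for_each_physmem_usable_range(i, &start, &end)
		pr_debug("online: 0x%lx-0x%lx\n", start, end);

	for_each_physmem_reserved_range(t, range, &start, &end)
		pr_debug("reserved %s: 0x%lx-0x%lx\n",
			 get_rr_type_name(t), start, end);
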
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e98d9650764b..80ac0c1034dc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -99,7 +99,6 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
-extern void __bpon(void);
unsigned long vdso_size(void);
/*
@@ -329,9 +328,6 @@ static __always_inline void __noreturn disabled_wait(void)
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
-extern int s390_isolate_bp(void);
-extern int s390_isolate_bp_guest(void);
-
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return arch_irqs_disabled_flags(regs->psw.mask);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 3a1f8825bc7d..b28d250efbaa 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -74,10 +74,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
-extern unsigned long pgalloc_pos;
-extern unsigned long pgalloc_end;
-extern unsigned long pgalloc_low;
-extern unsigned long __amode31_base;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
@@ -150,13 +146,6 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}
-extern int is_full_image;
-
-struct initrd_data {
- unsigned long start;
- unsigned long size;
-};
-extern struct initrd_data initrd_data;
struct oldmem_data {
unsigned long start;
@@ -164,7 +153,7 @@ struct oldmem_data {
};
extern struct oldmem_data oldmem_data;
-static inline u32 gen_lpswe(unsigned long addr)
+static __always_inline u32 gen_lpswe(unsigned long addr)
{
BUILD_BUG_ON(addr > 0xfff);
return 0xb2b20000 | addr;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b2ffcb4fe000..f19e6f5ec367 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -70,7 +70,6 @@ void arch_setup_new_exec(void);
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
-#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
@@ -94,7 +93,6 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
-#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index b376f0377a2c..221c865785c2 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -981,16 +981,6 @@ static struct ctl_table s390dbf_table[] = {
{ }
};
-static struct ctl_table s390dbf_dir_table[] = {
- {
- .procname = "s390dbf",
- .maxlen = 0,
- .mode = S_IRUGO | S_IXUGO,
- .child = s390dbf_table,
- },
- { }
-};
-
static struct ctl_table_header *s390dbf_sysctl_header;
/**
@@ -1574,7 +1564,7 @@ out:
*/
static int __init debug_init(void)
{
- s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
+ s390dbf_sysctl_header = register_sysctl("s390dbf", s390dbf_table);
mutex_lock(&debug_mutex);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
initialized = 1;
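
The same conversion recurs in topology.c, cmm.c and pgalloc.c below: register_sysctl() takes the directory path as a string, so the single-entry wrapper tables chained via .child become unnecessary. A sketch of the resulting pattern (the error handling is illustrative, not part of the patch):

	/* one call creates the "s390dbf" directory and registers the table */
	s390dbf_sysctl_header = register_sysctl("s390dbf", s390dbf_table);
	if (!s390dbf_sysctl_header)
		return -ENOMEM;
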
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 1e3233eb510a..f257058d0828 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -152,7 +152,13 @@ void show_stack(struct task_struct *task, unsigned long *stack,
static void show_last_breaking_event(struct pt_regs *regs)
{
printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break);
+ printk(" [<%016lx>] ", regs->last_break);
+ if (user_mode(regs)) {
+ print_vma_addr(KERN_CONT, regs->last_break);
+ pr_cont("\n");
+ } else {
+ pr_cont("%pSR\n", (void *)regs->last_break);
+ }
}
void show_registers(struct pt_regs *regs)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d26f02495636..2dd5976a55ac 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -34,8 +34,6 @@
#include <asm/switch_to.h>
#include "entry.h"
-int __bootdata(is_full_image);
-
#define decompressor_handled_param(param) \
static int __init ignore_decompressor_param_##param(char *s) \
{ \
@@ -53,6 +51,14 @@ decompressor_handled_param(nokaslr);
decompressor_handled_param(prot_virt);
#endif
+static void __init kasan_early_init(void)
+{
+#ifdef CONFIG_KASAN
+ init_task.kasan_depth = 0;
+ sclp_early_printk("KernelAddressSanitizer initialized\n");
+#endif
+}
+
static void __init reset_tod_clock(void)
{
union tod_clock clk;
@@ -288,17 +294,6 @@ static void __init setup_boot_command_line(void)
strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
}
-static void __init check_image_bootable(void)
-{
- if (is_full_image)
- return;
-
- sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
- sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
- sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
- disabled_wait();
-}
-
static void __init sort_amode31_extable(void)
{
sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
@@ -306,8 +301,8 @@ static void __init sort_amode31_extable(void)
void __init startup_init(void)
{
+ kasan_early_init();
reset_tod_clock();
- check_image_bootable();
time_early_init();
init_kernel_storage_key();
lockdep_off();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 76a06f3d3671..58b85aedca22 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -159,21 +159,15 @@ _LPP_OFFSET = __LC_LPP
.section .kprobes.text, "ax"
.Ldummy:
/*
- * This nop exists only in order to avoid that __bpon starts at
- * the beginning of the kprobes text section. In that case we would
- * have several symbols at the same address. E.g. objdump would take
- * an arbitrary symbol name when disassembling this code.
- * With the added nop in between the __bpon symbol is unique
- * again.
+ * The following nop exists only to prevent the next symbol from
+ * starting at the beginning of the kprobes text section. In that
+ * case there would be several symbols at the same address, and
+ * e.g. objdump would pick an arbitrary one when disassembling
+ * the code. The added nop in between rules this out.
*/
nop 0
-ENTRY(__bpon)
- .globl __bpon
- BPON
- BR_EX %r14
-ENDPROC(__bpon)
-
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -227,7 +221,7 @@ ENTRY(__sie64a)
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
- BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ BPEXIT __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
sie 0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
@@ -235,7 +229,7 @@ ENTRY(__sie64a)
nopr 7
.Lsie_leave:
BPOFF
- BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
@@ -291,11 +285,9 @@ ENTRY(system_call)
.Lsysc_per:
STBEAR __LC_LAST_BREAK
lctlg %c1,%c1,__LC_KERNEL_ASCE
- lg %r12,__LC_CURRENT
lg %r15,__LC_KERNEL_STACK
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
# clear user controlled register to prevent speculative use
xgr %r0,%r0
xgr %r1,%r1
@@ -314,7 +306,7 @@ ENTRY(system_call)
brasl %r14,__do_syscall
lctlg %c1,%c1,__LC_USER_ASCE
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
stpt __LC_EXIT_TIMER
@@ -329,7 +321,7 @@ ENTRY(ret_from_fork)
brasl %r14,__ret_from_fork
lctlg %c1,%c1,__LC_USER_ASCE
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
stpt __LC_EXIT_TIMER
@@ -344,7 +336,6 @@ ENTRY(pgm_check_handler)
stpt __LC_SYS_ENTER_TIMER
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r12,__LC_CURRENT
lghi %r10,0
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # coming from user space?
@@ -355,6 +346,7 @@ ENTRY(pgm_check_handler)
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in __sie64a
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
+ BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT
lghi %r10,_PIF_GUEST_FAULT
#endif
@@ -366,8 +358,7 @@ ENTRY(pgm_check_handler)
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
-3: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
- lg %r15,__LC_KERNEL_STACK
+3: lg %r15,__LC_KERNEL_STACK
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
stg %r10,__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
@@ -389,7 +380,7 @@ ENTRY(pgm_check_handler)
tmhh %r8,0x0001 # returning to user space?
jno .Lpgm_exit_kernel
lctlg %c1,%c1,__LC_USER_ASCE
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
stpt __LC_EXIT_TIMER
.Lpgm_exit_kernel:
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
@@ -419,20 +410,18 @@ ENTRY(\name)
STBEAR __LC_LAST_BREAK
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r12,__LC_CURRENT
lmg %r8,%r9,\lc_old_psw
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
#if IS_ENABLED(CONFIG_KVM)
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
- BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
- lctlg %c1,%c1,__LC_KERNEL_ASCE
+1: lctlg %c1,%c1,__LC_KERNEL_ASCE
lg %r15,__LC_KERNEL_STACK
2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -456,7 +445,7 @@ ENTRY(\name)
tmhh %r8,0x0001 # returning to user ?
jno 2f
lctlg %c1,%c1,__LC_USER_ASCE
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
stpt __LC_EXIT_TIMER
2: LBEAR __PT_LAST_BREAK(%r11)
lmg %r0,%r15,__PT_R0(%r11)
@@ -501,7 +490,6 @@ ENTRY(mcck_int_handler)
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
- lg %r12,__LC_CURRENT
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
@@ -530,16 +518,13 @@ ENTRY(mcck_int_handler)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
- OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_user
OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-4: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT
- j .Lmcck_stack
#endif
.Lmcck_user:
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-.Lmcck_stack:
lg %r15,__LC_MCCK_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15)
stctg %c1,%c1,__PT_CR1(%r11)
@@ -567,7 +552,7 @@ ENTRY(mcck_int_handler)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ BPON
stpt __LC_EXIT_TIMER
0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
LBEAR 0(%r12)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 416b5a94353d..6f6c44b7af89 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -49,26 +49,6 @@ struct ftrace_insn {
s32 disp;
} __packed;
-asm(
- " .align 16\n"
- "ftrace_shared_hotpatch_trampoline_br:\n"
- " lmg %r0,%r1,2(%r1)\n"
- " br %r1\n"
- "ftrace_shared_hotpatch_trampoline_br_end:\n"
-);
-
-#ifdef CONFIG_EXPOLINE
-asm(
- " .align 16\n"
- "ftrace_shared_hotpatch_trampoline_exrl:\n"
- " lmg %r0,%r1,2(%r1)\n"
- " exrl %r0,0f\n"
- " j .\n"
- "0: br %r1\n"
- "ftrace_shared_hotpatch_trampoline_exrl_end:\n"
-);
-#endif /* CONFIG_EXPOLINE */
-
#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 3b3bf8329e6c..f68be3951103 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,9 +26,6 @@ ENTRY(startup_continue)
stg %r14,__LC_CURRENT
larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
brasl %r14,sclp_early_adjust_va # allow sclp_early_printk
-#ifdef CONFIG_KASAN
- brasl %r14,kasan_early_init
-#endif
brasl %r14,startup_init # s390 specific early init
brasl %r14,start_kernel # common init code
#
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5f0f5c86963a..0f91cd401eef 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -649,7 +649,6 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
- __bpon();
diag308(DIAG308_LOAD_CLEAR, NULL);
}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 43ff91073d2a..4c4ee762f515 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -135,6 +135,22 @@ SYM_FUNC_END(return_to_handler)
#endif
#endif /* CONFIG_FUNCTION_TRACER */
+SYM_CODE_START(ftrace_shared_hotpatch_trampoline_br)
+ lmg %r0,%r1,2(%r1)
+ br %r1
+SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_br_end, SYM_L_GLOBAL)
+SYM_CODE_END(ftrace_shared_hotpatch_trampoline_br)
+
+#ifdef CONFIG_EXPOLINE
+SYM_CODE_START(ftrace_shared_hotpatch_trampoline_exrl)
+ lmg %r0,%r1,2(%r1)
+ exrl %r0,0f
+ j .
+0: br %r1
+SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_exrl_end, SYM_L_GLOBAL)
+SYM_CODE_END(ftrace_shared_hotpatch_trampoline_exrl)
+#endif /* CONFIG_EXPOLINE */
+
#ifdef CONFIG_RETHOOK
SYM_FUNC_START(arch_rethook_trampoline)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index e7b867e2f73f..7ef72f5ff52e 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -882,10 +882,6 @@ static int __hw_perf_event_init(struct perf_event *event)
SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
}
- /* Check and set other sampling flags */
- if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
- SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
-
err = __hw_perf_event_init_rate(event, &si);
if (err)
goto out;
@@ -1293,11 +1289,8 @@ static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t
* The sampling buffer position are retrieved and saved in the TEAR_REG
* register of the specified perf event.
*
- * Only full sample-data-blocks are processed. Specify the flash_all flag
- * to also walk through partially filled sample-data-blocks. It is ignored
- * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag
- * enforces the processing of full sample-data-blocks only (trailer entries
- * with the block-full-indicator bit set).
+ * Only full sample-data-blocks are processed. Specify the flush_all flag
+ * to also walk through partially filled sample-data-blocks.
*/
static void hw_perf_event_update(struct perf_event *event, int flush_all)
{
@@ -1315,9 +1308,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
if (SAMPL_DIAG_MODE(&event->hw))
return;
- if (flush_all && SDB_FULL_BLOCKS(hwc))
- flush_all = 0;
-
sdbt = (unsigned long *) TEAR_REG(hwc);
done = event_overflow = sampl_overflow = num_sdb = 0;
while (!done) {
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 67df64ef4839..87ca3a727604 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -136,12 +136,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.last_break = 1;
frame->sf.back_chain = 0;
- frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame);
- frame->sf.gprs[6] = (unsigned long)p;
+ frame->sf.gprs[11 - 6] = (unsigned long)&frame->childregs;
+ frame->sf.gprs[12 - 6] = (unsigned long)p;
/* new return point is ret_from_fork */
- frame->sf.gprs[8] = (unsigned long)ret_from_fork;
+ frame->sf.gprs[14 - 6] = (unsigned long)ret_from_fork;
/* fake return stack for resume(), don't go back to schedule */
- frame->sf.gprs[9] = (unsigned long)frame;
+ frame->sf.gprs[15 - 6] = (unsigned long)frame;
/* Store access registers to kernel stack of new process. */
if (unlikely(args->fn)) {
@@ -149,8 +149,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
memset(&frame->childregs, 0, sizeof(struct pt_regs));
frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO |
PSW_MASK_EXT | PSW_MASK_MCHECK;
- frame->childregs.psw.addr =
- (unsigned long)__ret_from_fork;
frame->childregs.gprs[9] = (unsigned long)args->fn;
frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
frame->childregs.orig_gpr2 = -1;
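
The rewritten indices encode the register mapping directly: struct stack_frame saves only the callee-saved registers %r6-%r15, so register %rN lives at gprs[N - 6]. An annotated restatement (illustration only):

	frame->sf.gprs[11 - 6] = (unsigned long)&frame->childregs; /* %r11: pt_regs */
	frame->sf.gprs[12 - 6] = (unsigned long)p;                 /* %r12: task pointer */
	frame->sf.gprs[14 - 6] = (unsigned long)ret_from_fork;     /* %r14: return address */
	frame->sf.gprs[15 - 6] = (unsigned long)frame;             /* %r15: stack pointer */
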
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index a194611ba88c..0a999c8226d7 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -364,21 +364,3 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
-
-int s390_isolate_bp(void)
-{
- if (!test_facility(82))
- return -EOPNOTSUPP;
- set_thread_flag(TIF_ISOLATE_BP);
- return 0;
-}
-EXPORT_SYMBOL(s390_isolate_bp);
-
-int s390_isolate_bp_guest(void)
-{
- if (!test_facility(82))
- return -EOPNOTSUPP;
- set_thread_flag(TIF_ISOLATE_BP_GUEST);
- return 0;
-}
-EXPORT_SYMBOL(s390_isolate_bp_guest);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8ec5cdf9dadc..d25425b8d0c0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -74,7 +74,7 @@
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
@@ -147,14 +147,9 @@ static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
-struct mem_detect_info __bootdata(mem_detect);
-struct initrd_data __bootdata(initrd_data);
-unsigned long __bootdata(pgalloc_pos);
-unsigned long __bootdata(pgalloc_end);
-unsigned long __bootdata(pgalloc_low);
+struct physmem_info __bootdata(physmem_info);
unsigned long __bootdata_preserved(__kaslr_offset);
-unsigned long __bootdata(__amode31_base);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -635,7 +630,11 @@ static struct notifier_block kdump_mem_nb = {
*/
static void __init reserve_pgtables(void)
{
- memblock_reserve(pgalloc_pos, pgalloc_end - pgalloc_pos);
+ unsigned long start, end;
+ struct reserved_range *range;
+
+ for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
+ memblock_reserve(start, end - start);
}
/*
@@ -712,13 +711,13 @@ static void __init reserve_crashkernel(void)
*/
static void __init reserve_initrd(void)
{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (!initrd_data.start || !initrd_data.size)
+ unsigned long addr, size;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
return;
- initrd_start = (unsigned long)__va(initrd_data.start);
- initrd_end = initrd_start + initrd_data.size;
- memblock_reserve(initrd_data.start, initrd_data.size);
-#endif
+ initrd_start = (unsigned long)__va(addr);
+ initrd_end = initrd_start + size;
+ memblock_reserve(addr, size);
}
/*
@@ -730,72 +729,40 @@ static void __init reserve_certificate_list(void)
memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}
-static void __init reserve_mem_detect_info(void)
+static void __init reserve_physmem_info(void)
{
- unsigned long start, size;
+ unsigned long addr, size;
- get_mem_detect_reserved(&start, &size);
- if (size)
- memblock_reserve(start, size);
+ if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ memblock_reserve(addr, size);
}
-static void __init free_mem_detect_info(void)
+static void __init free_physmem_info(void)
{
- unsigned long start, size;
+ unsigned long addr, size;
- get_mem_detect_reserved(&start, &size);
- if (size)
- memblock_phys_free(start, size);
-}
-
-static const char * __init get_mem_info_source(void)
-{
- switch (mem_detect.info_source) {
- case MEM_DETECT_SCLP_STOR_INFO:
- return "sclp storage info";
- case MEM_DETECT_DIAG260:
- return "diag260";
- case MEM_DETECT_SCLP_READ_INFO:
- return "sclp read info";
- case MEM_DETECT_BIN_SEARCH:
- return "binary search";
- }
- return "none";
+ if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ memblock_phys_free(addr, size);
}
-static void __init memblock_add_mem_detect_info(void)
+static void __init memblock_add_physmem_info(void)
{
unsigned long start, end;
int i;
pr_debug("physmem info source: %s (%hhd)\n",
- get_mem_info_source(), mem_detect.info_source);
+ get_physmem_info_source(), physmem_info.info_source);
/* keep memblock lists close to the kernel */
memblock_set_bottom_up(true);
- for_each_mem_detect_usable_block(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end)
memblock_add(start, end - start);
- for_each_mem_detect_block(i, &start, &end)
+ for_each_physmem_online_range(i, &start, &end)
memblock_physmem_add(start, end - start);
memblock_set_bottom_up(false);
memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}
/*
- * Check for initrd being in usable memory
- */
-static void __init check_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_data.start && initrd_data.size &&
- !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
- pr_err("The initial RAM disk does not fit into the memory\n");
- memblock_phys_free(initrd_data.start, initrd_data.size);
- initrd_start = initrd_end = 0;
- }
-#endif
-}
-
-/*
* Reserve memory used for lowcore/command line/kernel image.
*/
static void __init reserve_kernel(void)
@@ -803,7 +770,7 @@ static void __init reserve_kernel(void)
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
- memblock_reserve(__amode31_base, __eamode31 - __samode31);
+ memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
memblock_reserve(__pa(_stext), _end - _stext);
}
@@ -825,13 +792,13 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void)
{
unsigned long amode31_size = __eamode31 - __samode31;
- long amode31_offset = __amode31_base - __samode31;
+ long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31;
long *ptr;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */
- memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
+ memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */
memset((void *)__samode31, 0, amode31_size);
@@ -997,14 +964,14 @@ void __init setup_arch(char **cmdline_p)
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
- reserve_mem_detect_info();
+ reserve_physmem_info();
memblock_set_current_limit(ident_map_size);
memblock_allow_resize();
/* Get information about *all* installed memory */
- memblock_add_mem_detect_info();
+ memblock_add_physmem_info();
- free_mem_detect_info();
+ free_physmem_info();
setup_memory_end();
memblock_dump_all();
setup_memory();
@@ -1017,7 +984,6 @@ void __init setup_arch(char **cmdline_p)
if (MACHINE_HAS_EDAT2)
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
- check_initrd();
reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d4888453bbf8..0126c5f6b904 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -348,7 +348,6 @@ static void pcpu_delegate(struct pcpu *pcpu,
abs_lc->restart_source = source_cpu;
put_abs_lowcore(abs_lc);
}
- __bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
@@ -986,7 +985,6 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void)
{
idle_task_exit();
- __bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index c6eecd4a5302..e5d6a1c25d13 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -637,16 +637,6 @@ static struct ctl_table topology_ctl_table[] = {
{ },
};
-static struct ctl_table topology_dir_table[] = {
- {
- .procname = "s390",
- .maxlen = 0,
- .mode = 0555,
- .child = topology_ctl_table,
- },
- { },
-};
-
static int __init topology_init(void)
{
timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
@@ -654,7 +644,7 @@ static int __init topology_init(void)
set_topology_timer();
else
topology_update_polarization_simple();
- register_sysctl_table(topology_dir_table);
+ register_sysctl("s390", topology_ctl_table);
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
diff --git a/arch/s390/kernel/vdso32/vdso_user_wrapper.S b/arch/s390/kernel/vdso32/vdso_user_wrapper.S
index 3f42f27f978c..2e645003fdaf 100644
--- a/arch/s390/kernel/vdso32/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso32/vdso_user_wrapper.S
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
.macro vdso_syscall func,syscall
.globl __kernel_compat_\func
.type __kernel_compat_\func,@function
- .align 8
+ __ALIGN
__kernel_compat_\func:
CFI_STARTPROC
svc \syscall
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index 97f0c0a669a5..57f62596e53b 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
#include <asm/vdso.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
@@ -16,7 +17,7 @@
.macro vdso_func func
.globl __kernel_\func
.type __kernel_\func,@function
- .align 8
+ __ALIGN
__kernel_\func:
CFI_STARTPROC
aghi %r15,-WRAPPER_FRAME_SIZE
@@ -41,7 +42,7 @@ vdso_func getcpu
.macro vdso_syscall func,syscall
.globl __kernel_\func
.type __kernel_\func,@function
- .align 8
+ __ALIGN
__kernel_\func:
CFI_STARTPROC
svc \syscall
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b653ba8d51e6..8d2288a5ba25 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -219,6 +219,13 @@ SECTIONS
QUAD(init_mm)
QUAD(swapper_pg_dir)
QUAD(invalid_pg_dir)
+#ifdef CONFIG_KASAN
+ QUAD(kasan_early_shadow_page)
+ QUAD(kasan_early_shadow_pte)
+ QUAD(kasan_early_shadow_pmd)
+ QUAD(kasan_early_shadow_pud)
+ QUAD(kasan_early_shadow_p4d)
+#endif
} :NONE
/* Debugging sections. */
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 57e4f3a24829..d90db06a8af5 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
-
-KASAN_SANITIZE_kasan_init.o := n
-obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 9141ed4c52e9..5300c6867d5e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -335,16 +335,6 @@ static struct ctl_table cmm_table[] = {
{ }
};
-static struct ctl_table cmm_dir_table[] = {
- {
- .procname = "vm",
- .maxlen = 0,
- .mode = 0555,
- .child = cmm_table,
- },
- { }
-};
-
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
static void cmm_smsg_target(const char *from, char *msg)
@@ -389,7 +379,7 @@ static int __init cmm_init(void)
{
int rc = -ENOMEM;
- cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
+ cmm_sysctl_header = register_sysctl("vm", cmm_table);
if (!cmm_sysctl_header)
goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
deleted file mode 100644
index ef89a5f26853..000000000000
--- a/arch/s390/mm/kasan_init.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kasan.h>
-#include <linux/sched/task.h>
-#include <linux/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/kasan.h>
-#include <asm/mem_detect.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-#include <asm/facility.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/uv.h>
-
-static unsigned long segment_pos __initdata;
-static unsigned long segment_low __initdata;
-static bool has_edat __initdata;
-static bool has_nx __initdata;
-
-#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
-
-static void __init kasan_early_panic(const char *reason)
-{
- sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
- sclp_early_printk(reason);
- disabled_wait();
-}
-
-static void * __init kasan_early_alloc_segment(void)
-{
- segment_pos -= _SEGMENT_SIZE;
-
- if (segment_pos < segment_low)
- kasan_early_panic("out of memory during initialisation\n");
-
- return __va(segment_pos);
-}
-
-static void * __init kasan_early_alloc_pages(unsigned int order)
-{
- pgalloc_pos -= (PAGE_SIZE << order);
-
- if (pgalloc_pos < pgalloc_low)
- kasan_early_panic("out of memory during initialisation\n");
-
- return __va(pgalloc_pos);
-}
-
-static void * __init kasan_early_crst_alloc(unsigned long val)
-{
- unsigned long *table;
-
- table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
- if (table)
- crst_table_init(table, val);
- return table;
-}
-
-static pte_t * __init kasan_early_pte_alloc(void)
-{
- static void *pte_leftover;
- pte_t *pte;
-
- BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
-
- if (!pte_leftover) {
- pte_leftover = kasan_early_alloc_pages(0);
- pte = pte_leftover + _PAGE_TABLE_SIZE;
- } else {
- pte = pte_leftover;
- pte_leftover = NULL;
- }
- memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
- return pte;
-}
-
-enum populate_mode {
- POPULATE_MAP,
- POPULATE_ZERO_SHADOW,
- POPULATE_SHALLOW
-};
-
-static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
-{
- return __pgprot(pgprot_val(pgprot) & ~bit);
-}
-
-static void __init kasan_early_pgtable_populate(unsigned long address,
- unsigned long end,
- enum populate_mode mode)
-{
- pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
- pgprot_t pgt_prot = PAGE_KERNEL;
- pgprot_t sgt_prot = SEGMENT_KERNEL;
- pgd_t *pg_dir;
- p4d_t *p4_dir;
- pud_t *pu_dir;
- pmd_t *pm_dir;
- pte_t *pt_dir;
- pmd_t pmd;
- pte_t pte;
-
- if (!has_nx) {
- pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
- pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
- sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
- }
-
- while (address < end) {
- pg_dir = pgd_offset_k(address);
- if (pgd_none(*pg_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, PGDIR_SIZE) &&
- end - address >= PGDIR_SIZE) {
- pgd_populate(&init_mm, pg_dir,
- kasan_early_shadow_p4d);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- continue;
- }
- p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
- pgd_populate(&init_mm, pg_dir, p4_dir);
- }
-
- if (mode == POPULATE_SHALLOW) {
- address = (address + P4D_SIZE) & P4D_MASK;
- continue;
- }
-
- p4_dir = p4d_offset(pg_dir, address);
- if (p4d_none(*p4_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, P4D_SIZE) &&
- end - address >= P4D_SIZE) {
- p4d_populate(&init_mm, p4_dir,
- kasan_early_shadow_pud);
- address = (address + P4D_SIZE) & P4D_MASK;
- continue;
- }
- pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
- p4d_populate(&init_mm, p4_dir, pu_dir);
- }
-
- pu_dir = pud_offset(p4_dir, address);
- if (pud_none(*pu_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, PUD_SIZE) &&
- end - address >= PUD_SIZE) {
- pud_populate(&init_mm, pu_dir,
- kasan_early_shadow_pmd);
- address = (address + PUD_SIZE) & PUD_MASK;
- continue;
- }
- pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
- pud_populate(&init_mm, pu_dir, pm_dir);
- }
-
- pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir)) {
- if (IS_ALIGNED(address, PMD_SIZE) &&
- end - address >= PMD_SIZE) {
- if (mode == POPULATE_ZERO_SHADOW) {
- pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- } else if (has_edat) {
- void *page = kasan_early_alloc_segment();
-
- memset(page, 0, _SEGMENT_SIZE);
- pmd = __pmd(__pa(page));
- pmd = set_pmd_bit(pmd, sgt_prot);
- set_pmd(pm_dir, pmd);
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
- }
- pt_dir = kasan_early_pte_alloc();
- pmd_populate(&init_mm, pm_dir, pt_dir);
- } else if (pmd_large(*pm_dir)) {
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
-
- pt_dir = pte_offset_kernel(pm_dir, address);
- if (pte_none(*pt_dir)) {
- void *page;
-
- switch (mode) {
- case POPULATE_MAP:
- page = kasan_early_alloc_pages(0);
- memset(page, 0, PAGE_SIZE);
- pte = __pte(__pa(page));
- pte = set_pte_bit(pte, pgt_prot);
- set_pte(pt_dir, pte);
- break;
- case POPULATE_ZERO_SHADOW:
- page = kasan_early_shadow_page;
- pte = __pte(__pa(page));
- pte = set_pte_bit(pte, pgt_prot_zero);
- set_pte(pt_dir, pte);
- break;
- case POPULATE_SHALLOW:
- /* should never happen */
- break;
- }
- }
- address += PAGE_SIZE;
- }
-}
-
-static void __init kasan_early_detect_facilities(void)
-{
- if (test_facility(8)) {
- has_edat = true;
- __ctl_set_bit(0, 23);
- }
- if (!noexec_disabled && test_facility(130)) {
- has_nx = true;
- __ctl_set_bit(0, 20);
- }
-}
-
-void __init kasan_early_init(void)
-{
- pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
- pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
- pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
- p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
- unsigned long untracked_end = MODULES_VADDR;
- unsigned long shadow_alloc_size;
- unsigned long start, end;
- int i;
-
- kasan_early_detect_facilities();
- if (!has_nx)
- pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
-
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
-
- /* init kasan zero shadow */
- crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
- crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
- crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
- memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
-
- if (has_edat) {
- shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
- segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
- segment_low = segment_pos - shadow_alloc_size;
- segment_low = round_down(segment_low, _SEGMENT_SIZE);
- pgalloc_pos = segment_low;
- }
- /*
- * Current memory layout:
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan |
- * | | / | zero page |
- * +- vmalloc area -+ / | mapping |
- * | vmalloc_size | / | (untracked) |
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ | unmapped | allocated per module
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- *
- * Current memory layout (KASAN_VMALLOC):
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan zero page| (untracked)
- * | | / | mapping |
- * +- vmalloc area -+ / +----------------+
- * | vmalloc_size | / |shallow populate|
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ |shallow populate|
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- */
- /* populate kasan shadow (for identity mapping and zero page mapping) */
- for_each_mem_detect_usable_block(i, &start, &end)
- kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
- if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- untracked_end = VMALLOC_START;
- /* shallowly populate kasan shadow for vmalloc and modules */
- kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
- POPULATE_SHALLOW);
- }
- /* populate kasan shadow for untracked memory */
- kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
- POPULATE_ZERO_SHADOW);
- kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
- POPULATE_ZERO_SHADOW);
- /* enable kasan */
- init_task.kasan_depth = 0;
- sclp_early_printk("KernelAddressSanitizer initialized\n");
-}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 85195c18b2e8..7838e9c70000 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -300,8 +300,6 @@ static int change_page_attr(unsigned long addr, unsigned long end,
if (addr == end)
return 0;
- if (end >= MODULES_END)
- return -EINVAL;
mutex_lock(&cpa_mutex);
pgdp = pgd_offset_k(addr);
do {
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 2de48b2c1b04..66ab68db9842 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -33,19 +33,9 @@ static struct ctl_table page_table_sysctl[] = {
{ }
};
-static struct ctl_table page_table_sysctl_dir[] = {
- {
- .procname = "vm",
- .maxlen = 0,
- .mode = 0555,
- .child = page_table_sysctl,
- },
- { }
-};
-
static int __init page_table_register_sysctl(void)
{
- return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+ return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
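The replacement drops the hand-rolled "vm" directory table: register_sysctl() takes the parent path as a string and creates intermediate directories itself. A minimal sketch of the same pattern, with a hypothetical knob name (my_counter is not part of this patch):

#include <linux/sysctl.h>

static int my_counter;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_counter",
		.data		= &my_counter,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init my_sysctl_init(void)
{
	/* shows up as /proc/sys/vm/my_counter; register_sysctl() returns
	 * the table header pointer, or NULL on allocation failure
	 */
	return register_sysctl("vm", my_table) ? 0 : -ENOMEM;
}
__initcall(my_sysctl_init);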
@@ -143,13 +133,7 @@ err_p4d:
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
- unsigned int old, new;
-
- do {
- old = atomic_read(v);
- new = old ^ bits;
- } while (atomic_cmpxchg(v, old, new) != old);
- return new;
+ return atomic_fetch_xor(bits, v) ^ bits;
}
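The rewrite relies on an XOR identity: atomic_fetch_xor() returns the value *before* the operation, while the old cmpxchg loop returned the value *after*, so XOR-ing the returned old value with bits once more reconstructs the new value. A sketch of the equivalence:

	/* atomic_fetch_xor(bits, v): atomically v ^= bits, returns old v */
	unsigned int old = atomic_fetch_xor(bits, v);
	unsigned int new = old ^ bits;	/* == the value now stored in v */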
#ifdef CONFIG_PGSTE
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 4113a7ffa149..242f95aa9801 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -5,6 +5,7 @@
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
+#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -664,6 +665,9 @@ static void __init memblock_region_swap(void *a, void *b, int size)
swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
}
+#ifdef CONFIG_KASAN
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+#endif
/*
* map whole physical memory to virtual memory (identity mapping)
* we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -733,6 +737,13 @@ void __init vmem_map_init(void)
SET_MEMORY_RW | SET_MEMORY_NX);
}
+#ifdef CONFIG_KASAN
+ for_each_mem_range(i, &base, &end)
+ __set_memory(__sha(base),
+ (__sha(end) - __sha(base)) >> PAGE_SHIFT,
+ SET_MEMORY_RW | SET_MEMORY_NX);
+#endif
+
__set_memory((unsigned long)_stext,
(unsigned long)(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
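Both call sites rely on KASAN's linear shadow mapping: every eight bytes of address space are tracked by one shadow byte at a fixed offset. The generic definition in <linux/kasan.h> is essentially:

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

so __sha(end) - __sha(base) is (end - base) >> 3, i.e. exactly the shadow region that the loop above marks writable and non-executable.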
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e16afacc8fd1..afc3f33788da 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -874,32 +874,15 @@ bool zpci_is_device_configured(struct zpci_dev *zdev)
* @fh: The general function handle supplied by the platform
*
* Given a device in the configuration state Configured, enables, scans and
- * adds it to the common code PCI subsystem if possible. If the PCI device is
- * parked because we can not yet create a PCI bus because we have not seen
- * function 0, it is ignored but will be scanned once function 0 appears.
- * If any failure occurs, the zpci_dev is left disabled.
+ * adds it to the common code PCI subsystem if possible. If any failure occurs,
+ * the zpci_dev is left disabled.
*
* Return: 0 on success, or an error code otherwise
*/
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
- int rc;
-
zpci_update_fh(zdev, fh);
- /* the PCI function will be scanned once function 0 appears */
- if (!zdev->zbus->bus)
- return 0;
-
- /* For function 0 on a multi-function bus scan whole bus as we might
- * have to pick up existing functions waiting for it to allow creating
- * the PCI bus
- */
- if (zdev->devfn == 0 && zdev->zbus->multifunction)
- rc = zpci_bus_scan_bus(zdev->zbus);
- else
- rc = zpci_bus_scan_device(zdev);
-
- return rc;
+ return zpci_bus_scan_device(zdev);
}
/**
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index a99926af2b69..32245b970a0c 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -85,9 +85,8 @@ int zpci_bus_scan_device(struct zpci_dev *zdev)
if (!pdev)
return -ENODEV;
- pci_bus_add_device(pdev);
pci_lock_rescan_remove();
- pci_bus_add_devices(zdev->zbus->bus);
+ pci_bus_add_device(pdev);
pci_unlock_rescan_remove();
return 0;
@@ -130,11 +129,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error)
* @zbus: the zbus to be scanned
*
* Enables and scans all PCI functions on the bus making them available to the
- * common PCI code. If there is no function 0 on the zbus nothing is scanned. If
- * a function does not have a slot yet because it was added to the zbus before
- * function 0 the slot is created. If a PCI function fails to be initialized
- * an error will be returned but attempts will still be made for all other
- * functions on the bus.
+ * common PCI code. If a PCI function fails to be initialized an error will be
+ * returned but attempts will still be made for all other functions on the bus.
*
* Return: 0 on success, an error value otherwise
*/
@@ -211,7 +207,6 @@ static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s
}
zbus->bus = bus;
- pci_bus_add_devices(bus);
return 0;
}
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index ac1d00980fa6..dbd5c53d8edf 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -10,7 +10,7 @@
#include <asm/ebcdic.h>
#include <asm/irq.h>
#include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/facility.h>
#include "sclp.h"
#include "sclp_rw.h"
@@ -336,7 +336,7 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
#define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL
-void __weak __init add_mem_detect_block(u64 start, u64 end) {}
+void __weak __init add_physmem_online_range(u64 start, u64 end) {}
int __init sclp_early_read_storage_info(void)
{
struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb;
@@ -369,7 +369,7 @@ int __init sclp_early_read_storage_info(void)
if (!sccb->entries[sn])
continue;
rn = sccb->entries[sn] >> 16;
- add_mem_detect_block((rn - 1) * rzm, rn * rzm);
+ add_physmem_online_range((rn - 1) * rzm, rn * rzm);
}
break;
case 0x0310:
@@ -382,6 +382,6 @@ int __init sclp_early_read_storage_info(void)
return 0;
fail:
- mem_detect.count = 0;
+ physmem_info.range_count = 0;
return -EIO;
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f4cc1720156f..70855af8c281 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -122,7 +122,13 @@ static struct hrtimer ap_poll_timer;
* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.
*/
-static unsigned long long poll_timeout = 250000;
+static unsigned long poll_high_timeout = 250000UL;
+
+/*
+ * Some state machine states only require low frequency polling.
+ * We use a 25 Hz poll rate for these.
+ */
+static unsigned long poll_low_timeout = 40000000UL;
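Both intervals are nanoseconds, so the comments translate directly into poll rates: 1e9 / 250000 = 4000 polls per second, and 1e9 / 40000000 = 25. A quick sanity check, assuming the kernel's NSEC_PER_SEC constant:

	unsigned long high_hz = NSEC_PER_SEC / 250000UL;	/* 4000 Hz */
	unsigned long low_hz  = NSEC_PER_SEC / 40000000UL;	/*   25 Hz */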
/* Maximum domain id, if not given via qci */
static int ap_max_domain_id = 15;
@@ -201,6 +207,18 @@ static inline int ap_qact_available(void)
}
/*
+ * ap_sb_available(): Test if the AP secure binding facility is available.
+ *
+ * Returns 1 if the secure binding facility is available.
+ */
+int ap_sb_available(void)
+{
+ if (ap_qci_info)
+ return ap_qci_info->apsb;
+ return 0;
+}
+
+/*
* ap_fetch_qci_info(): Fetch cryptographic config info
*
* Returns the ap configuration info fetched via PQAP(QCI).
@@ -248,13 +266,13 @@ static void __init ap_init_qci_info(void)
 	AP_DBF_INFO("%s successfully fetched initial qci info\n", __func__);
if (ap_qci_info->apxa) {
- if (ap_qci_info->Na) {
- ap_max_adapter_id = ap_qci_info->Na;
+ if (ap_qci_info->na) {
+ ap_max_adapter_id = ap_qci_info->na;
AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
__func__, ap_max_adapter_id);
}
- if (ap_qci_info->Nd) {
- ap_max_domain_id = ap_qci_info->Nd;
+ if (ap_qci_info->nd) {
+ ap_max_domain_id = ap_qci_info->nd;
AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
__func__, ap_max_domain_id);
}
@@ -324,35 +342,32 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
/*
* ap_queue_info(): Check and get AP queue info.
- * Returns true if TAPQ succeeded and the info is filled or
- * false otherwise.
+ * Returns: 1 if APQN exists and info is filled,
+ *	0 if APQN seems to exist but there is no info
+ *	available (e.g. caused by an async pending error),
+ *	-1 if invalid APQN, TAPQ error or AP queue status which
+ *	indicates there is no APQN.
*/
-static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
- int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
+static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
+ int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
{
struct ap_queue_status status;
- union {
- unsigned long value;
- struct {
- unsigned int fac : 32; /* facility bits */
- unsigned int at : 8; /* ap type */
- unsigned int _res1 : 8;
- unsigned int _res2 : 4;
- unsigned int ml : 4; /* apxl ml */
- unsigned int _res3 : 4;
- unsigned int qd : 4; /* queue depth */
- } tapq_gr2;
- } tapq_info;
+ struct ap_tapq_gr2 tapq_info;
tapq_info.value = 0;
 	/* make sure we don't run into a specification exception */
if (AP_QID_CARD(qid) > ap_max_adapter_id ||
AP_QID_QUEUE(qid) > ap_max_domain_id)
- return false;
+ return -1;
/* call TAPQ on this APQN */
- status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value);
+ status = ap_test_queue(qid, ap_apft_available(), &tapq_info);
+
+	/* handle pending async error by returning 'no info available' */
+ if (status.async)
+ return 0;
+
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
@@ -365,11 +380,11 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
* there is at least one of the mode bits set.
*/
if (WARN_ON_ONCE(!tapq_info.value))
- return false;
- *q_type = tapq_info.tapq_gr2.at;
- *q_fac = tapq_info.tapq_gr2.fac;
- *q_depth = tapq_info.tapq_gr2.qd;
- *q_ml = tapq_info.tapq_gr2.ml;
+ return 0;
+ *q_type = tapq_info.at;
+ *q_fac = tapq_info.fac;
+ *q_depth = tapq_info.qd;
+ *q_ml = tapq_info.ml;
*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
switch (*q_type) {
@@ -389,12 +404,12 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
default:
break;
}
- return true;
+ return 1;
default:
/*
 	 * A response code which indicates there is no info available.
*/
- return false;
+ return -1;
}
}
@@ -412,10 +427,13 @@ void ap_wait(enum ap_sm_wait wait)
break;
}
fallthrough;
- case AP_SM_WAIT_TIMEOUT:
+ case AP_SM_WAIT_LOW_TIMEOUT:
+ case AP_SM_WAIT_HIGH_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
- hr_time = poll_timeout;
+ hr_time =
+ wait == AP_SM_WAIT_LOW_TIMEOUT ?
+ poll_low_timeout : poll_high_timeout;
hrtimer_forward_now(&ap_poll_timer, hr_time);
hrtimer_restart(&ap_poll_timer);
}
@@ -1168,7 +1186,7 @@ EXPORT_SYMBOL(ap_parse_mask_str);
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+ return sysfs_emit(buf, "%d\n", ap_domain_index);
}
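This and the following hunks are a mechanical conversion: sysfs_emit() knows the sysfs buffer is a full page, verifies that buf is page-aligned, and caps output at PAGE_SIZE, so the explicit size argument disappears from every call site. The resulting shape of a show() callback, sketched with a hypothetical value:

static ssize_t example_show(struct bus_type *bus, char *buf)
{
	/* returns the number of bytes emitted, never more than PAGE_SIZE */
	return sysfs_emit(buf, "%d\n", example_value);
}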
static ssize_t ap_domain_store(struct bus_type *bus,
@@ -1196,14 +1214,13 @@ static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_qci_info) /* QCI not supported */
- return scnprintf(buf, PAGE_SIZE, "not supported\n");
+ return sysfs_emit(buf, "not supported\n");
- return scnprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_qci_info->adm[0], ap_qci_info->adm[1],
- ap_qci_info->adm[2], ap_qci_info->adm[3],
- ap_qci_info->adm[4], ap_qci_info->adm[5],
- ap_qci_info->adm[6], ap_qci_info->adm[7]);
+ return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->adm[0], ap_qci_info->adm[1],
+ ap_qci_info->adm[2], ap_qci_info->adm[3],
+ ap_qci_info->adm[4], ap_qci_info->adm[5],
+ ap_qci_info->adm[6], ap_qci_info->adm[7]);
}
static BUS_ATTR_RO(ap_control_domain_mask);
@@ -1211,14 +1228,13 @@ static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_qci_info) /* QCI not supported */
- return scnprintf(buf, PAGE_SIZE, "not supported\n");
+ return sysfs_emit(buf, "not supported\n");
- return scnprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_qci_info->aqm[0], ap_qci_info->aqm[1],
- ap_qci_info->aqm[2], ap_qci_info->aqm[3],
- ap_qci_info->aqm[4], ap_qci_info->aqm[5],
- ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
+ return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->aqm[0], ap_qci_info->aqm[1],
+ ap_qci_info->aqm[2], ap_qci_info->aqm[3],
+ ap_qci_info->aqm[4], ap_qci_info->aqm[5],
+ ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
}
static BUS_ATTR_RO(ap_usage_domain_mask);
@@ -1226,29 +1242,27 @@ static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_qci_info) /* QCI not supported */
- return scnprintf(buf, PAGE_SIZE, "not supported\n");
+ return sysfs_emit(buf, "not supported\n");
- return scnprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_qci_info->apm[0], ap_qci_info->apm[1],
- ap_qci_info->apm[2], ap_qci_info->apm[3],
- ap_qci_info->apm[4], ap_qci_info->apm[5],
- ap_qci_info->apm[6], ap_qci_info->apm[7]);
+ return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->apm[0], ap_qci_info->apm[1],
+ ap_qci_info->apm[2], ap_qci_info->apm[3],
+ ap_qci_info->apm[4], ap_qci_info->apm[5],
+ ap_qci_info->apm[6], ap_qci_info->apm[7]);
}
static BUS_ATTR_RO(ap_adapter_mask);
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- ap_irq_flag ? 1 : 0);
+ return sysfs_emit(buf, "%d\n", ap_irq_flag ? 1 : 0);
}
static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+ return sysfs_emit(buf, "%d\n", ap_config_time);
}
static ssize_t config_time_store(struct bus_type *bus,
@@ -1267,17 +1281,20 @@ static BUS_ATTR_RW(config_time);
static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+ return sysfs_emit(buf, "%d\n", ap_poll_kthread ? 1 : 0);
}
static ssize_t poll_thread_store(struct bus_type *bus,
const char *buf, size_t count)
{
- int flag, rc;
+ bool value;
+ int rc;
- if (sscanf(buf, "%d\n", &flag) != 1)
- return -EINVAL;
- if (flag) {
+ rc = kstrtobool(buf, &value);
+ if (rc)
+ return rc;
+
+ if (value) {
rc = ap_poll_thread_start();
if (rc)
count = rc;
@@ -1291,21 +1308,25 @@ static BUS_ATTR_RW(poll_thread);
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+ return sysfs_emit(buf, "%lu\n", poll_high_timeout);
}
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
size_t count)
{
- unsigned long long time;
+ unsigned long value;
ktime_t hr_time;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &value);
+ if (rc)
+ return rc;
/* 120 seconds = maximum poll interval */
- if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
- time > 120000000000ULL)
+ if (value > 120000000000UL)
return -EINVAL;
- poll_timeout = time;
- hr_time = poll_timeout;
+ poll_high_timeout = value;
+ hr_time = poll_high_timeout;
spin_lock_bh(&ap_poll_timer_lock);
hrtimer_cancel(&ap_poll_timer);
@@ -1320,14 +1341,14 @@ static BUS_ATTR_RW(poll_timeout);
static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
+ return sysfs_emit(buf, "%d\n", ap_max_domain_id);
}
static BUS_ATTR_RO(ap_max_domain_id);
static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
+ return sysfs_emit(buf, "%d\n", ap_max_adapter_id);
}
static BUS_ATTR_RO(ap_max_adapter_id);
@@ -1338,10 +1359,9 @@ static ssize_t apmask_show(struct bus_type *bus, char *buf)
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- rc = scnprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.apm[0], ap_perms.apm[1],
- ap_perms.apm[2], ap_perms.apm[3]);
+ rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.apm[0], ap_perms.apm[1],
+ ap_perms.apm[2], ap_perms.apm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
@@ -1431,10 +1451,9 @@ static ssize_t aqmask_show(struct bus_type *bus, char *buf)
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- rc = scnprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.aqm[0], ap_perms.aqm[1],
- ap_perms.aqm[2], ap_perms.aqm[3]);
+ rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.aqm[0], ap_perms.aqm[1],
+ ap_perms.aqm[2], ap_perms.aqm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
@@ -1520,8 +1539,7 @@ static BUS_ATTR_RW(aqmask);
static ssize_t scans_show(struct bus_type *bus, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%llu\n",
- atomic64_read(&ap_scan_bus_count));
+ return sysfs_emit(buf, "%llu\n", atomic64_read(&ap_scan_bus_count));
}
static ssize_t scans_store(struct bus_type *bus, const char *buf,
@@ -1543,15 +1561,40 @@ static ssize_t bindings_show(struct bus_type *bus, char *buf)
ap_calc_bound_apqns(&apqns, &n);
if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
- rc = scnprintf(buf, PAGE_SIZE, "%u/%u (complete)\n", n, apqns);
+ rc = sysfs_emit(buf, "%u/%u (complete)\n", n, apqns);
else
- rc = scnprintf(buf, PAGE_SIZE, "%u/%u\n", n, apqns);
+ rc = sysfs_emit(buf, "%u/%u\n", n, apqns);
return rc;
}
static BUS_ATTR_RO(bindings);
+static ssize_t features_show(struct bus_type *bus, char *buf)
+{
+ int n = 0;
+
+ if (!ap_qci_info) /* QCI not supported */
+ return sysfs_emit(buf, "-\n");
+
+ if (ap_qci_info->apsc)
+ n += sysfs_emit_at(buf, n, "APSC ");
+ if (ap_qci_info->apxa)
+ n += sysfs_emit_at(buf, n, "APXA ");
+ if (ap_qci_info->qact)
+ n += sysfs_emit_at(buf, n, "QACT ");
+ if (ap_qci_info->rc8a)
+ n += sysfs_emit_at(buf, n, "RC8A ");
+ if (ap_qci_info->apsb)
+ n += sysfs_emit_at(buf, n, "APSB ");
+
+ sysfs_emit_at(buf, n == 0 ? 0 : n - 1, "\n");
+
+ return n;
+}
+
+static BUS_ATTR_RO(features);
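Each sysfs_emit_at() above appends a facility name plus a trailing blank; the final call then overwrites that last blank with a newline by writing at offset n - 1, while the returned length n stays unchanged. The same trick in isolation (the flag names are made up):

	n += sysfs_emit_at(buf, n, "FOO ");
	n += sysfs_emit_at(buf, n, "BAR ");
	/* rewrite the trailing ' ' of "FOO BAR " as '\n' in place */
	sysfs_emit_at(buf, n == 0 ? 0 : n - 1, "\n");
	return n;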
+
static struct attribute *ap_bus_attrs[] = {
&bus_attr_ap_domain.attr,
&bus_attr_ap_control_domain_mask.attr,
@@ -1567,6 +1610,7 @@ static struct attribute *ap_bus_attrs[] = {
&bus_attr_aqmask.attr,
&bus_attr_scans.attr,
&bus_attr_bindings.attr,
+ &bus_attr_features.attr,
NULL,
};
ATTRIBUTE_GROUPS(ap_bus);
@@ -1762,12 +1806,12 @@ static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
*/
static inline void ap_scan_domains(struct ap_card *ac)
{
+ int rc, dom, depth, type, ml;
bool decfg, chkstop;
- ap_qid_t qid;
- unsigned int func;
- struct device *dev;
struct ap_queue *aq;
- int rc, dom, depth, type, ml;
+ struct device *dev;
+ unsigned int func;
+ ap_qid_t qid;
/*
* Go through the configuration for the domains and compare them
@@ -1786,20 +1830,24 @@ static inline void ap_scan_domains(struct ap_card *ac)
AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
__func__, ac->id, dom);
device_unregister(dev);
- put_device(dev);
}
- continue;
+ goto put_dev_and_continue;
}
/* domain is valid, get info from this APQN */
- if (!ap_queue_info(qid, &type, &func, &depth,
- &ml, &decfg, &chkstop)) {
- if (aq) {
+ rc = ap_queue_info(qid, &type, &func, &depth,
+ &ml, &decfg, &chkstop);
+ switch (rc) {
+ case -1:
+ if (dev) {
AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
__func__, ac->id, dom);
device_unregister(dev);
- put_device(dev);
}
- continue;
+ fallthrough;
+ case 0:
+ goto put_dev_and_continue;
+ default:
+ break;
}
/* if no queue device exists, create a new one */
if (!aq) {
@@ -1915,12 +1963,12 @@ put_dev_and_continue:
*/
static inline void ap_scan_adapter(int ap)
{
+ int rc, dom, depth, type, comp_type, ml;
bool decfg, chkstop;
- ap_qid_t qid;
- unsigned int func;
- struct device *dev;
struct ap_card *ac;
- int rc, dom, depth, type, comp_type, ml;
+ struct device *dev;
+ unsigned int func;
+ ap_qid_t qid;
/* Is there currently a card device for this adapter ? */
dev = bus_find_device(&ap_bus_type, NULL,
@@ -1950,11 +1998,11 @@ static inline void ap_scan_adapter(int ap)
if (ap_test_config_usage_domain(dom)) {
qid = AP_MKQID(ap, dom);
if (ap_queue_info(qid, &type, &func, &depth,
- &ml, &decfg, &chkstop))
+ &ml, &decfg, &chkstop) > 0)
break;
}
if (dom > ap_max_domain_id) {
- /* Could not find a valid APQN for this adapter */
+			/* Could not find any valid APQN for this adapter */
if (ac) {
AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
__func__, ap);
@@ -1979,7 +2027,6 @@ static inline void ap_scan_adapter(int ap)
}
return;
}
-
if (ac) {
/* Check APQN against existing card device for changes */
if (ac->raw_hwtype != type) {
@@ -1988,9 +2035,10 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
- } else if (ac->functions != func) {
+ } else if ((ac->functions & TAPQ_CARD_FUNC_CMP_MASK) !=
+ (func & TAPQ_CARD_FUNC_CMP_MASK)) {
AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
- __func__, ap, type);
+ __func__, ap, func);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
@@ -2245,7 +2293,7 @@ static int __init ap_module_init(void)
* If we are running under z/VM adjust polling to z/VM polling rate.
*/
if (MACHINE_IS_VM)
- poll_timeout = 1500000;
+ poll_high_timeout = 1500000;
hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ap_poll_timer.function = ap_poll_timeout;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0f17933954fb..101fb324476f 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -39,22 +39,32 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
return (*ptr & (0x80000000u >> nr)) != 0;
}
-#define AP_RESPONSE_NORMAL 0x00
-#define AP_RESPONSE_Q_NOT_AVAIL 0x01
-#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
-#define AP_RESPONSE_DECONFIGURED 0x03
-#define AP_RESPONSE_CHECKSTOPPED 0x04
-#define AP_RESPONSE_BUSY 0x05
-#define AP_RESPONSE_INVALID_ADDRESS 0x06
-#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
-#define AP_RESPONSE_INVALID_GISA 0x08
-#define AP_RESPONSE_Q_FULL 0x10
-#define AP_RESPONSE_NO_PENDING_REPLY 0x10
-#define AP_RESPONSE_INDEX_TOO_BIG 0x11
-#define AP_RESPONSE_NO_FIRST_PART 0x13
-#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
-#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
-#define AP_RESPONSE_INVALID_DOMAIN 0x42
+#define AP_RESPONSE_NORMAL 0x00
+#define AP_RESPONSE_Q_NOT_AVAIL 0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
+#define AP_RESPONSE_DECONFIGURED 0x03
+#define AP_RESPONSE_CHECKSTOPPED 0x04
+#define AP_RESPONSE_BUSY 0x05
+#define AP_RESPONSE_INVALID_ADDRESS 0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
+#define AP_RESPONSE_INVALID_GISA 0x08
+#define AP_RESPONSE_Q_BOUND_TO_ANOTHER 0x09
+#define AP_RESPONSE_STATE_CHANGE_IN_PROGRESS 0x0A
+#define AP_RESPONSE_Q_NOT_BOUND 0x0B
+#define AP_RESPONSE_Q_FULL 0x10
+#define AP_RESPONSE_NO_PENDING_REPLY 0x10
+#define AP_RESPONSE_INDEX_TOO_BIG 0x11
+#define AP_RESPONSE_NO_FIRST_PART 0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
+#define AP_RESPONSE_Q_BIND_ERROR 0x30
+#define AP_RESPONSE_Q_NOT_AVAIL_FOR_ASSOC 0x31
+#define AP_RESPONSE_Q_NOT_EMPTY 0x32
+#define AP_RESPONSE_BIND_LIMIT_EXCEEDED 0x33
+#define AP_RESPONSE_INVALID_ASSOC_SECRET 0x34
+#define AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE 0x35
+#define AP_RESPONSE_ASSOC_FAILED 0x36
+#define AP_RESPONSE_INVALID_DOMAIN 0x42
/*
* Known device types
@@ -92,6 +102,7 @@ enum ap_sm_state {
AP_SM_STATE_IDLE,
AP_SM_STATE_WORKING,
AP_SM_STATE_QUEUE_FULL,
+ AP_SM_STATE_ASSOC_WAIT,
NR_AP_SM_STATES
};
@@ -108,10 +119,11 @@ enum ap_sm_event {
* AP queue state wait behaviour
*/
enum ap_sm_wait {
- AP_SM_WAIT_AGAIN = 0, /* retry immediately */
- AP_SM_WAIT_TIMEOUT, /* wait for timeout */
- AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
- AP_SM_WAIT_NONE, /* no wait */
+ AP_SM_WAIT_AGAIN = 0, /* retry immediately */
+ AP_SM_WAIT_HIGH_TIMEOUT, /* poll high freq, wait for timeout */
+ AP_SM_WAIT_LOW_TIMEOUT, /* poll low freq, wait for timeout */
+ AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
+ AP_SM_WAIT_NONE, /* no wait */
NR_AP_SM_WAIT
};
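With the split, state machine handlers signal how urgently the queue needs to be polled again and ap_wait() picks the matching hrtimer interval. A sketch of the typical handler pattern under the new semantics (illustrative, condensed from the handlers in ap_queue.c below):

	/* replies expected soon: poll fast, or ride the interrupt */
	if (aq->queue_count > 0)
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	/* a reset is in flight: 25 Hz is plenty */
	if (aq->sm_state == AP_SM_STATE_RESET_WAIT)
		return AP_SM_WAIT_LOW_TIMEOUT;
	return AP_SM_WAIT_NONE;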
@@ -178,7 +190,7 @@ struct ap_device {
struct ap_card {
struct ap_device ap_dev;
int raw_hwtype; /* AP raw hardware type. */
- unsigned int functions; /* AP device function bitfield. */
+ unsigned int functions; /* TAPQ GR2 upper 32 facility bits */
int queue_depth; /* AP queue depth.*/
int id; /* AP card number. */
unsigned int maxmsgsize; /* AP msg limit for this card */
@@ -187,6 +199,9 @@ struct ap_card {
atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
+#define TAPQ_CARD_FUNC_CMP_MASK 0xFFFF0000
+#define ASSOC_IDX_INVALID 0x10000
+
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
struct ap_queue {
@@ -199,6 +214,7 @@ struct ap_queue {
bool chkstop; /* checkstop state */
ap_qid_t qid; /* AP queue id. */
bool interrupt; /* indicate if interrupts are enabled */
+ unsigned int assoc_idx; /* SE association index */
int queue_count; /* # messages currently on AP queue. */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
@@ -209,6 +225,7 @@ struct ap_queue {
struct list_head requestq; /* List of message yet to be sent. */
struct ap_message *reply; /* Per device reply message. */
enum ap_sm_state sm_state; /* ap queue state machine state */
+ int rapq_fbit; /* fbit arg for next rapq invocation */
int last_err_rc; /* last error state response code */
};
@@ -242,10 +259,10 @@ enum ap_fi_flags {
struct ap_message {
struct list_head list; /* Request queueing. */
- unsigned long long psmid; /* Message id. */
+ unsigned long psmid; /* Message id. */
void *msg; /* Pointer to message buffer. */
- unsigned int len; /* actual msg len in msg buffer */
- unsigned int bufsize; /* allocated msg buffer size */
+ size_t len; /* actual msg len in msg buffer */
+ size_t bufsize; /* allocated msg buffer size */
u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
struct ap_fi fi; /* Failure Injection cmd */
int rc; /* Return code for this message */
@@ -285,8 +302,8 @@ static inline void ap_release_message(struct ap_message *ap_msg)
* for the first time. Otherwise the ap message queue will get
* confused.
*/
-int ap_send(ap_qid_t, unsigned long long, void *, size_t);
-int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
+int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen);
+int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen);
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
@@ -296,6 +313,7 @@ void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);
void *ap_airq_ptr(void);
+int ap_sb_available(void);
void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 6b2170cf186e..b2bd477659a7 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -24,7 +24,7 @@ static ssize_t hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+ return sysfs_emit(buf, "%d\n", ac->ap_dev.device_type);
}
static DEVICE_ATTR_RO(hwtype);
@@ -34,7 +34,7 @@ static ssize_t raw_hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+ return sysfs_emit(buf, "%d\n", ac->raw_hwtype);
}
static DEVICE_ATTR_RO(raw_hwtype);
@@ -44,7 +44,7 @@ static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
{
struct ap_card *ac = to_ap_card(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+ return sysfs_emit(buf, "%d\n", ac->queue_depth);
}
static DEVICE_ATTR_RO(depth);
@@ -54,7 +54,7 @@ static ssize_t ap_functions_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+ return sysfs_emit(buf, "0x%08X\n", ac->functions);
}
static DEVICE_ATTR_RO(ap_functions);
@@ -70,7 +70,7 @@ static ssize_t request_count_show(struct device *dev,
spin_lock_bh(&ap_queues_lock);
req_cnt = atomic64_read(&ac->total_request_count);
spin_unlock_bh(&ap_queues_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ return sysfs_emit(buf, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -107,7 +107,7 @@ static ssize_t requestq_count_show(struct device *dev,
if (ac == aq->card)
reqq_cnt += aq->requestq_count;
spin_unlock_bh(&ap_queues_lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ return sysfs_emit(buf, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -126,7 +126,7 @@ static ssize_t pendingq_count_show(struct device *dev,
if (ac == aq->card)
penq_cnt += aq->pendingq_count;
spin_unlock_bh(&ap_queues_lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ return sysfs_emit(buf, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -134,8 +134,7 @@ static DEVICE_ATTR_RO(pendingq_count);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n",
- to_ap_dev(dev)->device_type);
+ return sysfs_emit(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR_RO(modalias);
@@ -145,7 +144,7 @@ static ssize_t config_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", ac->config ? 1 : 0);
+ return sysfs_emit(buf, "%d\n", ac->config ? 1 : 0);
}
static ssize_t config_store(struct device *dev,
@@ -179,7 +178,7 @@ static ssize_t chkstop_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", ac->chkstop ? 1 : 0);
+ return sysfs_emit(buf, "%d\n", ac->chkstop ? 1 : 0);
}
static DEVICE_ATTR_RO(chkstop);
@@ -189,7 +188,7 @@ static ssize_t max_msg_size_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", ac->maxmsgsize);
+ return sysfs_emit(buf, "%u\n", ac->maxmsgsize);
}
static DEVICE_ATTR_RO(max_msg_size);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 2637fe1df727..ed8f813653fe 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -18,6 +18,21 @@
static void __ap_flush_queue(struct ap_queue *aq);
+/*
+ * some AP queue helper functions
+ */
+
+static inline bool ap_q_supports_bind(struct ap_queue *aq)
+{
+ return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
+ ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
+}
+
+static inline bool ap_q_supports_assoc(struct ap_queue *aq)
+{
+ return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
+}
+
/**
* ap_queue_enable_irq(): Enable interrupt support on this AP queue.
* @aq: The AP queue
@@ -35,6 +50,8 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
+ if (status.async)
+ return -EPERM;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
@@ -59,7 +76,7 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
- * @length: The message length
+ * @msglen: The message length
* @special: Special Bit
*
* Returns AP queue status structure.
@@ -68,19 +85,21 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
int special)
{
if (special)
qid |= 0x400000UL;
- return ap_nqap(qid, psmid, msg, length);
+ return ap_nqap(qid, psmid, msg, msglen);
}
-int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
{
struct ap_queue_status status;
- status = __ap_send(qid, psmid, msg, length, 0);
+ status = __ap_send(qid, psmid, msg, msglen, 0);
+ if (status.async)
+ return -EPERM;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
@@ -95,13 +114,15 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
}
EXPORT_SYMBOL(ap_send);
-int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
{
struct ap_queue_status status;
if (!msg)
return -EINVAL;
- status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
+ status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
+ if (status.async)
+ return -EPERM;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
@@ -150,7 +171,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
do {
status = ap_dqap(aq->qid, &aq->reply->psmid,
aq->reply->msg, aq->reply->bufsize,
- &reslen, &resgr0);
+ &aq->reply->len, &reslen, &resgr0);
parts++;
} while (status.response_code == 0xFF && resgr0 != 0);
@@ -177,7 +198,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
break;
}
if (!found) {
- AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+ AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
__func__, aq->reply->psmid,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
}
@@ -210,6 +231,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
if (!aq->reply)
return AP_SM_WAIT_NONE;
status = ap_sm_recv(aq);
+ if (status.async)
+ return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0) {
@@ -221,7 +244,7 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
return aq->interrupt ?
- AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
aq->sm_state = AP_SM_STATE_IDLE;
return AP_SM_WAIT_NONE;
default:
@@ -261,6 +284,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
+ if (status.async)
+ return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count = max_t(int, 1, aq->queue_count + 1);
@@ -277,10 +302,10 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
case AP_RESPONSE_Q_FULL:
aq->sm_state = AP_SM_STATE_QUEUE_FULL;
return aq->interrupt ?
- AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
- return AP_SM_WAIT_TIMEOUT;
+ return AP_SM_WAIT_LOW_TIMEOUT;
case AP_RESPONSE_INVALID_DOMAIN:
AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
fallthrough;
@@ -322,13 +347,16 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
struct ap_queue_status status;
- status = ap_rapq(aq->qid);
+ status = ap_rapq(aq->qid, aq->rapq_fbit);
+ if (status.async)
+ return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
aq->interrupt = false;
- return AP_SM_WAIT_TIMEOUT;
+ aq->rapq_fbit = 0;
+ return AP_SM_WAIT_LOW_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
@@ -368,7 +396,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
return AP_SM_WAIT_AGAIN;
case AP_RESPONSE_BUSY:
case AP_RESPONSE_RESET_IN_PROGRESS:
- return AP_SM_WAIT_TIMEOUT;
+ return AP_SM_WAIT_LOW_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
@@ -412,7 +440,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
return AP_SM_WAIT_AGAIN;
fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
- return AP_SM_WAIT_TIMEOUT;
+ return AP_SM_WAIT_LOW_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
@@ -423,6 +451,59 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
}
}
+/**
+ * ap_sm_assoc_wait(): Test queue for completion of a pending
+ * association request.
+ * @aq: pointer to the AP queue
+ */
+static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_tapq_gr2 info;
+
+ status = ap_test_queue(aq->qid, 1, &info);
+ /* handle asynchronous error on this queue */
+ if (status.async && status.response_code) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+
+ /* check bs bits */
+ switch (info.bs) {
+ case AP_BS_Q_USABLE:
+ /* association is through */
+ aq->sm_state = AP_SM_STATE_IDLE;
+ AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
+ __func__, AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+ return AP_SM_WAIT_NONE;
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ /* association still pending */
+ return AP_SM_WAIT_LOW_TIMEOUT;
+ default:
+		/* reset happened from 'outside' or state is unknown */
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, info.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
/*
* AP state machine jump table
*/
@@ -451,6 +532,10 @@ static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
[AP_SM_EVENT_POLL] = ap_sm_read,
[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
+ [AP_SM_STATE_ASSOC_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
+ },
};
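ap_sm_event() dispatches through this table by indexing with the current state and the incoming event; the new AP_SM_STATE_ASSOC_WAIT row sends POLL to ap_sm_assoc_wait() and falls back to a queue reset on TIMEOUT. The dispatch itself is roughly a one-liner:

enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	return AP_SM_WAIT_NONE;
}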
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
@@ -490,9 +575,9 @@ static ssize_t request_count_show(struct device *dev,
spin_unlock_bh(&aq->lock);
if (valid)
- return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ return sysfs_emit(buf, "%llu\n", req_cnt);
else
- return scnprintf(buf, PAGE_SIZE, "-\n");
+ return sysfs_emit(buf, "-\n");
}
static ssize_t request_count_store(struct device *dev,
@@ -520,7 +605,7 @@ static ssize_t requestq_count_show(struct device *dev,
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ return sysfs_emit(buf, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -535,7 +620,7 @@ static ssize_t pendingq_count_show(struct device *dev,
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ return sysfs_emit(buf, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -550,14 +635,14 @@ static ssize_t reset_show(struct device *dev,
switch (aq->sm_state) {
case AP_SM_STATE_RESET_START:
case AP_SM_STATE_RESET_WAIT:
- rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+ rc = sysfs_emit(buf, "Reset in progress.\n");
break;
case AP_SM_STATE_WORKING:
case AP_SM_STATE_QUEUE_FULL:
- rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+ rc = sysfs_emit(buf, "Reset Timer armed.\n");
break;
default:
- rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+ rc = sysfs_emit(buf, "No Reset Timer set.\n");
}
spin_unlock_bh(&aq->lock);
return rc;
@@ -591,11 +676,11 @@ static ssize_t interrupt_show(struct device *dev,
spin_lock_bh(&aq->lock);
if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
- rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+ rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
else if (aq->interrupt)
- rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+ rc = sysfs_emit(buf, "Interrupts enabled.\n");
else
- rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+ rc = sysfs_emit(buf, "Interrupts disabled.\n");
spin_unlock_bh(&aq->lock);
return rc;
}
@@ -609,7 +694,7 @@ static ssize_t config_show(struct device *dev,
int rc;
spin_lock_bh(&aq->lock);
- rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
+ rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
spin_unlock_bh(&aq->lock);
return rc;
}
@@ -623,13 +708,33 @@ static ssize_t chkstop_show(struct device *dev,
int rc;
spin_lock_bh(&aq->lock);
- rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->chkstop ? 1 : 0);
+ rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR_RO(chkstop);
+static ssize_t ap_functions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_gr2 info;
+
+ status = ap_test_queue(aq->qid, 1, &info);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ return sysfs_emit(buf, "0x%08X\n", info.fac);
+}
+
+static DEVICE_ATTR_RO(ap_functions);
+
#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -641,50 +746,46 @@ static ssize_t states_show(struct device *dev,
/* queue device state */
switch (aq->dev_state) {
case AP_DEV_STATE_UNINITIATED:
- rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
+ rc = sysfs_emit(buf, "UNINITIATED\n");
break;
case AP_DEV_STATE_OPERATING:
- rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
+ rc = sysfs_emit(buf, "OPERATING");
break;
case AP_DEV_STATE_SHUTDOWN:
- rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
+ rc = sysfs_emit(buf, "SHUTDOWN");
break;
case AP_DEV_STATE_ERROR:
- rc = scnprintf(buf, PAGE_SIZE, "ERROR");
+ rc = sysfs_emit(buf, "ERROR");
break;
default:
- rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
+ rc = sysfs_emit(buf, "UNKNOWN");
}
/* state machine state */
if (aq->dev_state) {
switch (aq->sm_state) {
case AP_SM_STATE_RESET_START:
- rc += scnprintf(buf + rc, PAGE_SIZE - rc,
- " [RESET_START]\n");
+ rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
break;
case AP_SM_STATE_RESET_WAIT:
- rc += scnprintf(buf + rc, PAGE_SIZE - rc,
- " [RESET_WAIT]\n");
+ rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
break;
case AP_SM_STATE_SETIRQ_WAIT:
- rc += scnprintf(buf + rc, PAGE_SIZE - rc,
- " [SETIRQ_WAIT]\n");
+ rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
break;
case AP_SM_STATE_IDLE:
- rc += scnprintf(buf + rc, PAGE_SIZE - rc,
- " [IDLE]\n");
+ rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
break;
case AP_SM_STATE_WORKING:
- rc += scnprintf(buf + rc, PAGE_SIZE - rc,
- " [WORKING]\n");
+ rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
break;
case AP_SM_STATE_QUEUE_FULL:
- rc += scnprintf(buf + rc, PAGE_SIZE - rc,
- " [FULL]\n");
+ rc += sysfs_emit_at(buf, rc, " [FULL]\n");
+ break;
+ case AP_SM_STATE_ASSOC_WAIT:
+ rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
break;
default:
- rc += scnprintf(buf + rc, PAGE_SIZE - rc,
- " [UNKNOWN]\n");
+ rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
}
}
spin_unlock_bh(&aq->lock);
@@ -705,33 +806,33 @@ static ssize_t last_err_rc_show(struct device *dev,
switch (rc) {
case AP_RESPONSE_NORMAL:
- return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
+ return sysfs_emit(buf, "NORMAL\n");
case AP_RESPONSE_Q_NOT_AVAIL:
- return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
+ return sysfs_emit(buf, "Q_NOT_AVAIL\n");
case AP_RESPONSE_RESET_IN_PROGRESS:
- return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
+ return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
case AP_RESPONSE_DECONFIGURED:
- return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
+ return sysfs_emit(buf, "DECONFIGURED\n");
case AP_RESPONSE_CHECKSTOPPED:
- return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
+ return sysfs_emit(buf, "CHECKSTOPPED\n");
case AP_RESPONSE_BUSY:
- return scnprintf(buf, PAGE_SIZE, "BUSY\n");
+ return sysfs_emit(buf, "BUSY\n");
case AP_RESPONSE_INVALID_ADDRESS:
- return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
+ return sysfs_emit(buf, "INVALID_ADDRESS\n");
case AP_RESPONSE_OTHERWISE_CHANGED:
- return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
+ return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
case AP_RESPONSE_Q_FULL:
- return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
+ return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
case AP_RESPONSE_INDEX_TOO_BIG:
- return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
+ return sysfs_emit(buf, "INDEX_TOO_BIG\n");
case AP_RESPONSE_NO_FIRST_PART:
- return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
+ return sysfs_emit(buf, "NO_FIRST_PART\n");
case AP_RESPONSE_MESSAGE_TOO_BIG:
- return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
+ return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
case AP_RESPONSE_REQ_FAC_NOT_INST:
- return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
+ return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
default:
- return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
+ return sysfs_emit(buf, "response code %d\n", rc);
}
}
static DEVICE_ATTR_RO(last_err_rc);
@@ -745,6 +846,7 @@ static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_interrupt.attr,
&dev_attr_config.attr,
&dev_attr_chkstop.attr,
+ &dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
&dev_attr_states.attr,
&dev_attr_last_err_rc.attr,
@@ -766,6 +868,186 @@ static struct device_type ap_queue_type = {
.groups = ap_queue_dev_attr_groups,
};
+static ssize_t se_bind_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_gr2 info;
+
+ if (!ap_q_supports_bind(aq))
+ return sysfs_emit(buf, "-\n");
+
+ status = ap_test_queue(aq->qid, 1, &info);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+ switch (info.bs) {
+ case AP_BS_Q_USABLE:
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ return sysfs_emit(buf, "bound\n");
+ default:
+ return sysfs_emit(buf, "unbound\n");
+ }
+}
+
+static ssize_t se_bind_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ bool value;
+ int rc;
+
+ if (!ap_q_supports_bind(aq))
+ return -EINVAL;
+
+ /* only 0 (unbind) and 1 (bind) allowed */
+ rc = kstrtobool(buf, &value);
+ if (rc)
+ return rc;
+
+ if (value) {
+ /* bind, do BAPQ */
+ spin_lock_bh(&aq->lock);
+ if (aq->sm_state < AP_SM_STATE_IDLE) {
+ spin_unlock_bh(&aq->lock);
+ return -EBUSY;
+ }
+ status = ap_bapq(aq->qid);
+ spin_unlock_bh(&aq->lock);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+ } else {
+ /* unbind, set F bit arg and trigger RAPQ */
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ aq->rapq_fbit = 1;
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(se_bind);
+
+static ssize_t se_associate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_gr2 info;
+
+ if (!ap_q_supports_assoc(aq))
+ return sysfs_emit(buf, "-\n");
+
+ status = ap_test_queue(aq->qid, 1, &info);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ switch (info.bs) {
+ case AP_BS_Q_USABLE:
+ if (aq->assoc_idx == ASSOC_IDX_INVALID) {
+ AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
+ return -EIO;
+ }
+ return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ if (aq->assoc_idx != ASSOC_IDX_INVALID)
+ return sysfs_emit(buf, "association pending\n");
+ fallthrough;
+ default:
+ return sysfs_emit(buf, "unassociated\n");
+ }
+}
+
+static ssize_t se_associate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ unsigned int value;
+ int rc;
+
+ if (!ap_q_supports_assoc(aq))
+ return -EINVAL;
+
+ /* association index needs to be >= 0 */
+ rc = kstrtouint(buf, 0, &value);
+ if (rc)
+ return rc;
+ if (value >= ASSOC_IDX_INVALID)
+ return -EINVAL;
+
+ spin_lock_bh(&aq->lock);
+
+ /* sm should be in idle state */
+ if (aq->sm_state != AP_SM_STATE_IDLE) {
+ spin_unlock_bh(&aq->lock);
+ return -EBUSY;
+ }
+
+ /* already associated or association pending ? */
+ if (aq->assoc_idx != ASSOC_IDX_INVALID) {
+ spin_unlock_bh(&aq->lock);
+ return -EINVAL;
+ }
+
+ /* trigger the asynchronous association request */
+ status = ap_aapq(aq->qid, value);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
+ aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
+ aq->assoc_idx = value;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ break;
+ default:
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(se_associate);
+
+static struct attribute *ap_queue_dev_sb_attrs[] = {
+ &dev_attr_se_bind.attr,
+ &dev_attr_se_associate.attr,
+ NULL
+};
+
+static struct attribute_group ap_queue_dev_sb_attr_group = {
+ .attrs = ap_queue_dev_sb_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
+ &ap_queue_dev_sb_attr_group,
+ NULL
+};
+
static void ap_queue_device_release(struct device *dev)
{
struct ap_queue *aq = to_ap_queue(dev);
@@ -787,6 +1069,9 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
+	/* add optional SE secure binding attributes group */
+ if (ap_sb_available() && is_prot_virt_guest())
+ aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
aq->qid = qid;
aq->interrupt = false;
spin_lock_init(&aq->lock);
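Assigning dev->groups before device_register() is the race-free way to add optional sysfs attributes: the driver core creates the files atomically with the device, so se_bind and se_associate only ever exist on SE-capable systems and never appear or vanish after udev has seen the device. Condensed, the pattern is:

	struct device *dev = &aq->ap_dev.device;

	/* must be set before device_register() so the attribute files
	 * are created together with the device
	 */
	if (ap_sb_available() && is_prot_virt_guest())
		dev->groups = ap_queue_dev_sb_attr_groups;

From userspace a bind is then simply a write of 1 to the queue device's se_bind attribute.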
@@ -922,7 +1207,7 @@ void ap_queue_remove(struct ap_queue *aq)
* to the initial value AP_DEV_STATE_UNINITIATED.
*/
spin_lock_bh(&aq->lock);
- ap_zapq(aq->qid);
+ ap_zapq(aq->qid, 0);
aq->dev_state = AP_DEV_STATE_UNINITIATED;
spin_unlock_bh(&aq->lock);
}
@@ -933,6 +1218,7 @@ void ap_queue_init_state(struct ap_queue *aq)
aq->dev_state = AP_DEV_STATE_OPERATING;
aq->sm_state = AP_SM_STATE_RESET_START;
aq->last_err_rc = 0;
+ aq->assoc_idx = ASSOC_IDX_INVALID;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 997b524bdd2b..9341c000da41 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -59,14 +59,8 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
kfree(matrix_dev);
}
-static int matrix_bus_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
static struct bus_type matrix_bus = {
.name = "matrix",
- .match = &matrix_bus_match,
};
static struct device_driver matrix_driver = {
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 72e10abb103a..cfbcb864ab63 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -599,9 +599,9 @@ out_unlock:
static void vfio_ap_matrix_init(struct ap_config_info *info,
struct ap_matrix *matrix)
{
- matrix->apm_max = info->apxa ? info->Na : 63;
- matrix->aqm_max = info->apxa ? info->Nd : 15;
- matrix->adm_max = info->apxa ? info->Nd : 15;
+ matrix->apm_max = info->apxa ? info->na : 63;
+ matrix->aqm_max = info->apxa ? info->nd : 15;
+ matrix->adm_max = info->apxa ? info->nd : 15;
}
static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
@@ -1657,7 +1657,7 @@ static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
if (!q)
return 0;
retry_zapq:
- status = ap_zapq(q->apqn);
+ status = ap_zapq(q->apqn, 0);
q->reset_rc = status.response_code;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
@@ -2115,8 +2115,8 @@ static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
{
bool apid_cleared;
struct ap_queue_status status;
- unsigned long apid, apqi, info;
- int qtype, qtype_mask = 0xff000000;
+ unsigned long apid, apqi;
+ struct ap_tapq_gr2 info;
for_each_set_bit_inv(apid, apm, AP_DEVICES) {
apid_cleared = false;
@@ -2133,15 +2133,13 @@ static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_BUSY:
- qtype = info & qtype_mask;
-
/*
* The vfio_ap device driver only
* supports CEX4 and newer adapters, so
* remove the APID if the adapter is
* older than a CEX4.
*/
- if (qtype < AP_DEVICE_TYPE_CEX4) {
+ if (info.at < AP_DEVICE_TYPE_CEX4) {
clear_bit_inv(apid, apm);
apid_cleared = true;
}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 6fe05bb82c77..896cb3f2a375 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -159,25 +159,20 @@ static ssize_t ioctlmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int i, rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- buf[0] = '0';
- buf[1] = 'x';
+ n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
- snprintf(buf + 2 + 2 * i * sizeof(long),
- PAGE_SIZE - 2 - 2 * i * sizeof(long),
- "%016lx", zcdndev->perms.ioctlm[i]);
- buf[2 + 2 * i * sizeof(long)] = '\n';
- buf[2 + 2 * i * sizeof(long) + 1] = '\0';
- rc = 2 + 2 * i * sizeof(long) + 1;
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
- return rc;
+ return n;
}
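The rewritten show() routines build the "0x%016lx..." output incrementally: sysfs_emit() seeds the buffer with the "0x" prefix and each sysfs_emit_at() appends one 64-bit word at the current offset n, replacing the old error-prone manual index arithmetic. The pattern, sketched for a hypothetical 256-bit mask:

	unsigned long mask[4] = { 0 };	/* stand-in for perms.ioctlm etc. */
	int i, n;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < ARRAY_SIZE(mask); i++)
		n += sysfs_emit_at(buf, n, "%016lx", mask[i]);
	n += sysfs_emit_at(buf, n, "\n");
	return n;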
static ssize_t ioctlmask_store(struct device *dev,
@@ -201,25 +196,20 @@ static ssize_t apmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int i, rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- buf[0] = '0';
- buf[1] = 'x';
+ n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
- snprintf(buf + 2 + 2 * i * sizeof(long),
- PAGE_SIZE - 2 - 2 * i * sizeof(long),
- "%016lx", zcdndev->perms.apm[i]);
- buf[2 + 2 * i * sizeof(long)] = '\n';
- buf[2 + 2 * i * sizeof(long) + 1] = '\0';
- rc = 2 + 2 * i * sizeof(long) + 1;
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
- return rc;
+ return n;
}
static ssize_t apmask_store(struct device *dev,
@@ -243,25 +233,20 @@ static ssize_t aqmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int i, rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- buf[0] = '0';
- buf[1] = 'x';
+ n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
- snprintf(buf + 2 + 2 * i * sizeof(long),
- PAGE_SIZE - 2 - 2 * i * sizeof(long),
- "%016lx", zcdndev->perms.aqm[i]);
- buf[2 + 2 * i * sizeof(long)] = '\n';
- buf[2 + 2 * i * sizeof(long) + 1] = '\0';
- rc = 2 + 2 * i * sizeof(long) + 1;
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
- return rc;
+ return n;
}
static ssize_t aqmask_store(struct device *dev,
@@ -285,25 +270,20 @@ static ssize_t admask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int i, rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- buf[0] = '0';
- buf[1] = 'x';
+ n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
- snprintf(buf + 2 + 2 * i * sizeof(long),
- PAGE_SIZE - 2 - 2 * i * sizeof(long),
- "%016lx", zcdndev->perms.adm[i]);
- buf[2 + 2 * i * sizeof(long)] = '\n';
- buf[2 + 2 * i * sizeof(long) + 1] = '\0';
- rc = 2 + 2 * i * sizeof(long) + 1;
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
- return rc;
+ return n;
}
static ssize_t admask_store(struct device *dev,
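
All four mask attributes above get the same treatment: sysfs_emit() starts the line (validating that buf really is the page-aligned sysfs buffer) and sysfs_emit_at() appends at a caller-tracked offset, replacing the fragile buf + 2 + 2 * i * sizeof(long) arithmetic and the hand-placed trailing newline and NUL. A minimal sketch of the resulting show() shape; demo_show and demo_mask are illustrative names, not from this patch:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/sysfs.h>

	static ssize_t demo_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		static const unsigned long demo_mask[4] = { 0x0fUL, 0, 0, ~0UL };
		int i, n;

		n = sysfs_emit(buf, "0x");
		for (i = 0; i < ARRAY_SIZE(demo_mask); i++)
			n += sysfs_emit_at(buf, n, "%016lx", demo_mask[i]);
		n += sysfs_emit_at(buf, n, "\n");
		return n;
	}
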
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 6ca675042416..c815722d0ac8 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -41,7 +41,7 @@ static ssize_t type_show(struct device *dev,
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+ return sysfs_emit(buf, "%s\n", zc->type_string);
}
static DEVICE_ATTR_RO(type);
@@ -54,7 +54,7 @@ static ssize_t online_show(struct device *dev,
struct ap_card *ac = to_ap_card(dev);
int online = ac->config && zc->online ? 1 : 0;
- return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+ return sysfs_emit(buf, "%d\n", online);
}
static ssize_t online_store(struct device *dev,
@@ -118,7 +118,7 @@ static ssize_t load_show(struct device *dev,
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+ return sysfs_emit(buf, "%d\n", atomic_read(&zc->load));
}
static DEVICE_ATTR_RO(load);
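
For single-value attributes like these, the conversion is one-for-one, with the bonus that sysfs_emit() WARNs and returns 0 if handed anything other than the page-aligned sysfs buffer, misuse that scnprintf() would accept silently. In sketch form:

	return sysfs_emit(buf, "%d\n", online);	/* was: scnprintf(buf, PAGE_SIZE, "%d\n", online) */
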
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index cb7849defce3..251b5bd3d19c 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -75,7 +75,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
if (ap_domain_index >= 0)
cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+ return sysfs_emit(buf, "%s\n", ci.serial);
}
static struct device_attribute dev_attr_cca_serialnr =
@@ -110,51 +110,46 @@ static ssize_t cca_mkvps_show(struct device *dev,
&ci, zq->online);
if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
- n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_aes_mk_state - '1'],
- ci.new_aes_mkvp);
+ n = sysfs_emit(buf, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
else
- n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+ n = sysfs_emit(buf, "AES NEW: - -\n");
if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "AES CUR: %s 0x%016llx\n",
- cao_state[ci.cur_aes_mk_state - '1'],
- ci.cur_aes_mkvp);
+ n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_aes_mk_state - '1'],
+ ci.cur_aes_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+ n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "AES OLD: %s 0x%016llx\n",
- cao_state[ci.old_aes_mk_state - '1'],
- ci.old_aes_mkvp);
+ n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_aes_mk_state - '1'],
+ ci.old_aes_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+ n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "APKA NEW: %s 0x%016llx\n",
- new_state[ci.new_apka_mk_state - '1'],
- ci.new_apka_mkvp);
+ n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
+ new_state[ci.new_apka_mk_state - '1'],
+ ci.new_apka_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+ n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "APKA CUR: %s 0x%016llx\n",
- cao_state[ci.cur_apka_mk_state - '1'],
- ci.cur_apka_mkvp);
+ n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_apka_mk_state - '1'],
+ ci.cur_apka_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+ n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "APKA OLD: %s 0x%016llx\n",
- cao_state[ci.old_apka_mk_state - '1'],
- ci.old_apka_mkvp);
+ n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
+ cao_state[ci.old_apka_mk_state - '1'],
+ ci.old_apka_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+ n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
return n;
}
@@ -181,7 +176,7 @@ static const struct attribute_group cca_queue_attr_grp = {
static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
{
struct ap_message ap_msg;
- unsigned long long psmid;
+ unsigned long psmid;
unsigned int domain;
struct {
struct type86_hdr hdr;
@@ -203,21 +198,22 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL);
if (!ap_msg.msg)
return -ENOMEM;
+ ap_msg.bufsize = PAGE_SIZE;
rng_type6cprb_msgx(&ap_msg, 4, &domain);
msg = ap_msg.msg;
msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
- rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);
+ rc = ap_send(aq->qid, 0x0102030405060708UL, ap_msg.msg, ap_msg.len);
if (rc)
goto out_free;
/* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
- rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);
- if (rc == 0 && psmid == 0x0102030405060708ULL)
+ rc = ap_recv(aq->qid, &psmid, ap_msg.msg, ap_msg.bufsize);
+ if (rc == 0 && psmid == 0x0102030405060708UL)
break;
}
@@ -342,7 +338,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
- ap_rapq(aq->qid);
+ ap_rapq(aq->qid, 0);
rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
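
Three independent adjustments meet in this file: the PSMID moves from unsigned long long to unsigned long (s390 is 64-bit only, so the full 64-bit PSMID still fits), the receive size comes from ap_msg.bufsize instead of a hard-coded 4096 so the buffer and the length passed to ap_recv() cannot drift apart, and ap_rapq() grows a second parameter, passed as 0 here, which appears to preserve the previous reset behavior. A build-time check in the spirit of the type change, as a sketch rather than part of the patch:

	#include <linux/build_bug.h>

	/* on s390 a long is 64 bits wide, so the PSMID loses nothing */
	static_assert(sizeof(unsigned long) == sizeof(unsigned long long),
		      "PSMID needs a 64-bit long");
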
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index b03916b7538b..9cfce9ff2e65 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -88,7 +88,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
if (ap_domain_index >= 0)
cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+ return sysfs_emit(buf, "%s\n", ci.serial);
}
static struct device_attribute dev_attr_cca_serialnr =
@@ -123,79 +123,70 @@ static ssize_t cca_mkvps_show(struct device *dev,
&ci, zq->online);
if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
- n += scnprintf(buf + n, PAGE_SIZE,
- "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_aes_mk_state - '1'],
- ci.new_aes_mkvp);
+ n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE, "AES NEW: - -\n");
+ n += sysfs_emit_at(buf, n, "AES NEW: - -\n");
if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "AES CUR: %s 0x%016llx\n",
- cao_state[ci.cur_aes_mk_state - '1'],
- ci.cur_aes_mkvp);
+ n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_aes_mk_state - '1'],
+ ci.cur_aes_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+ n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "AES OLD: %s 0x%016llx\n",
- cao_state[ci.old_aes_mk_state - '1'],
- ci.old_aes_mkvp);
+ n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_aes_mk_state - '1'],
+ ci.old_aes_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+ n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "APKA NEW: %s 0x%016llx\n",
- new_state[ci.new_apka_mk_state - '1'],
- ci.new_apka_mkvp);
+ n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
+ new_state[ci.new_apka_mk_state - '1'],
+ ci.new_apka_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+ n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "APKA CUR: %s 0x%016llx\n",
- cao_state[ci.cur_apka_mk_state - '1'],
- ci.cur_apka_mkvp);
+ n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_apka_mk_state - '1'],
+ ci.cur_apka_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+ n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "APKA OLD: %s 0x%016llx\n",
- cao_state[ci.old_apka_mk_state - '1'],
- ci.old_apka_mkvp);
+ n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
+ cao_state[ci.old_apka_mk_state - '1'],
+ ci.old_apka_mkvp);
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+ n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
if (ci.new_asym_mk_state >= '1' && ci.new_asym_mk_state <= '3')
- n += scnprintf(buf + n, PAGE_SIZE,
- "ASYM NEW: %s 0x%016llx%016llx\n",
- new_state[ci.new_asym_mk_state - '1'],
- *((u64 *)(ci.new_asym_mkvp)),
- *((u64 *)(ci.new_asym_mkvp + sizeof(u64))));
+ n += sysfs_emit_at(buf, n, "ASYM NEW: %s 0x%016llx%016llx\n",
+ new_state[ci.new_asym_mk_state - '1'],
+ *((u64 *)(ci.new_asym_mkvp)),
+ *((u64 *)(ci.new_asym_mkvp + sizeof(u64))));
else
- n += scnprintf(buf + n, PAGE_SIZE, "ASYM NEW: - -\n");
+ n += sysfs_emit_at(buf, n, "ASYM NEW: - -\n");
if (ci.cur_asym_mk_state >= '1' && ci.cur_asym_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "ASYM CUR: %s 0x%016llx%016llx\n",
- cao_state[ci.cur_asym_mk_state - '1'],
- *((u64 *)(ci.cur_asym_mkvp)),
- *((u64 *)(ci.cur_asym_mkvp + sizeof(u64))));
+ n += sysfs_emit_at(buf, n, "ASYM CUR: %s 0x%016llx%016llx\n",
+ cao_state[ci.cur_asym_mk_state - '1'],
+ *((u64 *)(ci.cur_asym_mkvp)),
+ *((u64 *)(ci.cur_asym_mkvp + sizeof(u64))));
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM CUR: - -\n");
+ n += sysfs_emit_at(buf, n, "ASYM CUR: - -\n");
if (ci.old_asym_mk_state >= '1' && ci.old_asym_mk_state <= '2')
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "ASYM OLD: %s 0x%016llx%016llx\n",
- cao_state[ci.old_asym_mk_state - '1'],
- *((u64 *)(ci.old_asym_mkvp)),
- *((u64 *)(ci.old_asym_mkvp + sizeof(u64))));
+ n += sysfs_emit_at(buf, n, "ASYM OLD: %s 0x%016llx%016llx\n",
+ cao_state[ci.old_asym_mk_state - '1'],
+ *((u64 *)(ci.old_asym_mkvp)),
+ *((u64 *)(ci.old_asym_mkvp + sizeof(u64))));
else
- n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM OLD: - -\n");
+ n += sysfs_emit_at(buf, n, "ASYM OLD: - -\n");
return n;
}
@@ -228,9 +219,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.API_ord_nr > 0)
- return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+ return sysfs_emit(buf, "%u\n", ci.API_ord_nr);
else
- return scnprintf(buf, PAGE_SIZE, "\n");
+ return sysfs_emit(buf, "\n");
}
static struct device_attribute dev_attr_ep11_api_ordinalnr =
@@ -249,11 +240,11 @@ static ssize_t ep11_fw_version_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.FW_version > 0)
- return scnprintf(buf, PAGE_SIZE, "%d.%d\n",
- (int)(ci.FW_version >> 8),
- (int)(ci.FW_version & 0xFF));
+ return sysfs_emit(buf, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
else
- return scnprintf(buf, PAGE_SIZE, "\n");
+ return sysfs_emit(buf, "\n");
}
static struct device_attribute dev_attr_ep11_fw_version =
@@ -272,9 +263,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.serial[0])
- return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+ return sysfs_emit(buf, "%16.16s\n", ci.serial);
else
- return scnprintf(buf, PAGE_SIZE, "\n");
+ return sysfs_emit(buf, "\n");
}
static struct device_attribute dev_attr_ep11_serialnr =
@@ -309,11 +300,11 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "%s", ep11_op_modes[i].mode_txt);
+ n += sysfs_emit_at(buf, n, "%s",
+ ep11_op_modes[i].mode_txt);
}
}
- n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += sysfs_emit_at(buf, n, "\n");
return n;
}
@@ -356,29 +347,29 @@ static ssize_t ep11_mkvps_show(struct device *dev,
&di);
if (di.cur_wk_state == '0') {
- n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
- cwk_state[di.cur_wk_state - '0']);
+ n = sysfs_emit(buf, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
} else if (di.cur_wk_state == '1') {
- n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
- cwk_state[di.cur_wk_state - '0']);
+ n = sysfs_emit(buf, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
n += 2 * sizeof(di.cur_wkvp);
- n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += sysfs_emit_at(buf, n, "\n");
} else {
- n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+ n = sysfs_emit(buf, "WK CUR: - -\n");
}
if (di.new_wk_state == '0') {
- n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
- nwk_state[di.new_wk_state - '0']);
+ n += sysfs_emit_at(buf, n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
- n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
- nwk_state[di.new_wk_state - '0']);
+ n += sysfs_emit_at(buf, n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
n += 2 * sizeof(di.new_wkvp);
- n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += sysfs_emit_at(buf, n, "\n");
} else {
- n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+ n += sysfs_emit_at(buf, n, "WK NEW: - -\n");
}
return n;
@@ -406,11 +397,11 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
- n += scnprintf(buf + n, PAGE_SIZE - n,
- "%s", ep11_op_modes[i].mode_txt);
+ n += sysfs_emit_at(buf, n, "%s",
+ ep11_op_modes[i].mode_txt);
}
}
- n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += sysfs_emit_at(buf, n, "\n");
return n;
}
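
Beyond the mechanical conversion, note the removed lines in the AES NEW and ASYM NEW branches: they passed PAGE_SIZE rather than PAGE_SIZE - n as the remaining-space argument, quietly overstating the room left in the buffer. sysfs_emit_at() removes that class of bug by tracking the offset itself and clamping at the page boundary. The hazard in miniature, with txt and n standing in for any string and running offset:

	n += scnprintf(buf + n, PAGE_SIZE, "%s\n", txt);	/* bug: ignores bytes already written */
	n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", txt);	/* correct, but easy to get wrong */
	n += sysfs_emit_at(buf, n, "%s\n", txt);		/* offset handled internally */
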
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 7d245645fdd5..05ace18c12b0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -441,14 +441,17 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq,
t80h = reply->msg;
if (t80h->type == TYPE80_RSP_CODE) {
len = t80h->len;
- if (len > reply->bufsize || len > msg->bufsize) {
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
- } else {
- memcpy(msg->msg, reply->msg, len);
- msg->len = len;
+ goto out;
}
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
} else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
out:
complete((struct completion *)msg->private);
@@ -476,7 +479,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_cex2a_receive;
- ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
@@ -527,7 +530,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_cex2a_receive;
- ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
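
The receive callback is tightened in two ways: a reply is now also rejected when its declared type80 length disagrees with the length actually received (len != reply->len), and the error path sets msg->len so callers never pair an error payload with a stale length. The acceptance condition, written out as a hypothetical helper (struct ap_message is from drivers/s390/crypto/ap_bus.h; the patch open-codes the inverse test):

	static bool reply_len_ok(int len, const struct ap_message *reply,
				 const struct ap_message *msg)
	{
		return len <= reply->bufsize && len <= msg->bufsize &&
		       len == reply->len;
	}
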
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 5ad251477593..914151c03753 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -926,8 +926,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct response_type *resp_type =
- (struct response_type *)msg->private;
+ struct response_type *resp_type = msg->private;
struct type86x_reply *t86r;
int len;
@@ -939,28 +938,37 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_ICA:
- len = sizeof(struct type86x_reply) + t86r->length - 2;
- if (len > reply->bufsize || len > msg->bufsize) {
+ len = sizeof(struct type86x_reply) + t86r->length;
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
- } else {
- memcpy(msg->msg, reply->msg, len);
- msg->len = len;
+ goto out;
}
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
break;
case CEXXC_RESPONSE_TYPE_XCRB:
- len = t86r->fmt2.offset2 + t86r->fmt2.count2;
- if (len > reply->bufsize || len > msg->bufsize) {
+ if (t86r->fmt2.count2)
+ len = t86r->fmt2.offset2 + t86r->fmt2.count2;
+ else
+ len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
- } else {
- memcpy(msg->msg, reply->msg, len);
- msg->len = len;
+ goto out;
}
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
break;
default:
memcpy(msg->msg, &error_reply, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
} else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
out:
complete(&resp_type->work);
@@ -982,8 +990,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct response_type *resp_type =
- (struct response_type *)msg->private;
+ struct response_type *resp_type = msg->private;
struct type86_ep11_reply *t86r;
int len;
@@ -996,18 +1003,22 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_EP11:
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
- if (len > reply->bufsize || len > msg->bufsize) {
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
- } else {
- memcpy(msg->msg, reply->msg, len);
- msg->len = len;
+ goto out;
}
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
break;
default:
memcpy(msg->msg, &error_reply, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
} else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
out:
complete(&resp_type->work);
@@ -1036,7 +1047,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
return -ENOMEM;
ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &resp_type;
rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex);
@@ -1086,7 +1097,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
return -ENOMEM;
ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &resp_type;
rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt);
@@ -1137,7 +1148,7 @@ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
@@ -1157,7 +1168,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *ap_msg)
{
int rc;
- struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct response_type *rtype = ap_msg->private;
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
@@ -1218,7 +1229,7 @@ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
- ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
@@ -1240,7 +1251,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
{
int rc;
unsigned int lfmt;
- struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct response_type *rtype = ap_msg->private;
struct {
struct type6_hdr hdr;
struct ep11_cprb cprbx;
@@ -1328,7 +1339,7 @@ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
@@ -1359,7 +1370,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
short int verb_length;
short int key_length;
} __packed * msg = ap_msg->msg;
- struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct response_type *rtype = ap_msg->private;
int rc;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
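
The same two receive-path fixes recur here, joined by smaller cleanups: the ICA length computation no longer subtracts 2, the XCRB case falls back to segment 1 (offset1/count1) when count2 is zero, and every (struct response_type *) cast disappears because ap_msg->private is a void pointer, which C converts implicitly:

	struct response_type *rtype = ap_msg->private;	/* void * needs no cast in C */
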
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index cdc5a4b2c019..112a80e8e6c2 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -44,7 +44,7 @@ static ssize_t online_show(struct device *dev,
struct ap_queue *aq = to_ap_queue(dev);
int online = aq->config && zq->online ? 1 : 0;
- return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+ return sysfs_emit(buf, "%d\n", online);
}
static ssize_t online_store(struct device *dev,
@@ -84,7 +84,7 @@ static ssize_t load_show(struct device *dev,
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+ return sysfs_emit(buf, "%d\n", atomic_read(&zq->load));
}
static DEVICE_ATTR_RO(load);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c8b379e2e9ad..59d8f3080cba 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -502,7 +502,7 @@ config SECTION_MISMATCH_WARN_ONLY
config DEBUG_FORCE_FUNCTION_ALIGN_64B
bool "Force all function address 64B aligned"
- depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC)
+ depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC || S390)
select FUNCTION_ALIGNMENT_64B
help
There are cases that a commit from one domain changes the function