Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile              |   4
-rw-r--r--  arch/s390/kernel/cert_store.c          |   7
-rw-r--r--  arch/s390/kernel/compat_signal.c       |  16
-rw-r--r--  arch/s390/kernel/crash_dump.c          |  11
-rw-r--r--  arch/s390/kernel/ctlreg.c              | 121
-rw-r--r--  arch/s390/kernel/debug.c               |   1
-rw-r--r--  arch/s390/kernel/diag.c                |   3
-rw-r--r--  arch/s390/kernel/early.c               |  38
-rw-r--r--  arch/s390/kernel/facility.c            |  21
-rw-r--r--  arch/s390/kernel/fpu.c                 |  13
-rw-r--r--  arch/s390/kernel/guarded_storage.c     |   6
-rw-r--r--  arch/s390/kernel/ipl.c                 |   3
-rw-r--r--  arch/s390/kernel/irq.c                 |   4
-rw-r--r--  arch/s390/kernel/kprobes.c             |  21
-rw-r--r--  arch/s390/kernel/machine_kexec.c       |  12
-rw-r--r--  arch/s390/kernel/nmi.c                 |  34
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c        |   2
-rw-r--r--  arch/s390/kernel/perf_event.c          |  41
-rw-r--r--  arch/s390/kernel/perf_pai_crypto.c     | 225
-rw-r--r--  arch/s390/kernel/perf_pai_ext.c        |  90
-rw-r--r--  arch/s390/kernel/perf_regs.c           |   6
-rw-r--r--  arch/s390/kernel/process.c             |   2
-rw-r--r--  arch/s390/kernel/processor.c           |   7
-rw-r--r--  arch/s390/kernel/ptrace.c              |  76
-rw-r--r--  arch/s390/kernel/setup.c               |  69
-rw-r--r--  arch/s390/kernel/signal.c              |  13
-rw-r--r--  arch/s390/kernel/smp.c                 | 140
-rw-r--r--  arch/s390/kernel/stacktrace.c          |  43
-rw-r--r--  arch/s390/kernel/syscalls/syscall.tbl  |  11
-rw-r--r--  arch/s390/kernel/sysinfo.c             |  10
-rw-r--r--  arch/s390/kernel/time.c                |   4
-rw-r--r--  arch/s390/kernel/topology.c            |   3
-rw-r--r--  arch/s390/kernel/traps.c               |  15
-rw-r--r--  arch/s390/kernel/uv.c                  |   2
-rw-r--r--  arch/s390/kernel/vdso32/Makefile       |  10
-rw-r--r--  arch/s390/kernel/vdso64/Makefile       |  10
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S         |   1
37 files changed, 636 insertions(+), 459 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 0df2b88cc0da..7a562b4199c8 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -37,11 +37,11 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
-obj-y += sysinfo.o lgr.o os_info.o
+obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o
+obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o
extra-y += vmlinux.lds
diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c
index 3986a044eb36..554447768bdd 100644
--- a/arch/s390/kernel/cert_store.c
+++ b/arch/s390/kernel/cert_store.c
@@ -432,15 +432,16 @@ static char *get_key_description(struct vcssb *vcssb, const struct vce *vce)
char *desc;
cs_token = vcssb->cs_token;
- /* Description string contains "%64s:%04u:%08u\0". */
+ /* Description string contains "%64s:%05u:%010u\0". */
name_len = sizeof(vce->vce_hdr.vc_name);
- len = name_len + 1 + 4 + 1 + 8 + 1;
+ len = name_len + 1 + 5 + 1 + 10 + 1;
desc = kmalloc(len, GFP_KERNEL);
if (!desc)
return NULL;
memcpy(desc, vce->vce_hdr.vc_name, name_len);
- sprintf(desc + name_len, ":%04u:%08u", vce->vce_hdr.vc_index, cs_token);
+ snprintf(desc + name_len, len - name_len, ":%05u:%010u",
+ vce->vce_hdr.vc_index, cs_token);
return desc;
}
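
The widened format matters because vc_index can reach five decimal digits and the 32-bit cs_token ten; the old ":%04u:%08u" arithmetic under-sized the buffer for large values, and switching to snprintf() bounds the write even if the format drifts again. A minimal userspace sketch of the sizing reasoning (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int vc_index = 65535;		/* worst case: 5 decimal digits */
	unsigned int cs_token = 4294967295u;	/* worst case: 10 decimal digits */
	char buf[1 + 5 + 1 + 10 + 1];		/* ':' digits ':' digits NUL */

	/* snprintf never writes past sizeof(buf), unlike sprintf */
	snprintf(buf, sizeof(buf), ":%05u:%010u", vc_index, cs_token);
	printf("%s (buffer %zu bytes)\n", buf, sizeof(buf));
	return 0;
}
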
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index cecedd01d4ec..f8fc6c25d051 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -29,6 +29,7 @@
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include <asm/vdso.h>
+#include <asm/fpu/api.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"
@@ -98,10 +99,6 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
return -EINVAL;
- /* Test the floating-point-control word. */
- if (test_fp_ctl(user_sregs.fpregs.fpc))
- return -EINVAL;
-
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
@@ -137,7 +134,7 @@ static int save_sigregs_ext32(struct pt_regs *regs,
return -EFAULT;
/* Save vector registers to signal stack */
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = current->thread.fpu.vxrs[i].low;
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -165,7 +162,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
*(__u32 *)&regs->gprs[i] = gprs_high[i];
/* Restore vector registers from signal stack */
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -265,7 +262,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
* the machine supports it
*/
frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
- if (!MACHINE_HAS_VX)
+ if (!cpu_has_vx())
frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
sizeof(frame->sregs_ext.vxrs_high);
frame = get_sigframe(&ksig->ka, regs, frame_size);
@@ -348,11 +345,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
* the machine supports it
*/
uc_flags = UC_GPRS_HIGH;
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
uc_flags |= UC_VXRS;
- } else
+ } else {
frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
+ }
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 7af69948b290..5c46c2659305 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -22,6 +22,7 @@
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/maccess.h>
+#include <asm/fpu/api.h>
#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -319,7 +320,7 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
&sa->vxrs_high, sizeof(sa->vxrs_high));
ptr = nt_init(ptr, NT_S390_VXRS_LOW,
@@ -343,7 +344,7 @@ static size_t get_cpu_elf_notes_size(void)
size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
}
@@ -498,7 +499,7 @@ static int get_mem_chunk_cnt(void)
/*
* Initialize ELF loads (new kernel)
*/
-static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
+static void loads_init(Elf64_Phdr *phdr)
{
phys_addr_t start, end;
u64 idx;
@@ -507,7 +508,7 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
phdr->p_filesz = end - start;
phdr->p_type = PT_LOAD;
phdr->p_offset = start;
- phdr->p_vaddr = start;
+ phdr->p_vaddr = (unsigned long)__va(start);
phdr->p_paddr = start;
phdr->p_memsz = end - start;
phdr->p_flags = PF_R | PF_W | PF_X;
@@ -612,7 +613,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init loads */
hdr_off = PTR_DIFF(ptr, hdr);
- loads_init(phdr_loads, hdr_off);
+ loads_init(phdr_loads);
*addr = (unsigned long long) hdr;
*size = (unsigned long long) hdr_off;
BUG_ON(elfcorehdr_size > alloc_size);
diff --git a/arch/s390/kernel/ctlreg.c b/arch/s390/kernel/ctlreg.c
new file mode 100644
index 000000000000..8cc26cf2c64a
--- /dev/null
+++ b/arch/s390/kernel/ctlreg.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+
+#include <linux/irqflags.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cache.h>
+#include <asm/abs_lowcore.h>
+#include <asm/ctlreg.h>
+
+/*
+ * system_ctl_lock guards access to global control register contents which
+ * are kept in the control register save area within absolute lowcore
+ * at physical address zero.
+ */
+static DEFINE_SPINLOCK(system_ctl_lock);
+
+void system_ctlreg_lock(void)
+ __acquires(&system_ctl_lock)
+{
+ spin_lock(&system_ctl_lock);
+}
+
+void system_ctlreg_unlock(void)
+ __releases(&system_ctl_lock)
+{
+ spin_unlock(&system_ctl_lock);
+}
+
+static bool system_ctlreg_area_init __ro_after_init;
+
+void __init system_ctlreg_init_save_area(struct lowcore *lc)
+{
+ struct lowcore *abs_lc;
+
+ abs_lc = get_abs_lowcore();
+ __local_ctl_store(0, 15, lc->cregs_save_area);
+ __local_ctl_store(0, 15, abs_lc->cregs_save_area);
+ put_abs_lowcore(abs_lc);
+ system_ctlreg_area_init = true;
+}
+
+struct ctlreg_parms {
+ unsigned long andval;
+ unsigned long orval;
+ unsigned long val;
+ int request;
+ int cr;
+};
+
+static void ctlreg_callback(void *info)
+{
+ struct ctlreg_parms *pp = info;
+ struct ctlreg regs[16];
+
+ __local_ctl_store(0, 15, regs);
+ if (pp->request == CTLREG_LOAD) {
+ regs[pp->cr].val = pp->val;
+ } else {
+ regs[pp->cr].val &= pp->andval;
+ regs[pp->cr].val |= pp->orval;
+ }
+ __local_ctl_load(0, 15, regs);
+}
+
+static void system_ctlreg_update(void *info)
+{
+ unsigned long flags;
+
+ if (system_state == SYSTEM_BOOTING) {
+ /*
+ * For very early calls do not call on_each_cpu()
+ * since not everything might be setup.
+ */
+ local_irq_save(flags);
+ ctlreg_callback(info);
+ local_irq_restore(flags);
+ } else {
+ on_each_cpu(ctlreg_callback, info, 1);
+ }
+}
+
+void system_ctlreg_modify(unsigned int cr, unsigned long data, int request)
+{
+ struct ctlreg_parms pp = { .cr = cr, .request = request, };
+ struct lowcore *abs_lc;
+
+ switch (request) {
+ case CTLREG_SET_BIT:
+ pp.orval = 1UL << data;
+ pp.andval = -1UL;
+ break;
+ case CTLREG_CLEAR_BIT:
+ pp.orval = 0;
+ pp.andval = ~(1UL << data);
+ break;
+ case CTLREG_LOAD:
+ pp.val = data;
+ break;
+ }
+ if (system_ctlreg_area_init) {
+ system_ctlreg_lock();
+ abs_lc = get_abs_lowcore();
+ if (request == CTLREG_LOAD) {
+ abs_lc->cregs_save_area[cr].val = pp.val;
+ } else {
+ abs_lc->cregs_save_area[cr].val &= pp.andval;
+ abs_lc->cregs_save_area[cr].val |= pp.orval;
+ }
+ put_abs_lowcore(abs_lc);
+ system_ctlreg_update(&pp);
+ system_ctlreg_unlock();
+ } else {
+ system_ctlreg_update(&pp);
+ }
+}
+EXPORT_SYMBOL(system_ctlreg_modify);
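
For orientation, the callers converted later in this series (irq.c, nmi.c, perf_cpum_cf.c) go through small wrappers; a hedged sketch of how they are assumed to map onto system_ctlreg_modify() in <asm/ctlreg.h>, following the CTLREG_* requests handled above:

/* Assumed wrapper layer; the exact header contents are not in this diff. */
static inline void system_ctl_set_bit(unsigned int cr, unsigned int bit)
{
	system_ctlreg_modify(cr, bit, CTLREG_SET_BIT);
}

static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
	system_ctlreg_modify(cr, bit, CTLREG_CLEAR_BIT);
}

/* Example from this series: enable the warning machine-check submask
 * on all CPUs and in the boot-time save area with a single call:
 */
system_ctl_set_bit(14, CR14_WARNING_SUBMASK_BIT);
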
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index a85e0c3e7027..85328a0ef3b6 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -978,7 +978,6 @@ static struct ctl_table s390dbf_table[] = {
.mode = S_IRUGO | S_IWUSR,
.proc_handler = s390dbf_procactive,
},
- { }
};
static struct ctl_table_header *s390dbf_sysctl_header;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index f9f06cd8fcee..92fdc35f028c 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -245,6 +245,7 @@ EXPORT_SYMBOL(diag8c);
int diag224(void *ptr)
{
+ unsigned long addr = __pa(ptr);
int rc = -EOPNOTSUPP;
diag_stat_inc(DIAG_STAT_X224);
@@ -253,7 +254,7 @@ int diag224(void *ptr)
"0: lhi %0,0x0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "+d" (rc) :"d" (0), "d" (ptr) : "memory");
+ : "+d" (rc) :"d" (0), "d" (addr) : "memory");
return rc;
}
EXPORT_SYMBOL(diag224);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2dd5976a55ac..2345ea332b97 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -44,9 +44,9 @@ early_param(#param, ignore_decompressor_param_##param)
decompressor_handled_param(mem);
decompressor_handled_param(vmalloc);
decompressor_handled_param(dfltcc);
-decompressor_handled_param(noexec);
decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
+decompressor_handled_param(cmma);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif
@@ -217,7 +217,7 @@ static __init void detect_machine_facilities(void)
{
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
- __ctl_set_bit(0, 23);
+ system_ctl_set_bit(0, CR0_EDAT_BIT);
}
if (test_facility(78))
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
@@ -225,25 +225,21 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
- __ctl_set_bit(0, 55);
+ system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
}
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
- if (test_facility(129)) {
- S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
- __ctl_set_bit(0, 17);
- }
- if (test_facility(130) && !noexec_disabled) {
+ if (test_facility(129))
+ system_ctl_set_bit(0, CR0_VECTOR_BIT);
+ if (test_facility(130))
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
- __ctl_set_bit(0, 20);
- }
if (test_facility(133))
S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
if (test_facility(139) && (tod_clock_base.tod >> 63)) {
/* Enabled signed clock comparator comparisons */
S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
- __ctl_set_bit(0, 53);
+ system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
@@ -261,15 +257,9 @@ static inline void save_vector_registers(void)
#endif
}
-static inline void setup_control_registers(void)
+static inline void setup_low_address_protection(void)
{
- unsigned long reg;
-
- __ctl_store(reg, 0, 0);
- reg |= CR0_LOW_ADDRESS_PROTECTION;
- reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
- reg |= CR0_EXTERNAL_CALL_SUBMASK;
- __ctl_load(reg, 0, 0);
+ system_ctl_set_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
}
static inline void setup_access_registers(void)
@@ -279,14 +269,6 @@ static inline void setup_access_registers(void)
restore_access_regs(acrs);
}
-static int __init disable_vector_extension(char *str)
-{
- S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
- __ctl_clear_bit(0, 17);
- return 0;
-}
-early_param("novx", disable_vector_extension);
-
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
@@ -317,7 +299,7 @@ void __init startup_init(void)
save_vector_registers();
setup_topology();
sclp_early_detect();
- setup_control_registers();
+ setup_low_address_protection();
setup_access_registers();
lockdep_on();
}
diff --git a/arch/s390/kernel/facility.c b/arch/s390/kernel/facility.c
new file mode 100644
index 000000000000..f02127219a27
--- /dev/null
+++ b/arch/s390/kernel/facility.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2023
+ */
+
+#include <asm/facility.h>
+
+unsigned int stfle_size(void)
+{
+ static unsigned int size;
+ unsigned int r;
+ u64 dummy;
+
+ r = READ_ONCE(size);
+ if (!r) {
+ r = __stfle_asm(&dummy, 1) + 1;
+ WRITE_ONCE(size, r);
+ }
+ return r;
+}
+EXPORT_SYMBOL(stfle_size);
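
stfle_size() caches the facility-list length: STFLE executed with a one-doubleword buffer reports the total number of doublewords minus one, hence the "+ 1". The READ_ONCE()/WRITE_ONCE() pairing tolerates a benign race, since every CPU computes the same value. A hedged usage sketch (the kcalloc() call is illustrative, not from this patch):

u64 *list;
unsigned int nr = stfle_size();	/* doublewords the machine reports */

list = kcalloc(nr, sizeof(u64), GFP_KERNEL);	/* illustrative only */
if (list)
	__stfle_asm(list, nr);	/* fetch the full facility list */
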
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 4666b29ac8a1..a4f3449cc814 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -24,7 +24,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
/* Save floating point control */
asm volatile("stfpc %0" : "=Q" (state->fpc));
- if (!MACHINE_HAS_VX) {
+ if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_V0V7) {
/* Save floating-point registers */
asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
@@ -106,7 +106,7 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
/* Restore floating-point controls */
asm volatile("lfpc %0" : : "Q" (state->fpc));
- if (!MACHINE_HAS_VX) {
+ if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_V0V7) {
/* Restore floating-point registers */
asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
@@ -177,11 +177,11 @@ EXPORT_SYMBOL(__kernel_fpu_end);
void __load_fpu_regs(void)
{
- struct fpu *state = &current->thread.fpu;
unsigned long *regs = current->thread.fpu.regs;
+ struct fpu *state = &current->thread.fpu;
- asm volatile("lfpc %0" : : "Q" (state->fpc));
- if (likely(MACHINE_HAS_VX)) {
+ sfpc_safe(state->fpc);
+ if (likely(cpu_has_vx())) {
asm volatile("lgr 1,%0\n"
"VLM 0,15,0,1\n"
"VLM 16,31,256,1\n"
@@ -208,7 +208,6 @@ void __load_fpu_regs(void)
}
clear_cpu_flag(CIF_FPU);
}
-EXPORT_SYMBOL(__load_fpu_regs);
void load_fpu_regs(void)
{
@@ -232,7 +231,7 @@ void save_fpu_regs(void)
regs = current->thread.fpu.regs;
asm volatile("stfpc %0" : "=Q" (state->fpc));
- if (likely(MACHINE_HAS_VX)) {
+ if (likely(cpu_has_vx())) {
asm volatile("lgr 1,%0\n"
"VSTM 0,15,0,1\n"
"VSTM 16,31,256,1\n"
diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
index d14dd1c2e524..0b68168d9566 100644
--- a/arch/s390/kernel/guarded_storage.c
+++ b/arch/s390/kernel/guarded_storage.c
@@ -28,7 +28,7 @@ static int gs_enable(void)
return -ENOMEM;
gs_cb->gsd = 25;
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
preempt_enable();
@@ -42,7 +42,7 @@ static int gs_disable(void)
preempt_disable();
kfree(current->thread.gs_cb);
current->thread.gs_cb = NULL;
- __ctl_clear_bit(2, 4);
+ local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
preempt_enable();
}
return 0;
@@ -84,7 +84,7 @@ void gs_load_bc_cb(struct pt_regs *regs)
if (gs_cb) {
kfree(current->thread.gs_cb);
current->thread.gs_bc_cb = NULL;
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 05e51666db03..ba75f6bee774 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -666,6 +666,7 @@ static int __init ipl_init(void)
&ipl_ccw_attr_group_lpar);
break;
case IPL_TYPE_ECKD:
+ case IPL_TYPE_ECKD_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
break;
case IPL_TYPE_FCP:
@@ -2381,7 +2382,7 @@ void s390_reset_system(void)
set_prefix(0);
/* Disable lowcore protection */
- __ctl_clear_bit(0, 28);
+ local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
diag_amode31_ops.diag308_reset();
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b020ff17d206..6f71b0ce1068 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -385,7 +385,7 @@ void irq_subclass_register(enum irq_subclass subclass)
{
spin_lock(&irq_subclass_lock);
if (!irq_subclass_refcount[subclass])
- ctl_set_bit(0, subclass);
+ system_ctl_set_bit(0, subclass);
irq_subclass_refcount[subclass]++;
spin_unlock(&irq_subclass_lock);
}
@@ -396,7 +396,7 @@ void irq_subclass_unregister(enum irq_subclass subclass)
spin_lock(&irq_subclass_lock);
irq_subclass_refcount[subclass]--;
if (!irq_subclass_refcount[subclass])
- ctl_clear_bit(0, subclass);
+ system_ctl_clear_bit(0, subclass);
spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_unregister);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d4b863ed0aa7..f0cf20d4b3c5 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -224,20 +224,27 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
struct pt_regs *regs,
unsigned long ip)
{
- struct per_regs per_kprobe;
+ union {
+ struct ctlreg regs[3];
+ struct {
+ struct ctlreg control;
+ struct ctlreg start;
+ struct ctlreg end;
+ };
+ } per_kprobe;
/* Set up the PER control registers %cr9-%cr11 */
- per_kprobe.control = PER_EVENT_IFETCH;
- per_kprobe.start = ip;
- per_kprobe.end = ip;
+ per_kprobe.control.val = PER_EVENT_IFETCH;
+ per_kprobe.start.val = ip;
+ per_kprobe.end.val = ip;
/* Save control regs and psw mask */
- __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Set PER control regs, turns on single step for the given address */
- __ctl_load(per_kprobe, 9, 11);
+ __local_ctl_load(9, 11, per_kprobe.regs);
regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip;
@@ -249,7 +256,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb,
unsigned long ip)
{
/* Restore control regs and psw mask, set new psw address */
- __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_load(9, 11, kcb->kprobe_saved_ctl);
regs->psw.mask &= ~PSW_MASK_PER;
regs->psw.mask |= kcb->kprobe_saved_imask;
regs->psw.addr = ip;
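
The anonymous union keeps the readable control/start/end names from the old struct per_regs while giving __local_ctl_store()/__local_ctl_load() the plain array of struct ctlreg they now take for %cr9-%cr11. A sketch of the aliasing (the union tag is hypothetical, and struct ctlreg is assumed to wrap a single unsigned long val, as the .val accesses above imply):

union per_ctlregs {
	struct ctlreg regs[3];		/* array view for __local_ctl_load/store */
	struct {
		struct ctlreg control;	/* %cr9:  PER event mask */
		struct ctlreg start;	/* %cr10: PER starting address */
		struct ctlreg end;	/* %cr11: PER ending address */
	};
};

/* The two views must alias exactly; no padding may sneak in. */
static_assert(sizeof(union per_ctlregs) == 3 * sizeof(struct ctlreg));
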
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 12a2bd4fc88c..aa22ffc16bcd 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -91,15 +91,15 @@ static noinline void __machine_kdump(void *image)
}
/* Store status of the boot CPU */
mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- __ctl_store(cr2_old.val, 2, 2);
+ local_ctl_store(2, &cr2_old.reg);
cr2_new = cr2_old;
cr2_new.gse = 1;
- __ctl_load(cr2_new.val, 2, 2);
+ local_ctl_load(2, &cr2_new.reg);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- __ctl_load(cr2_old.val, 2, 2);
+ local_ctl_load(2, &cr2_old.reg);
}
/*
* To create a good backchain for this CPU in the dump store_status
@@ -216,8 +216,8 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
- vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
- vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
+ vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31);
+ vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
abs_lc = get_abs_lowcore();
abs_lc->vmcore_info = paddr_vmcoreinfo_note();
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 38ec0487521c..9ad44c26d1a2 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -22,16 +22,17 @@
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <asm/lowcore.h>
+#include <asm/ctlreg.h>
#include <asm/smp.h>
#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
-#include <asm/ctl_reg.h>
#include <asm/asm-offsets.h>
#include <asm/pai.h>
#include <asm/vx-insn.h>
+#include <asm/fpu/api.h>
struct mcck_struct {
unsigned int kill_task : 1;
@@ -45,7 +46,7 @@ static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
static inline int nmi_needs_mcesa(void)
{
- return MACHINE_HAS_VX || MACHINE_HAS_GS;
+ return cpu_has_vx() || MACHINE_HAS_GS;
}
/*
@@ -131,10 +132,10 @@ static notrace void s390_handle_damage(void)
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
- __ctl_store(cr0.val, 0, 0);
+ local_ctl_store(0, &cr0.reg);
cr0_new = cr0;
cr0_new.lap = 0;
- __ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.reg);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
@@ -146,7 +147,7 @@ static notrace void s390_handle_damage(void)
* values. This makes possible system dump analysis easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
- __ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.reg);
disabled_wait();
while (1);
}
@@ -159,16 +160,17 @@ NOKPROBE_SYMBOL(s390_handle_damage);
void s390_handle_mcck(void)
{
struct mcck_struct mcck;
+ unsigned long mflags;
/*
* Disable machine checks and get the current state of accumulated
* machine checks. Afterwards delete the old state and enable machine
* checks again.
*/
- local_mcck_disable();
+ local_mcck_save(mflags);
mcck = *this_cpu_ptr(&cpu_mcck);
memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
- local_mcck_enable();
+ local_mcck_restore(mflags);
if (mcck.channel_report)
crw_handle_channel_report();
@@ -185,7 +187,7 @@ void s390_handle_mcck(void)
static int mchchk_wng_posted = 0;
/* Use single cpu clear, as we cannot handle smp here. */
- __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
+ local_ctl_clear_bit(14, CR14_WARNING_SUBMASK_BIT);
if (xchg(&mchchk_wng_posted, 1) == 0)
kill_cad_pid(SIGPWR, 1);
}
@@ -234,7 +236,7 @@ static int notrace s390_validate_registers(union mci mci)
}
mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
- if (!MACHINE_HAS_VX) {
+ if (!cpu_has_vx()) {
/* Validate floating point registers */
asm volatile(
" ld 0,0(%0)\n"
@@ -269,9 +271,9 @@ static int notrace s390_validate_registers(union mci mci)
*/
if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
- cr0.val = S390_lowcore.cregs_save_area[0];
+ cr0.reg = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
- __ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.reg);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
@@ -279,7 +281,7 @@ static int notrace s390_validate_registers(union mci mci)
:
: "Q" (*(struct vx_array *)mcesa->vector_save_area)
: "1");
- __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+ local_ctl_load(0, &S390_lowcore.cregs_save_area[0]);
}
/* Validate access registers */
asm volatile(
@@ -290,7 +292,7 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.ar)
kill_task = 1;
/* Validate guarded storage registers */
- cr2.val = S390_lowcore.cregs_save_area[2];
+ cr2.reg = S390_lowcore.cregs_save_area[2];
if (cr2.gse) {
if (!mci.gs) {
/*
@@ -505,9 +507,9 @@ NOKPROBE_SYMBOL(s390_do_machine_check);
static int __init machine_check_init(void)
{
- ctl_set_bit(14, 25); /* enable external damage MCH */
- ctl_set_bit(14, 27); /* enable system recovery MCH */
- ctl_set_bit(14, 24); /* enable warning MCH */
+ system_ctl_set_bit(14, CR14_EXTERNAL_DAMAGE_SUBMASK_BIT);
+ system_ctl_set_bit(14, CR14_RECOVERY_SUBMASK_BIT);
+ system_ctl_set_bit(14, CR14_WARNING_SUBMASK_BIT);
return 0;
}
early_initcall(machine_check_init);
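
s390_handle_mcck() now saves and restores the machine-check mask instead of unconditionally disabling and re-enabling it, so a caller that entered with machine checks already disabled keeps them disabled on return. The pattern is assumed to behave like local_irq_save()/local_irq_restore():

unsigned long mflags;

local_mcck_save(mflags);	/* record current state, then disable */
/* ... snapshot and clear this CPU's accumulated mcck state ... */
local_mcck_restore(mflags);	/* put back the state saved above */
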
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 850c11ea631a..41ed6e0f0a2a 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -1193,7 +1193,7 @@ static int __init cpumf_pmu_init(void)
* Clear bit 15 of cr0 to unauthorize problem-state to
* extract measurement counters
*/
- ctl_clear_bit(0, 48);
+ system_ctl_clear_bit(0, CR0_CPUMF_EXTRACTION_AUTH_BIT);
/* register handler for measurement-alert interruptions */
rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index c27321cb0969..dfa77da2fd2e 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -15,7 +15,10 @@
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <linux/sysfs.h>
+#include <asm/stacktrace.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
@@ -212,6 +215,44 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
}
}
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct stack_frame_user __user *sf;
+ unsigned long ip, sp;
+ bool first = true;
+
+ if (is_compat_task())
+ return;
+ perf_callchain_store(entry, instruction_pointer(regs));
+ sf = (void __user *)user_stack_pointer(regs);
+ pagefault_disable();
+ while (entry->nr < entry->max_stack) {
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ if (ip & 0x1) {
+ /*
+ * If the instruction address is invalid, and this
+ * is the first stack frame, assume r14 has not
+ * been written to the stack yet. Otherwise exit.
+ */
+ if (first && !(regs->gprs[14] & 0x1))
+ ip = regs->gprs[14];
+ else
+ break;
+ }
+ perf_callchain_store(entry, ip);
+ /* Sanity check: ABI requires SP to be aligned to 8 bytes. */
+ if (!sp || sp & 0x7)
+ break;
+ sf = (void __user *)sp;
+ first = false;
+ }
+ pagefault_enable();
+}
+
/* Perf definitions for PMU event attributes in sysfs */
ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
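
The new perf_callchain_user() follows the 64-bit ELF ABI back chain: each frame begins with a pointer to the caller's frame and contains a register save area in which the return address (r14) sits at gprs[8], since gprs[] covers r6-r15. The layout assumed by the __get_user() offsets above, mirroring struct stack_frame_user from <asm/stacktrace.h> (shown here for orientation; the header itself is not part of this diff):

struct stack_frame_user {
	unsigned long back_chain;	/* 0 terminates the walk */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved r6..r15; gprs[8] == r14 */
	unsigned long empty2[4];
};
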
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index fe7d1774ded1..bf8a672b15a4 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -16,8 +16,7 @@
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
-
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>
@@ -41,7 +40,43 @@ struct paicrypt_map {
struct perf_event *event; /* Perf event for sampling */
};
-static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);
+struct paicrypt_mapptr {
+ struct paicrypt_map *mapptr;
+};
+
+static struct paicrypt_root { /* Anchor to per CPU data */
+ refcount_t refcnt; /* Overall active events */
+ struct paicrypt_mapptr __percpu *mapptr;
+} paicrypt_root;
+
+/* Free per CPU data when the last event is removed. */
+static void paicrypt_root_free(void)
+{
+ if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
+ free_percpu(paicrypt_root.mapptr);
+ paicrypt_root.mapptr = NULL;
+ }
+ debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
+ refcount_read(&paicrypt_root.refcnt));
+}
+
+/*
+ * On initialization of first event also allocate per CPU data dynamically.
+ * Start with an array of pointers, the array size is the maximum number of
+ * CPUs possible, which might be larger than the number of CPUs currently
+ * online.
+ */
+static int paicrypt_root_alloc(void)
+{
+ if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
+ /* The memory is already zeroed. */
+ paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
+ if (!paicrypt_root.mapptr)
+ return -ENOMEM;
+ refcount_set(&paicrypt_root.refcnt, 1);
+ }
+ return 0;
+}
/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);
@@ -51,7 +86,9 @@ static DEFINE_MUTEX(pai_reserve_mutex);
*/
static void paicrypt_event_destroy(struct perf_event *event)
{
- struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
+ struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
+ event->cpu);
+ struct paicrypt_map *cpump = mp->mapptr;
cpump->event = NULL;
static_branch_dec(&pai_key);
@@ -66,19 +103,19 @@ static void paicrypt_event_destroy(struct perf_event *event)
__func__, (unsigned long)cpump->page,
cpump->save);
free_page((unsigned long)cpump->page);
- cpump->page = NULL;
kvfree(cpump->save);
- cpump->save = NULL;
- cpump->mode = PAI_MODE_NONE;
+ kfree(cpump);
+ mp->mapptr = NULL;
}
+ paicrypt_root_free();
mutex_unlock(&pai_reserve_mutex);
}
-static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
+static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
if (kernel)
nr += PAI_CRYPTO_MAXCTR;
- return cpump->page[nr];
+ return page[nr];
}
/* Read the counter values. Return value from location in CMP. For event
@@ -86,18 +123,19 @@ static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
*/
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
u64 sum = 0;
int i;
if (event->attr.config != PAI_CRYPTO_BASE) {
- return paicrypt_getctr(cpump,
+ return paicrypt_getctr(cpump->page,
event->attr.config - PAI_CRYPTO_BASE,
kernel);
}
for (i = 1; i <= paicrypt_cnt; i++) {
- u64 val = paicrypt_getctr(cpump, i, kernel);
+ u64 val = paicrypt_getctr(cpump->page, i, kernel);
if (!val)
continue;
@@ -132,11 +170,31 @@ static u64 paicrypt_getall(struct perf_event *event)
*
* Allocate the memory for the event.
*/
-static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
+static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
{
- int rc = 0;
+ struct perf_event_attr *a = &event->attr;
+ struct paicrypt_map *cpump = NULL;
+ struct paicrypt_mapptr *mp;
+ int rc;
mutex_lock(&pai_reserve_mutex);
+
+ /* Allocate root node */
+ rc = paicrypt_root_alloc();
+ if (rc)
+ goto unlock;
+
+ /* Allocate node for this event */
+ mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
+ cpump = mp->mapptr;
+ if (!cpump) { /* Paicrypt_map allocated? */
+ cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
+ if (!cpump) {
+ rc = -ENOMEM;
+ goto free_root;
+ }
+ }
+
if (a->sample_period) { /* Sampling requested */
if (cpump->mode != PAI_MODE_NONE)
rc = -EBUSY; /* ... sampling/counting active */
@@ -144,8 +202,15 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
if (cpump->mode == PAI_MODE_SAMPLING)
rc = -EBUSY; /* ... and sampling active */
}
+ /*
+ * This error case triggers when there is a conflict:
+ * Either sampling requested and counting already active, or vice
+ * versa. Therefore the struct paicrypt_map for this CPU is
+ * needed or the error could not have occurred. Only adjust root
+ * node refcount.
+ */
if (rc)
- goto unlock;
+ goto free_root;
/* Allocate memory for counter page and counter extraction.
* Only the first counting event has to allocate a page.
@@ -158,30 +223,36 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
rc = -ENOMEM;
cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!cpump->page)
- goto unlock;
+ goto free_paicrypt_map;
cpump->save = kvmalloc_array(paicrypt_cnt + 1,
sizeof(struct pai_userdata), GFP_KERNEL);
if (!cpump->save) {
free_page((unsigned long)cpump->page);
cpump->page = NULL;
- goto unlock;
+ goto free_paicrypt_map;
}
+
+ /* Set mode and reference count */
rc = 0;
refcount_set(&cpump->refcnt, 1);
-
-unlock:
- /* If rc is non-zero, do not set mode and reference count */
- if (!rc) {
- cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
- : PAI_MODE_COUNTING;
- }
+ cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
+ mp->mapptr = cpump;
debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
" mode %d refcnt %u page %#lx save %p rc %d\n",
__func__, a->sample_period, cpump->active_events,
cpump->mode, refcount_read(&cpump->refcnt),
(unsigned long)cpump->page, cpump->save, rc);
+ goto unlock;
+
+free_paicrypt_map:
+ kfree(cpump);
+ mp->mapptr = NULL;
+free_root:
+ paicrypt_root_free();
+
+unlock:
mutex_unlock(&pai_reserve_mutex);
- return rc;
+ return rc ? ERR_PTR(rc) : cpump;
}
/* Might be called on different CPU than the one the event is intended for. */
@@ -189,7 +260,6 @@ static int paicrypt_event_init(struct perf_event *event)
{
struct perf_event_attr *a = &event->attr;
struct paicrypt_map *cpump;
- int rc;
/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
@@ -199,24 +269,16 @@ static int paicrypt_event_init(struct perf_event *event)
a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
return -EINVAL;
/* Allow only CPU wide operation, no process context for now. */
- if (event->hw.target || event->cpu == -1)
+ if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
return -ENOENT;
/* Allow only CRYPTO_ALL for sampling. */
if (a->sample_period && a->config != PAI_CRYPTO_BASE)
return -EINVAL;
- cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
- rc = paicrypt_busy(a, cpump);
- if (rc)
- return rc;
+ cpump = paicrypt_busy(event);
+ if (IS_ERR(cpump))
+ return PTR_ERR(cpump);
- /* Event initialization sets last_tag to 0. When later on the events
- * are deleted and re-added, do not reset the event count value to zero.
- * Events are added, deleted and re-added when 2 or more events
- * are active at the same time.
- */
- event->hw.last_tag = 0;
- cpump->event = event;
event->destroy = paicrypt_event_destroy;
if (a->sample_period) {
@@ -250,52 +312,57 @@ static void paicrypt_start(struct perf_event *event, int flags)
{
u64 sum;
- if (!event->hw.last_tag) {
- event->hw.last_tag = 1;
- sum = paicrypt_getall(event); /* Get current value */
- local64_set(&event->count, 0);
- local64_set(&event->hw.prev_count, sum);
+ /* Event initialization sets last_tag to 0. When later on the events
+ * are deleted and re-added, do not reset the event count value to zero.
+ * Events are added, deleted and re-added when 2 or more events
+ * are active at the same time.
+ */
+ if (!event->attr.sample_period) { /* Counting */
+ if (!event->hw.last_tag) {
+ event->hw.last_tag = 1;
+ sum = paicrypt_getall(event); /* Get current value */
+ local64_set(&event->hw.prev_count, sum);
+ }
+ } else { /* Sampling */
+ perf_sched_cb_inc(event->pmu);
}
}
static int paicrypt_add(struct perf_event *event, int flags)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
unsigned long ccd;
if (++cpump->active_events == 1) {
ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
WRITE_ONCE(S390_lowcore.ccd, ccd);
- __ctl_set_bit(0, 50);
+ local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
}
cpump->event = event;
- if (flags & PERF_EF_START && !event->attr.sample_period) {
- /* Only counting needs initial counter value */
+ if (flags & PERF_EF_START)
paicrypt_start(event, PERF_EF_RELOAD);
- }
event->hw.state = 0;
- if (event->attr.sample_period)
- perf_sched_cb_inc(event->pmu);
return 0;
}
static void paicrypt_stop(struct perf_event *event, int flags)
{
- paicrypt_read(event);
+ if (!event->attr.sample_period) /* Counting */
+ paicrypt_read(event);
+ else /* Sampling */
+ perf_sched_cb_dec(event->pmu);
event->hw.state = PERF_HES_STOPPED;
}
static void paicrypt_del(struct perf_event *event, int flags)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
- if (event->attr.sample_period)
- perf_sched_cb_dec(event->pmu);
- if (!event->attr.sample_period)
- /* Only counting needs to read counter */
- paicrypt_stop(event, PERF_EF_UPDATE);
+ paicrypt_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
- __ctl_clear_bit(0, 50);
+ local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
WRITE_ONCE(S390_lowcore.ccd, 0);
}
}
@@ -305,8 +372,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
* 2 bytes: Number of counter
* 8 bytes: Value of counter
*/
-static size_t paicrypt_copy(struct pai_userdata *userdata,
- struct paicrypt_map *cpump,
+static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
bool exclude_user, bool exclude_kernel)
{
int i, outidx = 0;
@@ -315,9 +381,9 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,
u64 val = 0;
if (!exclude_kernel)
- val += paicrypt_getctr(cpump, i, true);
+ val += paicrypt_getctr(page, i, true);
if (!exclude_user)
- val += paicrypt_getctr(cpump, i, false);
+ val += paicrypt_getctr(page, i, false);
if (val) {
userdata[outidx].num = i;
userdata[outidx].value = val;
@@ -327,24 +393,14 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,
return outidx * sizeof(struct pai_userdata);
}
-static int paicrypt_push_sample(void)
+static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
+ struct perf_event *event)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
- struct perf_event *event = cpump->event;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
- size_t rawsize;
int overflow;
- if (!cpump->event) /* No event active */
- return 0;
- rawsize = paicrypt_copy(cpump->save, cpump,
- cpump->event->attr.exclude_user,
- cpump->event->attr.exclude_kernel);
- if (!rawsize) /* No incremented counters */
- return 0;
-
/* Setup perf sample */
memset(&regs, 0, sizeof(regs));
memset(&raw, 0, sizeof(raw));
@@ -375,6 +431,25 @@ static int paicrypt_push_sample(void)
return overflow;
}
+/* Check if there is data to be saved on schedule out of a task. */
+static int paicrypt_have_sample(void)
+{
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
+ struct perf_event *event = cpump->event;
+ size_t rawsize;
+ int rc = 0;
+
+ if (!event) /* No event active */
+ return 0;
+ rawsize = paicrypt_copy(cpump->save, cpump->page,
+ cpump->event->attr.exclude_user,
+ cpump->event->attr.exclude_kernel);
+ if (rawsize) /* Incremented counters */
+ rc = paicrypt_push_sample(rawsize, cpump, event);
+ return rc;
+}
+
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event CRYPTO_ALL is allowed.
*/
@@ -384,7 +459,7 @@ static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sch
* results on schedule_out and if page was dirty, clear values.
*/
if (!sched_in)
- paicrypt_push_sample();
+ paicrypt_have_sample();
}
/* Attribute definitions for paicrypt interface. As with other CPU
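
The rework replaces the static per-CPU paicrypt_map with a two-level, demand-allocated structure: a refcounted root owning an alloc_percpu() array of pointers, plus per-CPU nodes created the first time an event targets that CPU, so memory exists only while at least one event holds a reference. A condensed sketch of the lookup-or-allocate step (names from this patch; locking and error paths trimmed):

struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
struct paicrypt_map *cpump = mp->mapptr;

if (!cpump) {
	/* First event for this CPU: allocate its node. */
	cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
	if (cpump)
		mp->mapptr = cpump;
}
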
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index c57c1a203256..af7f2b538c8f 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -17,8 +17,7 @@
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
-
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>
@@ -249,7 +248,7 @@ static int paiext_event_init(struct perf_event *event)
if (rc)
return rc;
/* Allow only CPU wide operation, no process context for now. */
- if (event->hw.target || event->cpu == -1)
+ if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
return -ENOENT;
/* Allow only event NNPA_ALL for sampling. */
if (a->sample_period && a->config != PAI_NNPA_BASE)
@@ -261,7 +260,6 @@ static int paiext_event_init(struct perf_event *event)
rc = paiext_alloc(a, event);
if (rc)
return rc;
- event->hw.last_tag = 0;
event->destroy = paiext_event_destroy;
if (a->sample_period) {
@@ -278,9 +276,9 @@ static int paiext_event_init(struct perf_event *event)
return 0;
}
-static u64 paiext_getctr(struct paiext_map *cpump, int nr)
+static u64 paiext_getctr(unsigned long *area, int nr)
{
- return cpump->area[nr];
+ return area[nr];
}
/* Read the counter values. Return value from location in buffer. For event
@@ -294,10 +292,11 @@ static u64 paiext_getdata(struct perf_event *event)
int i;
if (event->attr.config != PAI_NNPA_BASE)
- return paiext_getctr(cpump, event->attr.config - PAI_NNPA_BASE);
+ return paiext_getctr(cpump->area,
+ event->attr.config - PAI_NNPA_BASE);
for (i = 1; i <= paiext_cnt; i++)
- sum += paiext_getctr(cpump, i);
+ sum += paiext_getctr(cpump->area, i);
return sum;
}
@@ -322,12 +321,15 @@ static void paiext_start(struct perf_event *event, int flags)
{
u64 sum;
- if (event->hw.last_tag)
- return;
- event->hw.last_tag = 1;
- sum = paiext_getall(event); /* Get current value */
- local64_set(&event->hw.prev_count, sum);
- local64_set(&event->count, 0);
+ if (!event->attr.sample_period) { /* Counting */
+ if (!event->hw.last_tag) {
+ event->hw.last_tag = 1;
+ sum = paiext_getall(event); /* Get current value */
+ local64_set(&event->hw.prev_count, sum);
+ }
+ } else { /* Sampling */
+ perf_sched_cb_inc(event->pmu);
+ }
}
static int paiext_add(struct perf_event *event, int flags)
@@ -340,25 +342,23 @@ static int paiext_add(struct perf_event *event, int flags)
S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
pcb->acc = virt_to_phys(cpump->area) | 0x1;
/* Enable CPU instruction lookup for PAIE1 control block */
- __ctl_set_bit(0, 49);
+ local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
__func__, S390_lowcore.aicd, pcb->acc);
}
- if (flags & PERF_EF_START && !event->attr.sample_period) {
- /* Only counting needs initial counter value */
+ cpump->event = event;
+ if (flags & PERF_EF_START)
paiext_start(event, PERF_EF_RELOAD);
- }
event->hw.state = 0;
- if (event->attr.sample_period) {
- cpump->event = event;
- perf_sched_cb_inc(event->pmu);
- }
return 0;
}
static void paiext_stop(struct perf_event *event, int flags)
{
- paiext_read(event);
+ if (!event->attr.sample_period) /* Counting */
+ paiext_read(event);
+ else /* Sampling */
+ perf_sched_cb_dec(event->pmu);
event->hw.state = PERF_HES_STOPPED;
}
@@ -368,15 +368,10 @@ static void paiext_del(struct perf_event *event, int flags)
struct paiext_map *cpump = mp->mapptr;
struct paiext_cb *pcb = cpump->paiext_cb;
- if (event->attr.sample_period)
- perf_sched_cb_dec(event->pmu);
- if (!event->attr.sample_period) {
- /* Only counting needs to read counter */
- paiext_stop(event, PERF_EF_UPDATE);
- }
+ paiext_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
/* Disable CPU instruction lookup for PAIE1 control block */
- __ctl_clear_bit(0, 49);
+ local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
pcb->acc = 0;
S390_lowcore.aicd = 0;
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
@@ -389,13 +384,12 @@ static void paiext_del(struct perf_event *event, int flags)
* 2 bytes: Number of counter
* 8 bytes: Value of counter
*/
-static size_t paiext_copy(struct paiext_map *cpump)
+static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area)
{
- struct pai_userdata *userdata = cpump->save;
int i, outidx = 0;
for (i = 1; i <= paiext_cnt; i++) {
- u64 val = paiext_getctr(cpump, i);
+ u64 val = paiext_getctr(area, i);
if (val) {
userdata[outidx].num = i;
@@ -421,21 +415,14 @@ static size_t paiext_copy(struct paiext_map *cpump)
* sched_task() callback. That callback is not active after paiext_del()
* returns and has deleted the event on that CPU.
*/
-static int paiext_push_sample(void)
+static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
+ struct perf_event *event)
{
- struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
- struct paiext_map *cpump = mp->mapptr;
- struct perf_event *event = cpump->event;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
- size_t rawsize;
int overflow;
- rawsize = paiext_copy(cpump);
- if (!rawsize) /* No incremented counters */
- return 0;
-
/* Setup perf sample */
memset(&regs, 0, sizeof(regs));
memset(&raw, 0, sizeof(raw));
@@ -464,6 +451,23 @@ static int paiext_push_sample(void)
return overflow;
}
+/* Check if there is data to be saved on schedule out of a task. */
+static int paiext_have_sample(void)
+{
+ struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
+ struct paiext_map *cpump = mp->mapptr;
+ struct perf_event *event = cpump->event;
+ size_t rawsize;
+ int rc = 0;
+
+ if (!event)
+ return 0;
+ rawsize = paiext_copy(cpump->save, cpump->area);
+ if (rawsize) /* Incremented counters */
+ rc = paiext_push_sample(rawsize, cpump, event);
+ return rc;
+}
+
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event NNPA_ALL is allowed.
*/
@@ -473,7 +477,7 @@ static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched
* results on schedule_out and if page was dirty, clear values.
*/
if (!sched_in)
- paiext_push_sample();
+ paiext_have_sample();
}
/* Attribute definitions for pai extension1 interface. As with other CPU
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index 6e9e5d5e927e..3d93656bd948 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -20,8 +20,10 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;
idx -= PERF_REG_S390_FP0;
- fp = MACHINE_HAS_VX ? *(freg_t *)(current->thread.fpu.vxrs + idx)
- : current->thread.fpu.fprs[idx];
+ if (cpu_has_vx())
+ fp = *(freg_t *)(current->thread.fpu.vxrs + idx);
+ else
+ fp = current->thread.fpu.fprs[idx];
return fp.ui;
}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 258000417724..4e3b366589fb 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -89,7 +89,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
*/
save_fpu_regs();
- memcpy(dst, src, arch_task_struct_size);
+ *dst = *src;
dst->thread.fpu.regs = dst->thread.fpu.fprs;
/*
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 0a999c8226d7..65c1464eea4f 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -201,11 +201,8 @@ static int __init setup_hwcaps(void)
if (MACHINE_HAS_TE)
elf_hwcap |= HWCAP_TE;
- /*
- * Vector extension can be disabled with the "novx" parameter.
- * Use MACHINE_HAS_VX instead of facility bit 129.
- */
- if (MACHINE_HAS_VX) {
+ /* vector */
+ if (test_facility(129)) {
elf_hwcap |= HWCAP_VXRS;
if (test_facility(134))
elf_hwcap |= HWCAP_VXRS_BCD;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index ea244a73efad..f1897a8bb221 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -30,6 +30,7 @@
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>
+#include <asm/fpu/api.h>
#include "entry.h"
@@ -41,13 +42,20 @@ void update_cr_regs(struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
- struct per_regs old, new;
union ctlreg0 cr0_old, cr0_new;
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
-
- __ctl_store(cr0_old.val, 0, 0);
- __ctl_store(cr2_old.val, 2, 2);
+ union {
+ struct ctlreg regs[3];
+ struct {
+ struct ctlreg control;
+ struct ctlreg start;
+ struct ctlreg end;
+ };
+ } old, new;
+
+ local_ctl_store(0, &cr0_old.reg);
+ local_ctl_store(2, &cr2_old.reg);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
@@ -75,38 +83,38 @@ void update_cr_regs(struct task_struct *task)
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- __ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.reg);
if (cr2_changed)
- __ctl_load(cr2_new.val, 2, 2);
+ local_ctl_load(2, &cr2_new.reg);
/* Copy user specified PER registers */
- new.control = thread->per_user.control;
- new.start = thread->per_user.start;
- new.end = thread->per_user.end;
+ new.control.val = thread->per_user.control;
+ new.start.val = thread->per_user.start;
+ new.end.val = thread->per_user.end;
/* merge TIF_SINGLE_STEP into user specified PER registers. */
if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
- new.control |= PER_EVENT_BRANCH;
+ new.control.val |= PER_EVENT_BRANCH;
else
- new.control |= PER_EVENT_IFETCH;
- new.control |= PER_CONTROL_SUSPENSION;
- new.control |= PER_EVENT_TRANSACTION_END;
+ new.control.val |= PER_EVENT_IFETCH;
+ new.control.val |= PER_CONTROL_SUSPENSION;
+ new.control.val |= PER_EVENT_TRANSACTION_END;
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
- new.control |= PER_EVENT_IFETCH;
- new.start = 0;
- new.end = -1UL;
+ new.control.val |= PER_EVENT_IFETCH;
+ new.start.val = 0;
+ new.end.val = -1UL;
}
/* Take care of the PER enablement bit in the PSW. */
- if (!(new.control & PER_EVENT_MASK)) {
+ if (!(new.control.val & PER_EVENT_MASK)) {
regs->psw.mask &= ~PSW_MASK_PER;
return;
}
regs->psw.mask |= PSW_MASK_PER;
- __ctl_store(old, 9, 11);
+ __local_ctl_store(9, 11, old.regs);
if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
- __ctl_load(new, 9, 11);
+ __local_ctl_load(9, 11, new.regs);
}
void user_enable_single_step(struct task_struct *task)
@@ -247,7 +255,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
tmp = *(addr_t *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
@@ -385,8 +393,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* floating point control reg. is in the thread structure
*/
- if ((unsigned int) data != 0 ||
- test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+ if ((unsigned int)data != 0)
return -EINVAL;
child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
@@ -396,7 +403,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
*(addr_t *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = data;
else
@@ -623,7 +630,7 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
tmp = *(__u32 *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
@@ -741,8 +748,6 @@ static int __poke_user_compat(struct task_struct *child,
/*
* floating point control reg. is in the thread structure
*/
- if (test_fp_ctl(tmp))
- return -EINVAL;
child->thread.fpu.fpc = data;
} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
@@ -751,7 +756,7 @@ static int __poke_user_compat(struct task_struct *child,
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
*(__u32 *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = tmp;
else
@@ -907,19 +912,18 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
else
memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
- /* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
0, offsetof(s390_fp_regs, fprs));
if (rc)
return rc;
- if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
+ if (ufpc[1] != 0)
return -EINVAL;
target->thread.fpu.fpc = ufpc[0];
}
@@ -930,7 +934,7 @@ static int s390_fpregs_set(struct task_struct *target,
if (rc)
return rc;
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
else
memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
@@ -981,7 +985,7 @@ static int s390_vxrs_low_get(struct task_struct *target,
__u64 vxrs[__NUM_VXRS_LOW];
int i;
- if (!MACHINE_HAS_VX)
+ if (!cpu_has_vx())
return -ENODEV;
if (target == current)
save_fpu_regs();
@@ -998,7 +1002,7 @@ static int s390_vxrs_low_set(struct task_struct *target,
__u64 vxrs[__NUM_VXRS_LOW];
int i, rc;
- if (!MACHINE_HAS_VX)
+ if (!cpu_has_vx())
return -ENODEV;
if (target == current)
save_fpu_regs();
@@ -1018,7 +1022,7 @@ static int s390_vxrs_high_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
- if (!MACHINE_HAS_VX)
+ if (!cpu_has_vx())
return -ENODEV;
if (target == current)
save_fpu_regs();
@@ -1033,7 +1037,7 @@ static int s390_vxrs_high_set(struct task_struct *target,
{
int rc;
- if (!MACHINE_HAS_VX)
+ if (!cpu_has_vx())
return -ENODEV;
if (target == current)
save_fpu_regs();
@@ -1107,7 +1111,7 @@ static int s390_gs_cb_set(struct task_struct *target,
target->thread.gs_cb = data;
*target->thread.gs_cb = gs_cb;
if (target == current) {
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
restore_gs_cb(target->thread.gs_cb);
}
preempt_enable();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c744104e4a9c..d1f3b56e7afc 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -97,10 +97,10 @@ EXPORT_SYMBOL(console_irq);
* relocated above 2 GB, because it has to use 31 bit addresses.
* Such code and data is part of the .amode31 section.
*/
-unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
-unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
-unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
-unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
+char __amode31_ref *__samode31 = _samode31;
+char __amode31_ref *__eamode31 = _eamode31;
+char __amode31_ref *__stext_amode31 = _stext_amode31;
+char __amode31_ref *__etext_amode31 = _etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
@@ -145,7 +145,6 @@ static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
-int __bootdata(noexec_disabled);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
struct physmem_info __bootdata(physmem_info);
@@ -306,7 +305,7 @@ static void __init setup_zfcpdump(void)
return;
if (oldmem_data.start)
return;
- strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
+ strlcat(boot_command_line, " cio_ignore=all,!ipldev,!condev", COMMAND_LINE_SIZE);
console_loglevel = 2;
}
#else
@@ -382,12 +381,6 @@ void stack_free(unsigned long stack)
#endif
}
-void __init __noreturn arch_call_rest_init(void)
-{
- smp_reinit_ipl_cpu();
- rest_init();
-}
-
static unsigned long __init stack_alloc_early(void)
{
unsigned long stack;
@@ -415,15 +408,15 @@ static void __init setup_lowcore(void)
lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
lc->restart_psw.addr = __pa(restart_int_handler);
- lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+ lc->external_new_psw.mask = PSW_KERNEL_BITS;
lc->external_new_psw.addr = (unsigned long) ext_int_handler;
- lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+ lc->svc_new_psw.mask = PSW_KERNEL_BITS;
lc->svc_new_psw.addr = (unsigned long) system_call;
- lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+ lc->program_new_psw.mask = PSW_KERNEL_BITS;
lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
- lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
+ lc->io_new_psw.mask = PSW_KERNEL_BITS;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
lc->current_task = (unsigned long)&init_task;
@@ -456,7 +449,6 @@ static void __init setup_lowcore(void)
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1U;
- __ctl_store(lc->cregs_save_area, 0, 15);
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
@@ -466,6 +458,7 @@ static void __init setup_lowcore(void)
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->user_asce = S390_lowcore.user_asce;
+ system_ctlreg_init_save_area(lc);
abs_lc = get_abs_lowcore();
abs_lc->restart_stack = lc->restart_stack;
abs_lc->restart_fn = lc->restart_fn;
@@ -473,7 +466,6 @@ static void __init setup_lowcore(void)
abs_lc->restart_source = lc->restart_source;
abs_lc->restart_psw = lc->restart_psw;
abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
- memcpy(abs_lc->cregs_save_area, lc->cregs_save_area, sizeof(abs_lc->cregs_save_area));
abs_lc->program_new_psw = lc->program_new_psw;
abs_lc->mcesad = lc->mcesad;
put_abs_lowcore(abs_lc);
@@ -626,8 +618,8 @@ static void __init reserve_crashkernel(void)
phys_addr_t low, high;
int rc;
- rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
- &crash_base);
+ rc = parse_crashkernel(boot_command_line, ident_map_size,
+ &crash_size, &crash_base, NULL, NULL);
crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
@@ -771,15 +763,15 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void)
{
unsigned long amode31_size = __eamode31 - __samode31;
- long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31;
- long *ptr;
+ long amode31_offset, *ptr;
+ amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */
- memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size);
+ memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */
- memset((void *)__samode31, 0, amode31_size);
+ memset(__samode31, 0, amode31_size);
/* Update all AMODE31 region references */
for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
@@ -798,15 +790,15 @@ static void __init setup_cr(void)
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
- __ctl_store(cr2.val, 2, 2);
- __ctl_store(cr5.val, 5, 5);
- __ctl_store(cr15.val, 15, 15);
+ local_ctl_store(2, &cr2.reg);
+ local_ctl_store(5, &cr5.reg);
+ local_ctl_store(15, &cr15.reg);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
- __ctl_load(cr2.val, 2, 2);
- __ctl_load(cr5.val, 5, 5);
- __ctl_load(cr15.val, 15, 15);
+ system_ctl_load(2, &cr2.reg);
+ system_ctl_load(5, &cr5.reg);
+ system_ctl_load(15, &cr15.reg);
}
/*
@@ -828,22 +820,6 @@ static void __init setup_randomness(void)
}
/*
- * Find the correct size for the task_struct. This depends on
- * the size of the struct fpu at the end of the thread_struct
- * which is embedded in the task_struct.
- */
-static void __init setup_task_size(void)
-{
- int task_size = sizeof(struct task_struct);
-
- if (!MACHINE_HAS_VX) {
- task_size -= sizeof(__vector128) * __NUM_VXRS;
- task_size += sizeof(freg_t) * __NUM_FPRS;
- }
- arch_task_struct_size = task_size;
-}
-
-/*
* Issue diagnose 318 to set the control program name and
* version codes.
*/
@@ -935,7 +911,6 @@ void __init setup_arch(char **cmdline_p)
os_info_init();
setup_ipl();
- setup_task_size();
setup_control_program_code();
/* Do some memory reservations *before* memory is added to memblock */
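
[Note on the setup.c hunks: they are part of the ctlreg rework visible throughout this diff. Raw __ctl_store()/__ctl_load() pairs on unsigned longs become typed struct ctlreg accessors, and the system_ctl_* variants additionally keep the control-register save area, used when bringing up new CPUs, in sync. A reduced sketch of the setup_cr() pattern above, for one register only; field names are taken from the hunk and the unions live in asm/ctlreg.h:]

	union ctlreg2 cr2;

	local_ctl_store(2, &cr2.reg);		/* stctg %c2 on this CPU */
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	system_ctl_load(2, &cr2.reg);		/* write back and update the
						 * save area for new CPUs */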
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d63557d3868c..43e9661cd715 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <linux/rseq.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
@@ -149,10 +150,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
return -EINVAL;
- /* Test the floating-point-control word. */
- if (test_fp_ctl(user_sregs.fpregs.fpc))
- return -EINVAL;
-
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
@@ -182,7 +179,7 @@ static int save_sigregs_ext(struct pt_regs *regs,
int i;
/* Save vector registers to signal stack */
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = current->thread.fpu.vxrs[i].low;
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -202,7 +199,7 @@ static int restore_sigregs_ext(struct pt_regs *regs,
int i;
/* Restore vector registers from signal stack */
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -300,7 +297,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
* included in the signal frame on a 31-bit system.
*/
frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
frame_size += sizeof(frame->sregs_ext);
frame = get_sigframe(ka, regs, frame_size);
if (frame == (void __user *) -1UL)
@@ -377,7 +374,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
* included in the signal frame on a 31-bit system.
*/
uc_flags = 0;
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
frame_size += sizeof(_sigregs_ext);
uc_flags |= UC_VXRS;
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a4edb7ea66ea..c39d9f0d4b1c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
+#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
@@ -567,54 +568,6 @@ void arch_irq_work_raise(void)
}
#endif
-/*
- * parameter area for the set/clear control bit callbacks
- */
-struct ec_creg_mask_parms {
- unsigned long orval;
- unsigned long andval;
- int cr;
-};
-
-/*
- * callback for setting/clearing control bits
- */
-static void smp_ctl_bit_callback(void *info)
-{
- struct ec_creg_mask_parms *pp = info;
- unsigned long cregs[16];
-
- __ctl_store(cregs, 0, 15);
- cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
- __ctl_load(cregs, 0, 15);
-}
-
-static DEFINE_SPINLOCK(ctl_lock);
-
-void smp_ctl_set_clear_bit(int cr, int bit, bool set)
-{
- struct ec_creg_mask_parms parms = { .cr = cr, };
- struct lowcore *abs_lc;
- u64 ctlreg;
-
- if (set) {
- parms.orval = 1UL << bit;
- parms.andval = -1UL;
- } else {
- parms.orval = 0;
- parms.andval = ~(1UL << bit);
- }
- spin_lock(&ctl_lock);
- abs_lc = get_abs_lowcore();
- ctlreg = abs_lc->cregs_save_area[cr];
- ctlreg = (ctlreg & parms.andval) | parms.orval;
- abs_lc->cregs_save_area[cr] = ctlreg;
- put_abs_lowcore(abs_lc);
- on_each_cpu(smp_ctl_bit_callback, &parms, 1);
- spin_unlock(&ctl_lock);
-}
-EXPORT_SYMBOL(smp_ctl_set_clear_bit);
-
#ifdef CONFIG_CRASH_DUMP
int smp_store_status(int cpu)
@@ -629,7 +582,7 @@ int smp_store_status(int cpu)
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
- if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
+ if (!cpu_has_vx() && !MACHINE_HAS_GS)
return 0;
pa = lc->mcesad & MCESA_ORIGIN_MASK;
if (MACHINE_HAS_GS)
@@ -685,7 +638,7 @@ void __init smp_save_dump_ipl_cpu(void)
copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
save_area_add_regs(sa, regs);
memblock_free(regs, 512);
- if (MACHINE_HAS_VX)
+ if (cpu_has_vx())
save_area_add_vxrs(sa, boot_cpu_vector_save_area);
}
@@ -718,7 +671,7 @@ void __init smp_save_dump_secondary_cpus(void)
panic("could not allocate memory for save area\n");
__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
save_area_add_regs(sa, page);
- if (MACHINE_HAS_VX) {
+ if (cpu_has_vx()) {
__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
save_area_add_vxrs(sa, page);
}
@@ -898,7 +851,7 @@ static void smp_start_secondary(void *cpuvoid)
S390_lowcore.restart_flags = 0;
restore_access_regs(S390_lowcore.access_regs_save_area);
cpu_init();
- rcu_cpu_starting(cpu);
+ rcutree_report_cpu_starting(cpu);
init_cpu_timer();
vtime_init();
vdso_getcpu_init();
@@ -935,14 +888,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
* Make sure global control register contents do not change
* until new CPU has initialized control registers.
*/
- spin_lock(&ctl_lock);
+ system_ctlreg_lock();
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
/* Wait until cpu puts itself in the online & active maps */
while (!cpu_online(cpu))
cpu_relax();
- spin_unlock(&ctl_lock);
+ system_ctlreg_unlock();
return 0;
}
@@ -957,7 +910,7 @@ early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
- unsigned long cregs[16];
+ struct ctlreg cregs[16];
int cpu;
/* Handle possible pending IPIs */
@@ -969,11 +922,11 @@ int __cpu_disable(void)
/* Disable pseudo page faults on this cpu. */
pfault_fini();
/* Disable interrupt sources via control register. */
- __ctl_store(cregs, 0, 15);
- cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
- cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
- cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
- __ctl_load(cregs, 0, 15);
+ __local_ctl_store(0, 15, cregs);
+ cregs[0].val &= ~0x0000ee70UL; /* disable all external interrupts */
+ cregs[6].val &= ~0xff000000UL; /* disable all I/O interrupts */
+ cregs[14].val &= ~0x1f000000UL; /* disable most machine checks */
+ __local_ctl_load(0, 15, cregs);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
@@ -1013,12 +966,12 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- /* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
- /* request the 0x1202 external call external interrupt */
+ system_ctl_set_bit(0, 14);
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
+ system_ctl_set_bit(0, 13);
}
void __init smp_prepare_boot_cpu(void)
@@ -1076,11 +1029,9 @@ static ssize_t cpu_configure_store(struct device *dev,
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
rc = -EBUSY;
- /* disallow configuration changes of online cpus and cpu 0 */
+ /* disallow configuration changes of online cpus */
cpu = dev->id;
cpu = smp_get_base_cpu(cpu);
- if (cpu == 0)
- goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
if (cpu_online(cpu + i))
goto out;
@@ -1180,7 +1131,7 @@ static int smp_add_present_cpu(int cpu)
return -ENOMEM;
per_cpu(cpu_device, cpu) = c;
s = &c->dev;
- c->hotpluggable = 1;
+ c->hotpluggable = !!cpu;
rc = register_cpu(c, cpu);
if (rc)
goto out;
@@ -1258,60 +1209,3 @@ out:
return rc;
}
subsys_initcall(s390_smp_init);
-
-static __always_inline void set_new_lowcore(struct lowcore *lc)
-{
- union register_pair dst, src;
- u32 pfx;
-
- src.even = (unsigned long) &S390_lowcore;
- src.odd = sizeof(S390_lowcore);
- dst.even = (unsigned long) lc;
- dst.odd = sizeof(*lc);
- pfx = __pa(lc);
-
- asm volatile(
- " mvcl %[dst],%[src]\n"
- " spx %[pfx]\n"
- : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
- : [pfx] "Q" (pfx)
- : "memory", "cc");
-}
-
-int __init smp_reinit_ipl_cpu(void)
-{
- unsigned long async_stack, nodat_stack, mcck_stack;
- struct lowcore *lc, *lc_ipl;
- unsigned long flags, cr0;
- u64 mcesad;
-
- lc_ipl = lowcore_ptr[0];
- lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- async_stack = stack_alloc();
- mcck_stack = stack_alloc();
- if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
- panic("Couldn't allocate memory");
-
- local_irq_save(flags);
- local_mcck_disable();
- set_new_lowcore(lc);
- S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
- S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
- S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
- __ctl_store(cr0, 0, 0);
- __ctl_clear_bit(0, 28); /* disable lowcore protection */
- S390_lowcore.mcesad = mcesad;
- __ctl_load(cr0, 0, 0);
- if (abs_lowcore_map(0, lc, false))
- panic("Couldn't remap absolute lowcore");
- lowcore_ptr[0] = lc;
- local_mcck_enable();
- local_irq_restore(flags);
-
- memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
- memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
- memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
- memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
- return 0;
-}
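
[Note on the smp.c hunks: the removed smp_ctl_set_clear_bit() machinery, a per-CPU IPI broadcast plus cregs_save_area update under ctl_lock, moves into the new ctlreg.c behind system_ctl_set_bit()/system_ctl_clear_bit(), serialized by the same system_ctlreg_lock() that __cpu_up() now takes. Call sites convert mechanically; an illustrative pairing, using the bit from the smp_prepare_cpus() hunk above:]

	/* before: open-coded broadcast, caller supplies a set/clear flag */
	smp_ctl_set_clear_bit(0, 14, true);

	/* after: one helper; locking, the lowcore save-area update and the
	 * on_each_cpu() broadcast all happen inside ctlreg.c */
	system_ctl_set_bit(0, 14);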
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 0787010139f7..94f440e38303 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -6,9 +6,12 @@
*/
#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
+#include <asm/ptrace.h>
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
@@ -58,3 +61,43 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
return -EINVAL;
return 0;
}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ struct stack_frame_user __user *sf;
+ unsigned long ip, sp;
+ bool first = true;
+
+ if (is_compat_task())
+ return;
+ if (!consume_entry(cookie, instruction_pointer(regs)))
+ return;
+ sf = (void __user *)user_stack_pointer(regs);
+ pagefault_disable();
+ while (1) {
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ if (ip & 0x1) {
+ /*
+ * If the instruction address is invalid, and this
+ * is the first stack frame, assume r14 has not
+ * been written to the stack yet. Otherwise exit.
+ */
+ if (first && !(regs->gprs[14] & 0x1))
+ ip = regs->gprs[14];
+ else
+ break;
+ }
+ if (!consume_entry(cookie, ip))
+ break;
+ /* Sanity check: ABI requires SP to be aligned 8 bytes. */
+ if (!sp || sp & 0x7)
+ break;
+ sf = (void __user *)sp;
+ first = false;
+ }
+ pagefault_enable();
+}
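
[Note on the new arch_stack_walk_user(): it depends on the s390 ELF ABI frame layout, with the back chain at offset 0 of every stack frame and the saved return register in the gprs slot read above. For orientation, the layout the walker assumes, recalled from asm/stacktrace.h rather than quoted from this diff, so treat the padding as approximate; gprs[] holds r6..r15, which makes gprs[8] the saved r14:]

	struct stack_frame_user {
		unsigned long back_chain; /* 0x00: previous frame, 0 at top */
		unsigned long empty1[5];
		unsigned long gprs[10];	  /* r6..r15; [8] is the saved r14 */
		unsigned long empty2[4];
	};

[The ip & 0x1 test works because s390 instructions are halfword aligned, so a valid return address is always even.]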
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 0122cc156952..095bb86339a7 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -100,7 +100,7 @@
106 common stat sys_newstat compat_sys_newstat
107 common lstat sys_newlstat compat_sys_newlstat
108 common fstat sys_newfstat compat_sys_newfstat
-110 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+110 common lookup_dcookie - -
111 common vhangup sys_vhangup sys_vhangup
112 common idle - -
114 common wait4 sys_wait4 compat_sys_wait4
@@ -455,3 +455,12 @@
450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue sys_futex_requeue
+457 common statmount sys_statmount sys_statmount
+458 common listmount sys_listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
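
[Note on the syscall.tbl hunk: entries 453-461 wire up this cycle's new system calls. Until libc grows wrappers they are reachable through syscall(2); a small probe, illustrative only: 461 is lsm_list_modules per the table above, and the NULL arguments are meant to provoke any error except ENOSYS on a kernel that has the entry wired up:]

	#define _GNU_SOURCE
	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		long rc = syscall(461, NULL, NULL, 0UL);

		if (rc == -1 && errno == ENOSYS)
			puts("lsm_list_modules: not wired up on this kernel");
		else
			puts("lsm_list_modules: present");
		return 0;
	}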
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index b5e364358ce4..f6f8f498c9be 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -81,10 +81,12 @@ static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
+ bool has_var_cap;
int i;
if (stsi(info, 1, 1, 1))
return;
+ has_var_cap = !!info->model_var_cap[0];
EBCASC(info->manufacturer, sizeof(info->manufacturer));
EBCASC(info->type, sizeof(info->type));
EBCASC(info->model, sizeof(info->model));
@@ -93,6 +95,8 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
EBCASC(info->model_capacity, sizeof(info->model_capacity));
EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
+ if (has_var_cap)
+ EBCASC(info->model_var_cap, sizeof(info->model_var_cap));
seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
seq_printf(m, "Type: %-4.4s\n", info->type);
if (info->lic)
@@ -120,12 +124,18 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
seq_printf(m, "Model Temp. Capacity: %-16.16s %08u\n",
info->model_temp_cap,
info->model_temp_cap_rating);
+ if (has_var_cap && info->model_var_cap_rating)
+ seq_printf(m, "Model Var. Capacity: %-16.16s %08u\n",
+ info->model_var_cap,
+ info->model_var_cap_rating);
if (info->ncr)
seq_printf(m, "Nominal Cap. Rating: %08u\n", info->ncr);
if (info->npr)
seq_printf(m, "Nominal Perm. Rating: %08u\n", info->npr);
if (info->ntr)
seq_printf(m, "Nominal Temp. Rating: %08u\n", info->ntr);
+ if (has_var_cap && info->nvr)
+ seq_printf(m, "Nominal Var. Rating: %08u\n", info->nvr);
if (info->cai) {
seq_printf(m, "Capacity Adj. Ind.: %d\n", info->cai);
seq_printf(m, "Capacity Ch. Reason: %d\n", info->ccr);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d34d3548c046..14abad953c02 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -173,10 +173,10 @@ void init_cpu_timer(void)
clockevents_register_device(cd);
/* Enable clock comparator timer interrupt. */
- __ctl_set_bit(0,11);
+ local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SUBMASK_BIT);
/* Always allow the timing alert external interrupt. */
- __ctl_set_bit(0, 4);
+ local_ctl_set_bit(0, CR0_ETR_SUBMASK_BIT);
}
static void clock_comparator_interrupt(struct ext_code ext_code,
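
[Note on the time.c hunk: the named CR0 bits are drop-in replacements for the magic numbers in the removed lines. Since local_ctl_set_bit() keeps the old LSB-based bit numbering, the mapping can be read straight off the hunk; values inferred from the replaced calls, not quoted from asm/ctlreg.h:]

	#define CR0_CLOCK_COMPARATOR_SUBMASK_BIT  11  /* was __ctl_set_bit(0,11) */
	#define CR0_ETR_SUBMASK_BIT                4  /* was __ctl_set_bit(0, 4) */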
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 68adf1de8888..89e91b8ce842 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -522,7 +522,7 @@ static struct sched_domain_topology_level s390_topology[] = {
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { cpu_cpu_mask, SD_INIT_NAME(PKG) },
{ NULL, },
};
@@ -636,7 +636,6 @@ static struct ctl_table topology_ctl_table[] = {
.mode = 0644,
.proc_handler = topology_ctl_handler,
},
- { },
};
static int __init topology_init(void)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 1d2aa448d103..46dac4540ca8 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -43,10 +43,12 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
return (void __user *) (address - (regs->int_code >> 16));
}
+#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
+#endif
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
@@ -193,7 +195,7 @@ static void vector_exception(struct pt_regs *regs)
{
int si_code, vic;
- if (!MACHINE_HAS_VX) {
+ if (!cpu_has_vx()) {
do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
return;
}
@@ -286,6 +288,17 @@ static void __init test_monitor_call(void)
void __init trap_init(void)
{
+ unsigned long flags;
+ struct ctlreg cr0;
+
+ local_irq_save(flags);
+ cr0 = local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
+ psw_bits(S390_lowcore.external_new_psw).mcheck = 1;
+ psw_bits(S390_lowcore.program_new_psw).mcheck = 1;
+ psw_bits(S390_lowcore.svc_new_psw).mcheck = 1;
+ psw_bits(S390_lowcore.io_new_psw).mcheck = 1;
+ local_ctl_load(0, &cr0);
+ local_irq_restore(flags);
local_mcck_enable();
test_monitor_call();
}
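
[Note on the traps.c hunk: this is the other half of the setup.c change that dropped PSW_MASK_MCHECK from the exception new-PSWs. Machine checks inside those handlers are now enabled only here, late in early setup, by editing the lowcore PSWs in place; low address protection must be cleared around the stores because the prefix area is otherwise write-protected. The psw_bits() accessor used above is the existing bitfield view of a psw_t; a rough sketch, not the authoritative definition:]

	psw_t psw = S390_lowcore.program_new_psw;

	/* same effect as psw.mask |= PSW_MASK_MCHECK, but by field name */
	psw_bits(psw).mcheck = 1;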
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index b771f1b4cdd1..fc07bc39e698 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -258,7 +258,7 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
* shared page from a different protected VM will automatically also
* transfer its ownership.
*/
- if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
+ if (uv_has_feature(BIT_UV_FEAT_MISC))
return false;
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
return false;
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index 23e868b79a6c..caec7db6f966 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -61,16 +61,6 @@ quiet_cmd_vdso32as = VDSO32A $@
quiet_cmd_vdso32cc = VDSO32C $@
cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $<
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso32.so: $(obj)/vdso32.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso32.so
-
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index fc1c6ff8178f..e3c9085f8fa7 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -70,16 +70,6 @@ quiet_cmd_vdso64as = VDSO64A $@
quiet_cmd_vdso64cc = VDSO64C $@
cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso64.so: $(obj)/vdso64.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso64.so
-
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 2ae201ebf90b..e32ef446f451 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
SOFTIRQENTRY_TEXT
FTRACE_HOTPATCH_TRAMPOLINES_TEXT
*(.text.*_indirect_*)
+ *(.fixup)
*(.gnu.warning)
. = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */