Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig  11
-rw-r--r--  arch/s390/Kconfig.debug  4
-rw-r--r--  arch/s390/appldata/appldata_base.c  2
-rw-r--r--  arch/s390/appldata/appldata_os.c  16
-rw-r--r--  arch/s390/boot/compressed/Makefile  5
-rw-r--r--  arch/s390/boot/compressed/head.S  2
-rw-r--r--  arch/s390/boot/compressed/misc.c  4
-rw-r--r--  arch/s390/configs/default_defconfig  34
-rw-r--r--  arch/s390/configs/gcov_defconfig  54
-rw-r--r--  arch/s390/configs/performance_defconfig  38
-rw-r--r--  arch/s390/crypto/Makefile  2
-rw-r--r--  arch/s390/crypto/aes_s390.c  7
-rw-r--r--  arch/s390/crypto/des_s390.c  14
-rw-r--r--  arch/s390/crypto/paes_s390.c  619
-rw-r--r--  arch/s390/crypto/prng.c  48
-rw-r--r--  arch/s390/defconfig  6
-rw-r--r--  arch/s390/hypfs/inode.c  24
-rw-r--r--  arch/s390/include/asm/Kbuild  4
-rw-r--r--  arch/s390/include/asm/asm-offsets.h  1
-rw-r--r--  arch/s390/include/asm/asm-prototypes.h  8
-rw-r--r--  arch/s390/include/asm/atomic.h  207
-rw-r--r--  arch/s390/include/asm/atomic_ops.h  130
-rw-r--r--  arch/s390/include/asm/bitops.h  62
-rw-r--r--  arch/s390/include/asm/cacheflush.h  30
-rw-r--r--  arch/s390/include/asm/checksum.h  2
-rw-r--r--  arch/s390/include/asm/cpacf.h  46
-rw-r--r--  arch/s390/include/asm/cpu_mf.h  18
-rw-r--r--  arch/s390/include/asm/cputime.h  109
-rw-r--r--  arch/s390/include/asm/ctl_reg.h  16
-rw-r--r--  arch/s390/include/asm/device.h  1
-rw-r--r--  arch/s390/include/asm/dma-mapping.h  6
-rw-r--r--  arch/s390/include/asm/elf.h  8
-rw-r--r--  arch/s390/include/asm/facilities_src.h  82
-rw-r--r--  arch/s390/include/asm/hugetlb.h  2
-rw-r--r--  arch/s390/include/asm/idals.h  2
-rw-r--r--  arch/s390/include/asm/ipl.h  2
-rw-r--r--  arch/s390/include/asm/livepatch.h  2
-rw-r--r--  arch/s390/include/asm/lowcore.h  64
-rw-r--r--  arch/s390/include/asm/mmu_context.h  6
-rw-r--r--  arch/s390/include/asm/mutex.h  9
-rw-r--r--  arch/s390/include/asm/pci.h  1
-rw-r--r--  arch/s390/include/asm/pci_clp.h  10
-rw-r--r--  arch/s390/include/asm/pgalloc.h  22
-rw-r--r--  arch/s390/include/asm/pgtable.h  111
-rw-r--r--  arch/s390/include/asm/pkey.h  90
-rw-r--r--  arch/s390/include/asm/preempt.h  137
-rw-r--r--  arch/s390/include/asm/processor.h  37
-rw-r--r--  arch/s390/include/asm/sclp.h  18
-rw-r--r--  arch/s390/include/asm/scsw.h  6
-rw-r--r--  arch/s390/include/asm/setup.h  5
-rw-r--r--  arch/s390/include/asm/smp.h  8
-rw-r--r--  arch/s390/include/asm/spinlock.h  10
-rw-r--r--  arch/s390/include/asm/string.h  11
-rw-r--r--  arch/s390/include/asm/sysinfo.h  7
-rw-r--r--  arch/s390/include/asm/thread_info.h  24
-rw-r--r--  arch/s390/include/asm/timex.h  49
-rw-r--r--  arch/s390/include/asm/tlb.h  14
-rw-r--r--  arch/s390/include/asm/topology.h  28
-rw-r--r--  arch/s390/include/asm/trace/zcrypt.h  122
-rw-r--r--  arch/s390/include/asm/uaccess.h  27
-rw-r--r--  arch/s390/include/asm/vdso.h  2
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild  6
-rw-r--r--  arch/s390/include/uapi/asm/pkey.h  112
-rw-r--r--  arch/s390/include/uapi/asm/socket.h  2
-rw-r--r--  arch/s390/include/uapi/asm/zcrypt.h  37
-rw-r--r--  arch/s390/kernel/Makefile  63
-rw-r--r--  arch/s390/kernel/als.c  13
-rw-r--r--  arch/s390/kernel/asm-offsets.c  17
-rw-r--r--  arch/s390/kernel/compat_linux.c  3
-rw-r--r--  arch/s390/kernel/compat_signal.c  6
-rw-r--r--  arch/s390/kernel/cpcmd.c  2
-rw-r--r--  arch/s390/kernel/crash_dump.c  10
-rw-r--r--  arch/s390/kernel/debug.c  17
-rw-r--r--  arch/s390/kernel/diag.c  3
-rw-r--r--  arch/s390/kernel/dis.c  4
-rw-r--r--  arch/s390/kernel/early.c  81
-rw-r--r--  arch/s390/kernel/early_printk.c  35
-rw-r--r--  arch/s390/kernel/ebcdic.c  4
-rw-r--r--  arch/s390/kernel/entry.S  115
-rw-r--r--  arch/s390/kernel/entry.h  1
-rw-r--r--  arch/s390/kernel/head.S  2
-rw-r--r--  arch/s390/kernel/head64.S  7
-rw-r--r--  arch/s390/kernel/idle.c  23
-rw-r--r--  arch/s390/kernel/ipl.c  15
-rw-r--r--  arch/s390/kernel/irq.c  5
-rw-r--r--  arch/s390/kernel/jump_label.c  1
-rw-r--r--  arch/s390/kernel/kprobes.c  10
-rw-r--r--  arch/s390/kernel/lgr.c  5
-rw-r--r--  arch/s390/kernel/module.c  3
-rw-r--r--  arch/s390/kernel/nmi.c  44
-rw-r--r--  arch/s390/kernel/os_info.c  6
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c  2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_events.c  2
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c  55
-rw-r--r--  arch/s390/kernel/process.c  26
-rw-r--r--  arch/s390/kernel/processor.c  8
-rw-r--r--  arch/s390/kernel/ptrace.c  24
-rw-r--r--  arch/s390/kernel/sclp.c  196
-rw-r--r--  arch/s390/kernel/setup.c  46
-rw-r--r--  arch/s390/kernel/signal.c  16
-rw-r--r--  arch/s390/kernel/smp.c  79
-rw-r--r--  arch/s390/kernel/stacktrace.c  2
-rw-r--r--  arch/s390/kernel/swsusp.S  6
-rw-r--r--  arch/s390/kernel/sys_s390.c  2
-rw-r--r--  arch/s390/kernel/sysinfo.c  35
-rw-r--r--  arch/s390/kernel/time.c  202
-rw-r--r--  arch/s390/kernel/topology.c  72
-rw-r--r--  arch/s390/kernel/traps.c  2
-rw-r--r--  arch/s390/kernel/vdso.c  2
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S  23
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S  23
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  11
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S  11
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S  9
-rw-r--r--  arch/s390/kernel/vtime.c  159
-rw-r--r--  arch/s390/kvm/gaccess.c  26
-rw-r--r--  arch/s390/kvm/gaccess.h  19
-rw-r--r--  arch/s390/kvm/guestdbg.c  120
-rw-r--r--  arch/s390/kvm/intercept.c  25
-rw-r--r--  arch/s390/kvm/interrupt.c  6
-rw-r--r--  arch/s390/kvm/kvm-s390.c  108
-rw-r--r--  arch/s390/kvm/kvm-s390.h  12
-rw-r--r--  arch/s390/kvm/priv.c  32
-rw-r--r--  arch/s390/kvm/vsie.c  5
-rw-r--r--  arch/s390/lib/delay.c  2
-rw-r--r--  arch/s390/lib/mem.S  49
-rw-r--r--  arch/s390/lib/spinlock.c  29
-rw-r--r--  arch/s390/lib/string.c  15
-rw-r--r--  arch/s390/lib/xor.c  2
-rw-r--r--  arch/s390/mm/cmm.c  1
-rw-r--r--  arch/s390/mm/dump_pagetables.c  18
-rw-r--r--  arch/s390/mm/extmem.c  6
-rw-r--r--  arch/s390/mm/fault.c  43
-rw-r--r--  arch/s390/mm/gmap.c  10
-rw-r--r--  arch/s390/mm/hugetlbpage.c  12
-rw-r--r--  arch/s390/mm/init.c  23
-rw-r--r--  arch/s390/mm/mem_detect.c  7
-rw-r--r--  arch/s390/mm/mmap.c  2
-rw-r--r--  arch/s390/mm/pageattr.c  118
-rw-r--r--  arch/s390/mm/pgtable.c  11
-rw-r--r--  arch/s390/mm/vmem.c  53
-rw-r--r--  arch/s390/net/bpf_jit_comp.c  39
-rw-r--r--  arch/s390/numa/mode_emu.c  38
-rw-r--r--  arch/s390/numa/toptree.c  16
-rw-r--r--  arch/s390/pci/pci.c  20
-rw-r--r--  arch/s390/pci/pci_clp.c  4
-rw-r--r--  arch/s390/pci/pci_debug.c  2
-rw-r--r--  arch/s390/pci/pci_dma.c  38
-rw-r--r--  arch/s390/tools/Makefile  2
-rw-r--r--  arch/s390/tools/gen_facilities.c  78
150 files changed, 3295 insertions, 1807 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 426481d4cc86..a2dcef0aacc7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,9 +62,6 @@ config PCI_QUIRKS
config ARCH_SUPPORTS_UPROBES
def_bool y
-config DEBUG_RODATA
- def_bool y
-
config S390
def_bool y
select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -72,7 +69,10 @@ config S390
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
+ select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
@@ -134,8 +134,11 @@ config S390
select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
+ select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_CONTIGUOUS
+ select DMA_NOOP_OPS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -169,8 +172,10 @@ config S390
select OLD_SIGSUSPEND3
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
+ select ARCH_HAS_SCALED_CPUTIME
select VIRT_TO_BUS
select HAVE_NMI
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 26c5d5beb4be..ba5f878a295c 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -17,7 +17,7 @@ config S390_PTDUMP
kernel.
If in doubt, say "N"
-config DEBUG_SET_MODULE_RONX
+config EARLY_PRINTK
def_bool y
- depends on MODULES
+
endmenu
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index f587c4811faf..5a8dfa22da7c 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <asm/appldata.h>
#include <asm/vtimer.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 69b23b25ac34..08b9e942a262 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -113,21 +113,21 @@ static void appldata_get_os_data(void *data)
j = 0;
for_each_online_cpu(i) {
os_data->os_cpu[j].per_cpu_user =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
os_data->os_cpu[j].per_cpu_nice =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
os_data->os_cpu[j].per_cpu_system =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
os_data->os_cpu[j].per_cpu_idle =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
os_data->os_cpu[j].per_cpu_irq =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
os_data->os_cpu[j].per_cpu_softirq =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
os_data->os_cpu[j].per_cpu_iowait =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
os_data->os_cpu[j].per_cpu_steal =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
os_data->os_cpu[j].cpu_id = i;
j++;
}
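
The hunk above tracks the cputime accounting rework elsewhere in this series: kcpustat values are now kept in nanoseconds, so the conversion helper switches from cputime_to_jiffies() to nsecs_to_jiffies(). A quick worked check of the unit change (editorial, with HZ = 100 as configured in the defconfigs below):

	nsecs_to_jiffies(n) ~ n / (NSEC_PER_SEC / HZ)
	nsecs_to_jiffies(1000000000) = 1000000000 / 10000000 = 100 jiffies = 1 second
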
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 0daa070d6c9d..f7e4c834ea24 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o
-KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
@@ -19,7 +19,8 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
UBSAN_SANITIZE := n
-OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o als.o)
+OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o ebcdic.o als.o)
+OBJECTS += $(objtree)/drivers/s390/char/sclp_early_core.o
OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 28c4f96a2d9c..11f6254c561e 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -46,7 +46,7 @@ mover_end:
.align 8
.Lstack:
- .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+ .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
.quad 0x11000
.Lmvsize:
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 4da604ebf6fd..fa95041fa9f6 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -6,7 +6,7 @@
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
@@ -66,7 +66,7 @@ static unsigned long free_mem_end_ptr;
static int puts(const char *s)
{
- _sclp_print_early(s);
+ sclp_early_printk(s);
return 0;
}
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 45968686f918..143b1e00b818 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -66,8 +66,10 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
@@ -139,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -157,13 +157,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -217,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -256,7 +254,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -366,6 +363,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -432,13 +431,11 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -477,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -589,14 +587,12 @@ CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
@@ -615,6 +611,7 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
@@ -627,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -637,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_IMA=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -670,12 +670,15 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
@@ -693,3 +696,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1dd05e345c4d..f05d2d6e1087 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -362,6 +360,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -428,13 +428,11 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -459,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -472,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -494,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -550,25 +551,27 @@ CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -576,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -597,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
@@ -633,3 +644,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 29d1178666f0..2358bf33c5ef 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -362,6 +358,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -428,13 +426,11 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -473,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -562,12 +560,16 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -575,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -596,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -610,11 +620,15 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
@@ -622,9 +636,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
@@ -632,3 +643,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index d1033de4c4ee..402c530c6da5 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
-obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
+obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 303d28eb03a2..591cbdf615af 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -28,6 +28,7 @@
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/fips.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
@@ -501,6 +502,12 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
if (err)
return err;
+ /* In fips mode only 128 bit or 256 bit keys are valid */
+ if (fips_enabled && key_len != 32 && key_len != 64) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
/* Pick the correct function code based on the key length */
fc = (key_len == 32) ? CPACF_KM_XTS_128 :
(key_len == 64) ? CPACF_KM_XTS_256 : 0;
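
For context on the check added above: an XTS key is two concatenated AES keys, so the only lengths approved for fips are 2 * 16 = 32 bytes (XTS-AES-128) and 2 * 32 = 64 bytes (XTS-AES-256); the 48-byte AES-192 variant falls through to -EINVAL. A minimal sketch of the same predicate (editorial illustration, not code from the patch):

	static bool xts_key_len_fips_ok(unsigned int key_len)
	{
		/* two concatenated AES-128 or AES-256 keys */
		return key_len == 32 || key_len == 64;
	}
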
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 8b83144206eb..0d296662bbf0 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
+#include <linux/fips.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <asm/cpacf.h>
@@ -221,6 +222,8 @@ static struct crypto_alg cbc_des_alg = {
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*
+ * In fips mode, additionally check that all 3 keys are unique.
+ *
*/
static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
@@ -234,6 +237,17 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
+
+ /* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
+ if (fips_enabled &&
+ !(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
+ crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
+ DES_KEY_SIZE) &&
+ crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
memcpy(ctx->key, key, key_len);
return 0;
}
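
Note that the check added above uses crypto_memneq(), the kernel's constant-time comparison helper, so rejecting a key does not leak timing information about its contents. A hedged sketch of the same pairwise-distinctness test, written with plain memcmp() purely for readability (the real code must keep crypto_memneq()):

	/* true if K1, K2 and K3 are pairwise distinct */
	static bool des3_keys_distinct(const u8 *key)
	{
		return memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) &&
		       memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE,
			      DES_KEY_SIZE) &&
		       memcmp(key, key + 2 * DES_KEY_SIZE, DES_KEY_SIZE);
	}
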
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
new file mode 100644
index 000000000000..d69ea495c4d7
--- /dev/null
+++ b/arch/s390/crypto/paes_s390.c
@@ -0,0 +1,619 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the AES Cipher Algorithm with protected keys.
+ *
+ * s390 Version:
+ * Copyright IBM Corp. 2017
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Harald Freudenberger <freude@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#define KMSG_COMPONENT "paes_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <crypto/xts.h>
+#include <asm/cpacf.h>
+#include <asm/pkey.h>
+
+static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
+
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+
+struct s390_paes_ctx {
+ struct pkey_seckey sk;
+ struct pkey_protkey pk;
+ unsigned long fc;
+};
+
+struct s390_pxts_ctx {
+ struct pkey_seckey sk[2];
+ struct pkey_protkey pk[2];
+ unsigned long fc;
+};
+
+static inline int __paes_convert_key(struct pkey_seckey *sk,
+ struct pkey_protkey *pk)
+{
+ int i, ret;
+
+ /* try three times in case of failure */
+ for (i = 0; i < 3; i++) {
+ ret = pkey_skey2pkey(sk, pk);
+ if (ret == 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int __paes_set_key(struct s390_paes_ctx *ctx)
+{
+ unsigned long fc;
+
+ if (__paes_convert_key(&ctx->sk, &ctx->pk))
+ return -EINVAL;
+
+ /* Pick the correct function code based on the protected key type */
+ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
+
+ /* Check if the function code is available */
+ ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+
+ return ctx->fc ? 0 : -EINVAL;
+}
+
+static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (key_len != SECKEYBLOBSIZE)
+ return -EINVAL;
+
+ memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
+ if (__paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ecb_paes_crypt(struct blkcipher_desc *desc,
+ unsigned long modifier,
+ struct blkcipher_walk *walk)
+{
+ struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int nbytes, n, k;
+ int ret;
+
+ ret = blkcipher_walk_virt(desc, walk);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ if (k)
+ ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ if (k < n) {
+ if (__paes_set_key(ctx) != 0)
+ return blkcipher_walk_done(desc, walk, -EIO);
+ }
+ }
+ return ret;
+}
+
+static int ecb_paes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk);
+}
+
+static int ecb_paes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk);
+}
+
+static struct crypto_alg ecb_paes_alg = {
+ .cra_name = "ecb(paes)",
+ .cra_driver_name = "ecb-paes-s390",
+ .cra_priority = 400, /* combo: aes + ecb */
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = SECKEYBLOBSIZE,
+ .max_keysize = SECKEYBLOBSIZE,
+ .setkey = ecb_paes_set_key,
+ .encrypt = ecb_paes_encrypt,
+ .decrypt = ecb_paes_decrypt,
+ }
+ }
+};
+
+static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+{
+ unsigned long fc;
+
+ if (__paes_convert_key(&ctx->sk, &ctx->pk))
+ return -EINVAL;
+
+ /* Pick the correct function code based on the protected key type */
+ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;
+
+ /* Check if the function code is available */
+ ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+
+ return ctx->fc ? 0 : -EINVAL;
+}
+
+static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
+ if (__cbc_paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
+ struct blkcipher_walk *walk)
+{
+ struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int nbytes, n, k;
+ int ret;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[MAXPROTKEYSIZE];
+ } param;
+
+ ret = blkcipher_walk_virt(desc, walk);
+ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ k = cpacf_kmc(ctx->fc | modifier, &param,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ if (k)
+ ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ if (k < n) {
+ if (__cbc_paes_set_key(ctx) != 0)
+ return blkcipher_walk_done(desc, walk, -EIO);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ }
+ }
+ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
+ return ret;
+}
+
+static int cbc_paes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_paes_crypt(desc, 0, &walk);
+}
+
+static int cbc_paes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk);
+}
+
+static struct crypto_alg cbc_paes_alg = {
+ .cra_name = "cbc(paes)",
+ .cra_driver_name = "cbc-paes-s390",
+ .cra_priority = 400, /* combo: aes + cbc */
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = SECKEYBLOBSIZE,
+ .max_keysize = SECKEYBLOBSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_paes_set_key,
+ .encrypt = cbc_paes_encrypt,
+ .decrypt = cbc_paes_decrypt,
+ }
+ }
+};
+
+static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+{
+ unsigned long fc;
+
+ if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) ||
+ __paes_convert_key(&ctx->sk[1], &ctx->pk[1]))
+ return -EINVAL;
+
+ if (ctx->pk[0].type != ctx->pk[1].type)
+ return -EINVAL;
+
+ /* Pick the correct function code based on the protected key type */
+ fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
+ (ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
+ CPACF_KM_PXTS_256 : 0;
+
+ /* Check if the function code is available */
+ ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+
+ return ctx->fc ? 0 : -EINVAL;
+}
+
+static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ u8 ckey[2 * AES_MAX_KEY_SIZE];
+ unsigned int ckey_len;
+
+ memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
+ memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
+ if (__xts_paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ /*
+ * xts_check_key verifies the key length is not odd and makes
+ * sure that the two keys are not the same. This can be done
+ * on the two protected keys as well.
+ */
+ ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
+ AES_KEYSIZE_128 : AES_KEYSIZE_256;
+ memcpy(ckey, ctx->pk[0].protkey, ckey_len);
+ memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
+ return xts_check_key(tfm, ckey, 2*ckey_len);
+}
+
+static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
+ struct blkcipher_walk *walk)
+{
+ struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int keylen, offset, nbytes, n, k;
+ int ret;
+ struct {
+ u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
+ u8 tweak[16];
+ u8 block[16];
+ u8 bit[16];
+ u8 xts[16];
+ } pcc_param;
+ struct {
+ u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
+ u8 init[16];
+ } xts_param;
+
+ ret = blkcipher_walk_virt(desc, walk);
+ keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
+ offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
+retry:
+ memset(&pcc_param, 0, sizeof(pcc_param));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
+ cpacf_pcc(ctx->fc, pcc_param.key + offset);
+
+ memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
+ memcpy(xts_param.init, pcc_param.xts, 16);
+
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ if (k)
+ ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ if (k < n) {
+ if (__xts_paes_set_key(ctx) != 0)
+ return blkcipher_walk_done(desc, walk, -EIO);
+ goto retry;
+ }
+ }
+ return ret;
+}
+
+static int xts_paes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return xts_paes_crypt(desc, 0, &walk);
+}
+
+static int xts_paes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return xts_paes_crypt(desc, CPACF_DECRYPT, &walk);
+}
+
+static struct crypto_alg xts_paes_alg = {
+ .cra_name = "xts(paes)",
+ .cra_driver_name = "xts-paes-s390",
+ .cra_priority = 400, /* combo: aes + xts */
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_pxts_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = 2 * SECKEYBLOBSIZE,
+ .max_keysize = 2 * SECKEYBLOBSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_paes_set_key,
+ .encrypt = xts_paes_encrypt,
+ .decrypt = xts_paes_decrypt,
+ }
+ }
+};
+
+static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+{
+ unsigned long fc;
+
+ if (__paes_convert_key(&ctx->sk, &ctx->pk))
+ return -EINVAL;
+
+ /* Pick the correct function code based on the protected key type */
+ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
+ CPACF_KMCTR_PAES_256 : 0;
+
+ /* Check if the function code is available */
+ ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
+
+ return ctx->fc ? 0 : -EINVAL;
+}
+
+static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memcpy(ctx->sk.seckey, in_key, key_len);
+ if (__ctr_paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ memcpy(ctrptr, iv, AES_BLOCK_SIZE);
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
+ memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
+ ctrptr += AES_BLOCK_SIZE;
+ }
+ return n;
+}
+
+static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
+ struct blkcipher_walk *walk)
+{
+ struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ u8 buf[AES_BLOCK_SIZE], *ctrptr;
+ unsigned int nbytes, n, k;
+ int ret, locked;
+
+ locked = spin_trylock(&ctrblk_lock);
+
+ ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ n = AES_BLOCK_SIZE;
+ if (nbytes >= 2*AES_BLOCK_SIZE && locked)
+ n = __ctrblk_init(ctrblk, walk->iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
+ k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
+ walk->dst.virt.addr, walk->src.virt.addr,
+ n, ctrptr);
+ if (k) {
+ if (ctrptr == ctrblk)
+ memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ }
+ if (k < n) {
+ if (__ctr_paes_set_key(ctx) != 0)
+ return blkcipher_walk_done(desc, walk, -EIO);
+ }
+ }
+ if (locked)
+ spin_unlock(&ctrblk_lock);
+ /*
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
+ while (1) {
+ if (cpacf_kmctr(ctx->fc | modifier,
+ ctx->pk.protkey, buf,
+ walk->src.virt.addr, AES_BLOCK_SIZE,
+ walk->iv) == AES_BLOCK_SIZE)
+ break;
+ if (__ctr_paes_set_key(ctx) != 0)
+ return blkcipher_walk_done(desc, walk, -EIO);
+ }
+ memcpy(walk->dst.virt.addr, buf, nbytes);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
+ }
+
+ return ret;
+}
+
+static int ctr_paes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_paes_crypt(desc, 0, &walk);
+}
+
+static int ctr_paes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk);
+}
+
+static struct crypto_alg ctr_paes_alg = {
+ .cra_name = "ctr(paes)",
+ .cra_driver_name = "ctr-paes-s390",
+ .cra_priority = 400, /* combo: aes + ctr */
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = SECKEYBLOBSIZE,
+ .max_keysize = SECKEYBLOBSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_paes_set_key,
+ .encrypt = ctr_paes_encrypt,
+ .decrypt = ctr_paes_decrypt,
+ }
+ }
+};
+
+static inline void __crypto_unregister_alg(struct crypto_alg *alg)
+{
+ if (!list_empty(&alg->cra_list))
+ crypto_unregister_alg(alg);
+}
+
+static void paes_s390_fini(void)
+{
+ if (ctrblk)
+ free_page((unsigned long) ctrblk);
+ __crypto_unregister_alg(&ctr_paes_alg);
+ __crypto_unregister_alg(&xts_paes_alg);
+ __crypto_unregister_alg(&cbc_paes_alg);
+ __crypto_unregister_alg(&ecb_paes_alg);
+}
+
+static int __init paes_s390_init(void)
+{
+ int ret;
+
+ /* Query available functions for KM, KMC and KMCTR */
+ cpacf_query(CPACF_KM, &km_functions);
+ cpacf_query(CPACF_KMC, &kmc_functions);
+ cpacf_query(CPACF_KMCTR, &kmctr_functions);
+
+ if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
+ cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
+ cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
+ ret = crypto_register_alg(&ecb_paes_alg);
+ if (ret)
+ goto out_err;
+ }
+
+ if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
+ cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
+ cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
+ ret = crypto_register_alg(&cbc_paes_alg);
+ if (ret)
+ goto out_err;
+ }
+
+ if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
+ cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
+ ret = crypto_register_alg(&xts_paes_alg);
+ if (ret)
+ goto out_err;
+ }
+
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
+ ret = crypto_register_alg(&ctr_paes_alg);
+ if (ret)
+ goto out_err;
+ ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!ctrblk) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ return 0;
+out_err:
+ paes_s390_fini();
+ return ret;
+}
+
+module_init(paes_s390_init);
+module_exit(paes_s390_fini);
+
+MODULE_ALIAS_CRYPTO("aes-all");
+
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
+MODULE_LICENSE("GPL");
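
To see how the new algorithms are meant to be consumed, here is a hedged sketch of a kernel-side caller driving "cbc(paes)" through the generic skcipher API. The 64-byte secure key blob (SECKEYBLOBSIZE) is assumed to come from the pkey interface added elsewhere in this series; error handling is trimmed and the request is assumed to complete synchronously. This is an editorial illustration, not code from the patch.

	#include <crypto/skcipher.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int paes_cbc_demo(const u8 *seckey, u8 *buf, unsigned int len, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		/* the "key" is the secure key blob, not clear key material */
		ret = crypto_skcipher_setkey(tfm, seckey, SECKEYBLOBSIZE);
		if (!ret) {
			req = skcipher_request_alloc(tfm, GFP_KERNEL);
			if (!req) {
				ret = -ENOMEM;
			} else {
				sg_init_one(&sg, buf, len);
				skcipher_request_set_crypt(req, &sg, &sg, len, iv);
				ret = crypto_skcipher_encrypt(req);
				skcipher_request_free(req);
			}
		}
		crypto_free_skcipher(tfm);
		return ret;
	}
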
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 9cc050f9536c..85b7f5efe06a 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -21,7 +21,7 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/debug.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/timex.h>
#include <asm/cpacf.h>
@@ -110,22 +110,30 @@ static const u8 initial_parm_block[32] __initconst = {
/*** helper functions ***/
+/*
+ * generate_entropy:
+ * This algorithm produces 64 bytes of entropy data based on 1024
+ * individual stckf() invocations, assuming that each stckf() value
+ * contributes 0.25 bits of entropy. So the caller gets 256 bits of
+ * entropy per 64 bytes, or 4 bits of entropy per byte.
+ */
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
- u8 *pg, *h, hash[32];
+ u8 *pg, *h, hash[64];
- pg = (u8 *) __get_free_page(GFP_KERNEL);
+ /* allocate 2 pages */
+ pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
while (nbytes) {
- /* fill page with urandom bytes */
- get_random_bytes(pg, PAGE_SIZE);
- /* exor page with stckf values */
- for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
+ /* fill pages with urandom bytes */
+ get_random_bytes(pg, 2*PAGE_SIZE);
+ /* exor pages with 1024 stckf values */
+ for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
*p ^= get_tod_clock_fast();
}
@@ -134,8 +142,8 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
h = hash;
else
h = ebuf;
- /* generate sha256 from this page */
- cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
+ /* hash over the filled pages */
+ cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
if (n < sizeof(hash))
memcpy(ebuf, hash, n);
ret += n;
@@ -143,7 +151,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
nbytes -= n;
}
- free_page((unsigned long)pg);
+ free_pages((unsigned long)pg, 1);
return ret;
}
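
Written out, the arithmetic behind the reworked comment (an editorial cross-check, derived from the values above):

	2 pages          = 2 * 4096 bytes      = 1024 u64 slots, one stckf() value each
	entropy gathered = 1024 * 0.25 bits    = 256 bits
	per output byte  = 256 bits / 64 bytes = 4 bits/byte (one SHA-512 digest)
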
@@ -334,7 +342,7 @@ static int __init prng_sha512_selftest(void)
static int __init prng_sha512_instantiate(void)
{
int ret, datalen;
- u8 seed[64];
+ u8 seed[64 + 32 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
@@ -357,12 +365,12 @@ static int __init prng_sha512_instantiate(void)
if (ret)
goto outfree;
- /* generate initial seed bytestring, first 48 bytes of entropy */
- ret = generate_entropy(seed, 48);
- if (ret != 48)
+ /* generate initial seed bytestring, with 256 + 128 bits entropy */
+ ret = generate_entropy(seed, 64 + 32);
+ if (ret != 64 + 32)
goto outfree;
/* followed by 16 bytes of unique nonce */
- get_tod_clock_ext(seed + 48);
+ get_tod_clock_ext(seed + 64 + 32);
/* initial seed of the ppno drng */
cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
@@ -395,9 +403,9 @@ static void prng_sha512_deinstantiate(void)
static int prng_sha512_reseed(void)
{
int ret;
- u8 seed[32];
+ u8 seed[64];
- /* generate 32 bytes of fresh entropy */
+ /* fetch 256 bits of fresh entropy */
ret = generate_entropy(seed, sizeof(seed));
if (ret != sizeof(seed))
return ret;
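
Cross-checking the new seed sizes against the 4 bits/byte estimate above (editorial note, derived from the patch):

	instantiate: (64 + 32) bytes * 4 bits/byte = 384 bits = 256 + 128 bits,
	             plus a 16 byte TOD-clock nonce -> u8 seed[64 + 32 + 16]
	reseed:      64 bytes * 4 bits/byte = 256 bits of fresh entropy
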
@@ -507,8 +515,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
- if (copy_to_user(ubuf, prng_data->buf, chunk))
- return -EFAULT;
+ if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+ ret = -EFAULT;
+ break;
+ }
nbytes -= chunk;
ret += chunk;
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2d40ef0a6295..68bfd09f1b02 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
CONFIG_RAW_DRIVER=m
CONFIG_VIRTIO_BALLOON=y
CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -228,6 +229,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 09bccb224d03..cf8a2d92467f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -3,6 +3,7 @@
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ * License: GPL
*/
#define KMSG_COMPONENT "hypfs"
@@ -18,7 +19,8 @@
#include <linux/time.h>
#include <linux/parser.h>
#include <linux/sysfs.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/uio.h>
@@ -443,7 +445,6 @@ static struct file_system_type hypfs_type = {
.mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
-MODULE_ALIAS_FS("s390_hypfs");
static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
@@ -497,21 +498,4 @@ fail_dbfs_exit:
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
-
-static void __exit hypfs_exit(void)
-{
- unregister_filesystem(&hypfs_type);
- sysfs_remove_mount_point(hypervisor_kobj, "s390");
- hypfs_diag0c_exit();
- hypfs_sprp_exit();
- hypfs_vm_exit();
- hypfs_diag_exit();
- hypfs_dbfs_exit();
-}
-
-module_init(hypfs_init)
-module_exit(hypfs_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
-MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
+device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 20f196b82a6e..8aea32fe8bd2 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,6 @@
-
-
+generic-y += asm-offsets.h
generic-y += clkdev.h
+generic-y += dma-contiguous.h
generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/s390/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..2c3413b0ca52
--- /dev/null
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d28cc2f5b7b2..f7f69dfd2db2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,13 +1,8 @@
/*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
- * Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ * Arnd Bergmann,
*/
#ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
-#define __ATOMIC_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR "lao"
-#define __ATOMIC_AND "lan"
-#define __ATOMIC_ADD "laa"
-#define __ATOMIC_XOR "lax"
-#define __ATOMIC_BARRIER "bcr 14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- int old_val; \
- \
- typecheck(atomic_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR "or"
-#define __ATOMIC_AND "nr"
-#define __ATOMIC_ADD "ar"
-#define __ATOMIC_XOR "xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- int old_val, new_val; \
- \
- typecheck(atomic_t *, ptr); \
- asm volatile( \
- " l %0,%2\n" \
- "0: lr %1,%0\n" \
- op_string " %1,%3\n" \
- " cs %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
static inline int atomic_read(const atomic_t *v)
{
int c;
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+ return __atomic_add_barrier(i, &v->counter) + i;
}
static inline int atomic_fetch_add(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+ return __atomic_add_barrier(i, &v->counter);
}
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "asi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
+ __atomic_add_const(i, &v->counter);
return;
}
#endif
- __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+ __atomic_add(i, &v->counter);
}
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-#define ATOMIC_OPS(op, OP) \
+#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
+ __atomic_##op(i, &v->counter); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
- return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
+ return __atomic_##op##_barrier(i, &v->counter); \
}
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
- asm volatile(
- " cs %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
- : "cc", "memory");
- return old;
+ return __atomic_cmpxchg(&v->counter, old, new);
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-
-#undef __ATOMIC_LOOP
-
#define ATOMIC64_INIT(i) { (i) }
-#define __ATOMIC64_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR "laog"
-#define __ATOMIC64_AND "lang"
-#define __ATOMIC64_ADD "laag"
-#define __ATOMIC64_XOR "laxg"
-#define __ATOMIC64_BARRIER "bcr 14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- long long old_val; \
- \
- typecheck(atomic64_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR "ogr"
-#define __ATOMIC64_AND "ngr"
-#define __ATOMIC64_ADD "agr"
-#define __ATOMIC64_XOR "xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- long long old_val, new_val; \
- \
- typecheck(atomic64_t *, ptr); \
- asm volatile( \
- " lg %0,%2\n" \
- "0: lgr %1,%0\n" \
- op_string " %1,%3\n" \
- " csg %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
{
- long long c;
+ long c;
asm volatile(
" lg %0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
return c;
}
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
{
asm volatile(
" stg %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+ return __atomic64_add_barrier(i, &v->counter) + i;
}
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+ return __atomic64_add_barrier(i, &v->counter);
}
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "agsi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
+ __atomic64_add_const(i, &v->counter);
return;
}
#endif
- __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+ __atomic64_add(i, &v->counter);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline long long atomic64_cmpxchg(atomic64_t *v,
- long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
- asm volatile(
- " csg %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
- : "cc", "memory");
- return old;
+ return __atomic64_cmpxchg(&v->counter, old, new);
}
-#define ATOMIC64_OPS(op, OP) \
+#define ATOMIC64_OPS(op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
- __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+ __atomic64_##op(i, &v->counter); \
} \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+ return __atomic64_##op##_barrier(i, &v->counter); \
}
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
- long long c, old;
+ long c, old;
c = atomic64_read(v);
for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
return c != u;
}
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
{
- long long c, old, dec;
+ long c, old, dec;
c = atomic64_read(v);
for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
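The add_return/fetch_add split above is the refactoring in miniature: both now sit on one fetch-style primitive, with add_return derived as fetch_add(i) + i. A minimal userspace model (GCC __atomic builtins standing in for the laa/cs-based helpers; illustrative only):

#include <stdio.h>

int main(void)
{
        int v = 10;
        /* Fetch-and-add returns the value *before* the addition; the
         * arch code above builds add_return as fetch_add(i) + i on top
         * of the same primitive. */
        int old = __atomic_fetch_add(&v, 5, __ATOMIC_SEQ_CST);

        printf("old=%d add_return=%d v=%d\n", old, old + 5, v); /* 10 15 15 */
        return 0;
}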
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
new file mode 100644
index 000000000000..ac9e2b939d04
--- /dev/null
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -0,0 +1,130 @@
+/*
+ * Low level functions for atomic operations
+ *
+ * Copyright IBM Corp. 1999, 2016
+ */
+
+#ifndef __ARCH_S390_ATOMIC_OPS__
+#define __ARCH_S390_ATOMIC_OPS__
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
+static inline op_type op_name(op_type val, op_type *ptr) \
+{ \
+ op_type old; \
+ \
+ asm volatile( \
+ op_string " %[old],%[val],%[ptr]\n" \
+ op_barrier \
+ : [old] "=d" (old), [ptr] "+Q" (*ptr) \
+ : [val] "d" (val) : "cc", "memory"); \
+ return old; \
+} \
+
+#define __ATOMIC_OPS(op_name, op_type, op_string) \
+ __ATOMIC_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_OPS(__atomic_add, int, "laa")
+__ATOMIC_OPS(__atomic_and, int, "lan")
+__ATOMIC_OPS(__atomic_or, int, "lao")
+__ATOMIC_OPS(__atomic_xor, int, "lax")
+
+__ATOMIC_OPS(__atomic64_add, long, "laag")
+__ATOMIC_OPS(__atomic64_and, long, "lang")
+__ATOMIC_OPS(__atomic64_or, long, "laog")
+__ATOMIC_OPS(__atomic64_xor, long, "laxg")
+
+#undef __ATOMIC_OPS
+#undef __ATOMIC_OP
+
+static inline void __atomic_add_const(int val, int *ptr)
+{
+ asm volatile(
+ " asi %[ptr],%[val]\n"
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+static inline void __atomic64_add_const(long val, long *ptr)
+{
+ asm volatile(
+ " agsi %[ptr],%[val]\n"
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OP(op_name, op_string) \
+static inline int op_name(int val, int *ptr) \
+{ \
+ int old, new; \
+ \
+ asm volatile( \
+ "0: lr %[new],%[old]\n" \
+ op_string " %[new],%[val]\n" \
+ " cs %[old],%[new],%[ptr]\n" \
+ " jl 0b" \
+ : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+ : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
+ return old; \
+}
+
+#define __ATOMIC_OPS(op_name, op_string) \
+ __ATOMIC_OP(op_name, op_string) \
+ __ATOMIC_OP(op_name##_barrier, op_string)
+
+__ATOMIC_OPS(__atomic_add, "ar")
+__ATOMIC_OPS(__atomic_and, "nr")
+__ATOMIC_OPS(__atomic_or, "or")
+__ATOMIC_OPS(__atomic_xor, "xr")
+
+#undef __ATOMIC_OPS
+
+#define __ATOMIC64_OP(op_name, op_string) \
+static inline long op_name(long val, long *ptr) \
+{ \
+ long old, new; \
+ \
+ asm volatile( \
+ "0: lgr %[new],%[old]\n" \
+ op_string " %[new],%[val]\n" \
+ " csg %[old],%[new],%[ptr]\n" \
+ " jl 0b" \
+ : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+ : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
+ return old; \
+}
+
+#define __ATOMIC64_OPS(op_name, op_string) \
+ __ATOMIC64_OP(op_name, op_string) \
+ __ATOMIC64_OP(op_name##_barrier, op_string)
+
+__ATOMIC64_OPS(__atomic64_add, "agr")
+__ATOMIC64_OPS(__atomic64_and, "ngr")
+__ATOMIC64_OPS(__atomic64_or, "ogr")
+__ATOMIC64_OPS(__atomic64_xor, "xgr")
+
+#undef __ATOMIC64_OPS
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+{
+ asm volatile(
+ " cs %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new) : "cc", "memory");
+ return old;
+}
+
+static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+{
+ asm volatile(
+ " csg %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new) : "cc", "memory");
+ return old;
+}
+
+#endif /* __ARCH_S390_ATOMIC_OPS__ */
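For the pre-z196 path, the generated helpers spin on compare-and-swap until the swap observes an unmodified old value. A userspace model of that retry loop (the GCC builtin stands in for cs/csg; illustrative only):

#include <stdio.h>

/* Model of the pre-z196 __ATOMIC_OP expansion: load old, compute
 * old + val, retry the compare-and-swap until nobody raced us. */
static int model_atomic_add(int val, int *ptr)
{
        int old = *ptr;

        while (!__atomic_compare_exchange_n(ptr, &old, old + val, 0,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
                ;               /* old is refreshed on each failure */
        return old;             /* fetch-and-add semantics */
}

int main(void)
{
        int counter = 40;

        printf("old=%d new=%d\n", model_atomic_add(2, &counter), counter);
        return 0;
}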
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 8043f10da6b5..d92047da5ccb 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,57 +42,9 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
-#define __BITOPS_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR "laog"
-#define __BITOPS_AND "lang"
-#define __BITOPS_XOR "laxg"
-#define __BITOPS_BARRIER "bcr 14,0\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- __op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (__old), "+Q" (*(__addr)) \
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __BITOPS_OR "ogr"
-#define __BITOPS_AND "ngr"
-#define __BITOPS_XOR "xgr"
-#define __BITOPS_BARRIER "\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- " lg %0,%2\n" \
- "0: lgr %1,%0\n" \
- __op_string " %1,%3\n" \
- " csg %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+ __atomic64_or(mask, addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+ __atomic64_and(mask, addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+ __atomic64_xor(mask, addr);
}
static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+ old = __atomic64_or_barrier(mask, addr);
return (old & mask) != 0;
}
@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+ old = __atomic64_and_barrier(mask, addr);
return (old & ~mask) != 0;
}
@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+ old = __atomic64_xor_barrier(mask, addr);
return (old & mask) != 0;
}
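The bitops conversion is mechanical because every operation was already "build a mask, do one atomic op, test the old value". A runnable sketch of the test_and_set_bit() pattern with a builtin in place of __atomic64_or_barrier():

#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
        unsigned long word = 0;
        unsigned long nr = 7;
        /* test_and_set_bit() boils down to this: mask the bit, do one
         * atomic OR (laog or the csg loop above), test the old value. */
        unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));
        unsigned long old = __atomic_fetch_or(&word, mask, __ATOMIC_SEQ_CST);

        printf("bit was %d, word now %#lx\n", (old & mask) != 0, word);
        return 0;
}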
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 58fae7d098cf..0499334f9473 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -4,9 +4,31 @@
/* Caches aren't brain-dead on the s390. */
#include <asm-generic/cacheflush.h>
-int set_memory_ro(unsigned long addr, int numpages);
-int set_memory_rw(unsigned long addr, int numpages);
-int set_memory_nx(unsigned long addr, int numpages);
-int set_memory_x(unsigned long addr, int numpages);
+#define SET_MEMORY_RO 1UL
+#define SET_MEMORY_RW 2UL
+#define SET_MEMORY_NX 4UL
+#define SET_MEMORY_X 8UL
+
+int __set_memory(unsigned long addr, int numpages, unsigned long flags);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_X);
+}
#endif /* _S390_CACHEFLUSH_H */
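Since the four wrappers are now thin shims over one flags-based worker, combined attribute changes become possible in a single pass. A hedged caller sketch; the helper name, the use of PAGE_SHIFT, and the assumption that __set_memory() accepts OR'ed SET_MEMORY_* flags are all illustrative, not from the patch:

/* Hypothetical helper: make [start, end) read-only and non-executable
 * in one call, assuming __set_memory() accepts combined flags. */
static int protect_rodata(unsigned long start, unsigned long end)
{
        int numpages = (end - start) >> PAGE_SHIFT;

        return __set_memory(start, numpages, SET_MEMORY_RO | SET_MEMORY_NX);
}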
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index d7f100c53f07..12bf4fef2a68 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -11,7 +11,7 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 2c680db7e5c1..e2dfbf280d12 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -28,8 +28,9 @@
#define CPACF_PPNO 0xb93c /* MSA5 */
/*
- * Decryption modifier bit
+ * En/decryption modifier bits
*/
+#define CPACF_ENCRYPT 0x00
#define CPACF_DECRYPT 0x80
/*
@@ -42,8 +43,13 @@
#define CPACF_KM_AES_128 0x12
#define CPACF_KM_AES_192 0x13
#define CPACF_KM_AES_256 0x14
+#define CPACF_KM_PAES_128 0x1a
+#define CPACF_KM_PAES_192 0x1b
+#define CPACF_KM_PAES_256 0x1c
#define CPACF_KM_XTS_128 0x32
#define CPACF_KM_XTS_256 0x34
+#define CPACF_KM_PXTS_128 0x3a
+#define CPACF_KM_PXTS_256 0x3c
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -56,6 +62,9 @@
#define CPACF_KMC_AES_128 0x12
#define CPACF_KMC_AES_192 0x13
#define CPACF_KMC_AES_256 0x14
+#define CPACF_KMC_PAES_128 0x1a
+#define CPACF_KMC_PAES_192 0x1b
+#define CPACF_KMC_PAES_256 0x1c
#define CPACF_KMC_PRNG 0x43
/*
@@ -69,6 +78,9 @@
#define CPACF_KMCTR_AES_128 0x12
#define CPACF_KMCTR_AES_192 0x13
#define CPACF_KMCTR_AES_256 0x14
+#define CPACF_KMCTR_PAES_128 0x1a
+#define CPACF_KMCTR_PAES_192 0x1b
+#define CPACF_KMCTR_PAES_256 0x1c
/*
* Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
@@ -99,6 +111,18 @@
#define CPACF_KMAC_TDEA_192 0x03
/*
+ * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT
+ * OPERATION) instruction
+ */
+#define CPACF_PCKMO_QUERY 0x00
+#define CPACF_PCKMO_ENC_DES_KEY 0x01
+#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
+#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
+#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
+#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
+#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
+
+/*
* Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
* instruction
*/
@@ -397,4 +421,24 @@ static inline void cpacf_pcc(unsigned long func, void *param)
: "cc", "memory");
}
+/**
+ * cpacf_pckmo() - executes the PCKMO (PERFORM CRYPTOGRAPHIC KEY
+ * MANAGEMENT OPERATION) instruction
+ * @func: the function code passed to PCKMO; see CPACF_PCKMO_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Does not return a value.
+ */
+static inline void cpacf_pckmo(long func, void *param)
+{
+ register unsigned long r0 asm("0") = (unsigned long) func;
+ register unsigned long r1 asm("1") = (unsigned long) param;
+
+ asm volatile(
+ " .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
+ :
+ : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
+ : "cc", "memory");
+}
+
#endif /* _ASM_S390_CPACF_H */
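A hedged sketch of driving the new helper: wrapping a clear AES-128 key into a CPACF protected key. The parameter-block layout (clear key followed by a 32-byte wrapping-key verification pattern) is an assumption to verify against the POP before relying on it:

/* Sketch only: PCKMO parameter-block layout assumed, check the POP.
 * u8/memcpy as in <linux/types.h>/<linux/string.h>. */
static void aes128_clrkey_to_protkey(const u8 *clrkey, u8 *protkey)
{
        u8 parm[16 + 32];       /* clear key + verification pattern */

        memcpy(parm, clrkey, 16);
        cpacf_pckmo(CPACF_PCKMO_ENC_AES_128_KEY, parm);
        memcpy(protkey, parm, sizeof(parm));
}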
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 03516476127b..d1e0707310fd 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -104,7 +104,8 @@ struct hws_basic_entry {
unsigned int P:1; /* 28 PSW Problem state */
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
- unsigned int:16;
+ unsigned int CL:2; /* 32-33 Configuration Level */
+ unsigned int:14;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
@@ -198,32 +199,29 @@ static inline int ecctr(u64 ctr, u64 *val)
/* Store CPU counter multiple for the MT utilization counter set */
static inline int stcctm5(u64 num, u64 *val)
{
- typedef struct { u64 _[num]; } addrtype;
int cc;
asm volatile (
" .insn rsy,0xeb0000000017,%2,5,%1\n"
" ipm %0\n"
" srl %0,28\n"
- : "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
+ : "=d" (cc)
+ : "Q" (*val), "d" (num)
+ : "cc", "memory");
return cc;
}
/* Query sampling information */
static inline int qsi(struct hws_qsi_info_block *info)
{
- int cc;
- cc = 1;
+ int cc = 1;
asm volatile(
- "0: .insn s,0xb2860000,0(%1)\n"
+ "0: .insn s,0xb2860000,%1\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
- : "=d" (cc), "+a" (info)
- : "m" (*info)
- : "cc", "memory");
-
+ : "+d" (cc), "+Q" (*info));
return cc ? -EINVAL : 0;
}
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 221b454c734a..d1c407ddf703 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -25,33 +25,6 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
return n / base;
}
-#define cputime_one_jiffy jiffies_to_cputime(1)
-
-/*
- * Convert cputime to jiffies and back.
- */
-static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
-{
- return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
-}
-
-static inline cputime_t jiffies_to_cputime(const unsigned int jif)
-{
- return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
-}
-
-static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
-{
- unsigned long long jif = (__force unsigned long long) cputime;
- do_div(jif, CPUTIME_PER_SEC / HZ);
- return jif;
-}
-
-static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
-{
- return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
-}
-
/*
* Convert cputime to microseconds and back.
*/
@@ -60,88 +33,8 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
return (__force unsigned long long) cputime >> 12;
}
-static inline cputime_t usecs_to_cputime(const unsigned int m)
-{
- return (__force cputime_t)(m * CPUTIME_PER_USEC);
-}
-
-#define usecs_to_cputime64(m) usecs_to_cputime(m)
-
-/*
- * Convert cputime to milliseconds and back.
- */
-static inline unsigned int cputime_to_secs(const cputime_t cputime)
-{
- return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
-}
-
-static inline cputime_t secs_to_cputime(const unsigned int s)
-{
- return (__force cputime_t)(s * CPUTIME_PER_SEC);
-}
-
-/*
- * Convert cputime to timespec and back.
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *value)
-{
- unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
- return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
-}
-
-static inline void cputime_to_timespec(const cputime_t cputime,
- struct timespec *value)
-{
- unsigned long long __cputime = (__force unsigned long long) cputime;
- value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
- value->tv_sec = __cputime / CPUTIME_PER_SEC;
-}
-
-/*
- * Convert cputime to timeval and back.
- * Since cputime and timeval have the same resolution (microseconds)
- * this is easy.
- */
-static inline cputime_t timeval_to_cputime(const struct timeval *value)
-{
- unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
- return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
-}
-
-static inline void cputime_to_timeval(const cputime_t cputime,
- struct timeval *value)
-{
- unsigned long long __cputime = (__force unsigned long long) cputime;
- value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
- value->tv_sec = __cputime / CPUTIME_PER_SEC;
-}
-
-/*
- * Convert cputime to clock and back.
- */
-static inline clock_t cputime_to_clock_t(cputime_t cputime)
-{
- unsigned long long clock = (__force unsigned long long) cputime;
- do_div(clock, CPUTIME_PER_SEC / USER_HZ);
- return clock;
-}
-
-static inline cputime_t clock_t_to_cputime(unsigned long x)
-{
- return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
-}
-
-/*
- * Convert cputime64 to clock.
- */
-static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
-{
- unsigned long long clock = (__force unsigned long long) cputime;
- do_div(clock, CPUTIME_PER_SEC / USER_HZ);
- return clock;
-}
-cputime64_t arch_cpu_idle_time(int cpu);
+u64 arch_cpu_idle_time(int cpu);
#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
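The surviving cputime_to_usecs() depends on the s390 CPU-timer unit being 2^-12 microseconds, which reduces the conversion to a shift. A runnable check of the arithmetic:

#include <stdio.h>

int main(void)
{
        /* s390 CPU-timer format: 4096 (2^12) ticks per microsecond,
         * hence the ">> 12" in cputime_to_usecs(). */
        unsigned long long cputime = 3ULL * 4096 * 1000000;     /* 3 s */

        printf("%llu usecs\n", cputime >> 12);  /* 3000000 */
        return 0;
}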
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..d0441ad2a990 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -9,16 +9,18 @@
#include <linux/bug.h>
-#define __ctl_load(array, low, high) { \
+#define __ctl_load(array, low, high) do { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
-}
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
+} while (0)
-#define __ctl_store(array, low, high) { \
+#define __ctl_store(array, low, high) do { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
@@ -26,7 +28,7 @@
" stctg %1,%2,%0\n" \
: "=Q" (*(addrtype *)(&array)) \
: "i" (low), "i" (high)); \
-}
+} while (0)
static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
{
@@ -60,7 +62,9 @@ union ctlreg0 {
unsigned long : 4;
unsigned long afp : 1; /* AFP-register control */
unsigned long vx : 1; /* Vector enablement control */
- unsigned long : 17;
+ unsigned long : 7;
+ unsigned long sssm : 1; /* Service signal subclass mask */
+ unsigned long : 9;
};
};
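Why the do { } while (0) wrapper matters: a bare { } block plus the caller's trailing semicolon detaches an else from its if. A runnable miniature, with placeholder printf bodies standing in for the lctlg/stctg statements:

#include <stdio.h>

/* do { } while (0) swallows the trailing semicolon, so the macro
 * behaves like a single statement inside if/else. */
#define GOOD(x) do { printf("a:%d ", x); printf("b:%d\n", x); } while (0)

int main(void)
{
        int cond = 1;

        if (cond)
                GOOD(1);        /* a bare { } block would not compile here */
        else
                GOOD(2);
        return 0;
}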
diff --git a/arch/s390/include/asm/device.h b/arch/s390/include/asm/device.h
index 4a9f35e0973f..5203fc87f080 100644
--- a/arch/s390/include/asm/device.h
+++ b/arch/s390/include/asm/device.h
@@ -4,7 +4,6 @@
* This file is released under the GPLv2
*/
struct dev_archdata {
- struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index ffaba07f50ab..3108b8dbe266 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -10,12 +10,10 @@
#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
-extern struct dma_map_ops s390_pci_dma_ops;
+extern const struct dma_map_ops s390_pci_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
return &dma_noop_ops;
}
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d3c94c..83aaefed2a7b 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -103,6 +103,8 @@
#define HWCAP_S390_HIGH_GPRS 512
#define HWCAP_S390_TE 1024
#define HWCAP_S390_VXRS 2048
+#define HWCAP_S390_VXRS_BCD 4096
+#define HWCAP_S390_VXRS_EXT 8192
/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE 1UL
@@ -193,7 +195,7 @@ extern char elf_platform[];
do { \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
@@ -204,11 +206,11 @@ do { \
(current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table_emu; \
} else { \
clear_thread_flag(TIF_31BIT); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} \
} while (0)
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
deleted file mode 100644
index 3b758f66e48b..000000000000
--- a/arch/s390/include/asm/facilities_src.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright IBM Corp. 2015
- */
-
-#ifndef S390_GEN_FACILITIES_C
-#error "This file can only be included by gen_facilities.c"
-#endif
-
-#include <linux/kconfig.h>
-
-struct facility_def {
- char *name;
- int *bits;
-};
-
-static struct facility_def facility_defs[] = {
- {
- /*
- * FACILITIES_ALS contains the list of facilities that are
- * required to run a kernel that is compiled e.g. with
- * -march=<machine>.
- */
- .name = "FACILITIES_ALS",
- .bits = (int[]){
-#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
- 0, /* N3 instructions */
- 1, /* z/Arch mode installed */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
- 18, /* long displacement facility */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- 7, /* stfle */
- 17, /* message security assist */
- 21, /* extended-immediate facility */
- 25, /* store clock fast */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
- 27, /* mvcos */
- 32, /* compare and swap and store */
- 33, /* compare and swap and store 2 */
- 34, /* general extension facility */
- 35, /* execute extensions */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- 45, /* fast-BCR, etc. */
-#endif
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- 49, /* misc-instruction-extensions */
- 52, /* interlocked facility 2 */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
- 53, /* load-and-zero-rightmost-byte, etc. */
-#endif
- -1 /* END */
- }
- },
- {
- .name = "FACILITIES_KVM",
- .bits = (int[]){
- 0, /* N3 instructions */
- 1, /* z/Arch mode installed */
- 2, /* z/Arch mode active */
- 3, /* DAT-enhancement */
- 4, /* idte segment table */
- 5, /* idte region table */
- 6, /* ASN-and-LX reuse */
- 7, /* stfle */
- 8, /* enhanced-DAT 1 */
- 9, /* sense-running-status */
- 10, /* conditional sske */
- 13, /* ipte-range */
- 14, /* nonquiescing key-setting */
- 73, /* transactional execution */
- 75, /* access-exception-fetch/store indication */
- 76, /* msa extension 3 */
- 77, /* msa extension 4 */
- 78, /* enhanced-DAT 2 */
- -1 /* END */
- }
- },
-};
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 4c7fac75090e..cd546a245c68 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -14,7 +14,7 @@
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range free_pgd_range
-#define hugepages_supported() (MACHINE_HAS_HPAGE)
+#define hugepages_supported() (MACHINE_HAS_EDAT1)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index a7b2d7504049..280b60a0bcd4 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 4da22b2f0521..edb5161df7e2 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void do_reipl(void);
extern void do_halt(void);
extern void do_poff(void);
-extern void ipl_save_parameters(void);
+extern void ipl_verify_parameters(void);
extern void ipl_update_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 2c1213785892..6de5c6cb0061 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -17,7 +17,7 @@
#ifndef ASM_LIVEPATCH_H
#define ASM_LIVEPATCH_H
-#include <linux/module.h>
+#include <asm/ptrace.h>
static inline int klp_check_compiler_support(void)
{
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78f423c..61261e0e95c0 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -85,52 +85,56 @@ struct lowcore {
__u64 mcck_enter_timer; /* 0x02c0 */
__u64 exit_timer; /* 0x02c8 */
__u64 user_timer; /* 0x02d0 */
- __u64 system_timer; /* 0x02d8 */
- __u64 steal_timer; /* 0x02e0 */
- __u64 last_update_timer; /* 0x02e8 */
- __u64 last_update_clock; /* 0x02f0 */
- __u64 int_clock; /* 0x02f8 */
- __u64 mcck_clock; /* 0x0300 */
- __u64 clock_comparator; /* 0x0308 */
+ __u64 guest_timer; /* 0x02d8 */
+ __u64 system_timer; /* 0x02e0 */
+ __u64 hardirq_timer; /* 0x02e8 */
+ __u64 softirq_timer; /* 0x02f0 */
+ __u64 steal_timer; /* 0x02f8 */
+ __u64 last_update_timer; /* 0x0300 */
+ __u64 last_update_clock; /* 0x0308 */
+ __u64 int_clock; /* 0x0310 */
+ __u64 mcck_clock; /* 0x0318 */
+ __u64 clock_comparator; /* 0x0320 */
/* Current process. */
- __u64 current_task; /* 0x0310 */
- __u64 thread_info; /* 0x0318 */
- __u64 kernel_stack; /* 0x0320 */
+ __u64 current_task; /* 0x0328 */
+ __u8 pad_0x0330[0x0338-0x0330]; /* 0x0330 */
+ __u64 kernel_stack; /* 0x0338 */
/* Interrupt, panic and restart stack. */
- __u64 async_stack; /* 0x0328 */
- __u64 panic_stack; /* 0x0330 */
- __u64 restart_stack; /* 0x0338 */
+ __u64 async_stack; /* 0x0340 */
+ __u64 panic_stack; /* 0x0348 */
+ __u64 restart_stack; /* 0x0350 */
/* Restart function and parameter. */
- __u64 restart_fn; /* 0x0340 */
- __u64 restart_data; /* 0x0348 */
- __u64 restart_source; /* 0x0350 */
+ __u64 restart_fn; /* 0x0358 */
+ __u64 restart_data; /* 0x0360 */
+ __u64 restart_source; /* 0x0368 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0358 */
- __u64 user_asce; /* 0x0360 */
+ __u64 kernel_asce; /* 0x0370 */
+ __u64 user_asce; /* 0x0378 */
/*
* The lpp and current_pid fields form a
* 64-bit value that is set as program
* parameter with the LPP instruction.
*/
- __u32 lpp; /* 0x0368 */
- __u32 current_pid; /* 0x036c */
+ __u32 lpp; /* 0x0380 */
+ __u32 current_pid; /* 0x0384 */
/* SMP info area */
- __u32 cpu_nr; /* 0x0370 */
- __u32 softirq_pending; /* 0x0374 */
- __u64 percpu_offset; /* 0x0378 */
- __u64 vdso_per_cpu_data; /* 0x0380 */
- __u64 machine_flags; /* 0x0388 */
- __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
- __u64 gmap; /* 0x0398 */
- __u32 spinlock_lockval; /* 0x03a0 */
- __u32 fpu_flags; /* 0x03a4 */
- __u8 pad_0x03a8[0x0400-0x03a8]; /* 0x03a8 */
+ __u32 cpu_nr; /* 0x0388 */
+ __u32 softirq_pending; /* 0x038c */
+ __u64 percpu_offset; /* 0x0390 */
+ __u64 vdso_per_cpu_data; /* 0x0398 */
+ __u64 machine_flags; /* 0x03a0 */
+ __u32 preempt_count; /* 0x03a8 */
+ __u8 pad_0x03ac[0x03b0-0x03ac]; /* 0x03ac */
+ __u64 gmap; /* 0x03b0 */
+ __u32 spinlock_lockval; /* 0x03b8 */
+ __u32 fpu_flags; /* 0x03bc */
+ __u8 pad_0x03c0[0x0400-0x03c0]; /* 0x03c0 */
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 515fea5a3fc4..9b828c073176 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -8,7 +8,7 @@
#define __S390_MMU_CONTEXT_H
#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
@@ -63,7 +63,7 @@ static inline void set_user_asce(struct mm_struct *mm)
S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
- set_cpu_flag(CIF_ASCE);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
}
static inline void clear_user_asce(void)
@@ -81,7 +81,7 @@ static inline void load_kernel_asce(void)
__ctl_store(asce, 1, 1);
if (asce != S390_lowcore.kernel_asce)
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/s390/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6611f798d2be..4e3186649578 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -133,6 +133,7 @@ struct zpci_dev {
/* Function measurement block */
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
+ u16 fmb_length;
/* software counters */
atomic64_t allocated_pages;
atomic64_t mapped_pages;
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index e75c64cbcf08..938b8cc19fc6 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
+extern bool zpci_unique_uid;
+
/* List PCI functions request */
struct clp_req_list_pci {
struct clp_req_hdr hdr;
@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
u64 resume_token;
u32 reserved2;
u16 max_fn;
- u8 reserved3;
+ u8 : 7;
+ u8 uid_checking : 1;
u8 entry_size;
struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
@@ -82,9 +85,10 @@ struct clp_rsp_query_pci {
u32 fid; /* pci function id */
u8 bar_size[PCI_BAR_COUNT];
u16 pchid;
- u32 bar[PCI_BAR_COUNT];
+ __le32 bar[PCI_BAR_COUNT];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
- u32 : 24;
+ u32 : 16;
+ u8 fmb_len;
u8 pft; /* pci function type */
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f4eb9843eed4..166f703dad7c 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -27,17 +27,17 @@ extern int page_table_allocate_pgste;
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
- typedef struct { char _[n]; } addrtype;
-
- *s = val;
- n = (n / 256) - 1;
- asm volatile(
- " mvc 8(248,%0),0(%0)\n"
- "0: mvc 256(256,%0),0(%0)\n"
- " la %0,256(%0)\n"
- " brct %1,0b\n"
- : "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
- : "m" (*(addrtype *) s));
+ struct addrtype { char _[256]; };
+ int i;
+
+ for (i = 0; i < n; i += 256) {
+ *s = val;
+ asm volatile(
+ "mvc 8(248,%[s]),0(%[s])\n"
+ : "+m" (*(struct addrtype *) s)
+ : [s] "a" (s));
+ s += 256 / sizeof(long);
+ }
}
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
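The rewritten clear_table() leans on mvc's left-to-right, byte-wise, overlap-propagating copy: store the 8-byte pattern once, then one mvc replicates it across the 256-byte block. A userspace model of that propagation (a plain byte loop in place of mvc; illustrative only):

#include <stdio.h>

/* Model of the new clear_table(): write the pattern into the first
 * 8 bytes of each 256-byte block, then copy byte-by-byte forward
 * with overlap -- the trick "mvc 8(248,%[s]),0(%[s])" does inline. */
static void model_clear_table(unsigned long *s, unsigned long val, size_t n)
{
        size_t i, j;
        unsigned char *p;

        for (i = 0; i < n; i += 256) {
                *s = val;
                p = (unsigned char *)s;
                for (j = 0; j < 248; j++)       /* overlapping forward copy */
                        p[8 + j] = p[j];
                s += 256 / sizeof(unsigned long);
        }
}

int main(void)
{
        unsigned long tab[64];  /* 512 bytes, two 256-byte blocks */

        model_clear_table(tab, 0x20UL, sizeof(tab));
        printf("%#lx %#lx\n", tab[1], tab[63]); /* both 0x20 */
        return 0;
}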
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0362cd5fa187..7ed1972b1920 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -200,6 +200,7 @@ static inline int is_module_addr(void *addr)
*/
/* Hardware bits in the page table entry */
+#define _PAGE_NOEXEC 0x100 /* HW no-execute bit */
#define _PAGE_PROTECT 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
@@ -277,6 +278,7 @@ static inline int is_module_addr(void *addr)
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
+#define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
@@ -316,6 +318,7 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY (0)
@@ -385,17 +388,23 @@ static inline int is_module_addr(void *addr)
* Page protection definitions.
*/
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
-#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+#define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
+#define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_INVALID | _PAGE_PROTECT)
-#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
+#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_YOUNG | _PAGE_DIRTY)
+ _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_YOUNG | _PAGE_DIRTY)
+ _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
- _PAGE_PROTECT)
+ _PAGE_PROTECT | _PAGE_NOEXEC)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_YOUNG | _PAGE_DIRTY)
/*
* On s390 the page table entry has an invalid bit and a read-only bit.
@@ -404,43 +413,51 @@ static inline int is_module_addr(void *addr)
*/
/*xwr*/
#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_READ
-#define __P011 PAGE_READ
-#define __P100 PAGE_READ
-#define __P101 PAGE_READ
-#define __P110 PAGE_READ
-#define __P111 PAGE_READ
+#define __P001 PAGE_RO
+#define __P010 PAGE_RO
+#define __P011 PAGE_RO
+#define __P100 PAGE_RX
+#define __P101 PAGE_RX
+#define __P110 PAGE_RX
+#define __P111 PAGE_RX
#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_WRITE
-#define __S011 PAGE_WRITE
-#define __S100 PAGE_READ
-#define __S101 PAGE_READ
-#define __S110 PAGE_WRITE
-#define __S111 PAGE_WRITE
+#define __S001 PAGE_RO
+#define __S010 PAGE_RW
+#define __S011 PAGE_RW
+#define __S100 PAGE_RX
+#define __S101 PAGE_RX
+#define __S110 PAGE_RWX
+#define __S111 PAGE_RWX
/*
* Segment entry (large page) protection definitions.
*/
#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
_SEGMENT_ENTRY_PROTECT)
-#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT | \
+#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \
+ _SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_NOEXEC)
+#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_READ)
-#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
+#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_WRITE | \
+ _SEGMENT_ENTRY_NOEXEC)
+#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \
_SEGMENT_ENTRY_LARGE | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE | \
_SEGMENT_ENTRY_YOUNG | \
- _SEGMENT_ENTRY_DIRTY)
+ _SEGMENT_ENTRY_DIRTY | \
+ _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
_SEGMENT_ENTRY_LARGE | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_YOUNG | \
- _SEGMENT_ENTRY_PROTECT)
+ _SEGMENT_ENTRY_PROTECT | \
+ _SEGMENT_ENTRY_NOEXEC)
/*
* Region3 entry (large page) protection definitions.
@@ -451,12 +468,14 @@ static inline int is_module_addr(void *addr)
_REGION3_ENTRY_READ | \
_REGION3_ENTRY_WRITE | \
_REGION3_ENTRY_YOUNG | \
- _REGION3_ENTRY_DIRTY)
+ _REGION3_ENTRY_DIRTY | \
+ _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
_REGION3_ENTRY_LARGE | \
_REGION3_ENTRY_READ | \
_REGION3_ENTRY_YOUNG | \
- _REGION_ENTRY_PROTECT)
+ _REGION_ENTRY_PROTECT | \
+ _REGION_ENTRY_NOEXEC)
static inline int mm_has_pgste(struct mm_struct *mm)
{
@@ -621,12 +640,12 @@ static inline int pud_bad(pud_t pud)
static inline int pmd_present(pmd_t pmd)
{
- return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
+ return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}
static inline int pmd_none(pmd_t pmd)
{
- return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
+ return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}
static inline unsigned long pmd_pfn(pmd_t pmd)
@@ -784,7 +803,7 @@ static inline void pud_clear(pud_t *pud)
static inline void pmd_clear(pmd_t *pmdp)
{
- pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -801,14 +820,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
/*
- * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
- * invalid bit set, clear it again for readable, young pages
+ * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
+ * has the invalid bit set, clear it again for readable, young pages
*/
if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
pte_val(pte) &= ~_PAGE_INVALID;
/*
- * newprot for PAGE_READ and PAGE_WRITE has the page protection
- * bit set, clear it again for writable, dirty pages
+ * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
+ * protection bit set, clear it again for writable, dirty pages
*/
if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
pte_val(pte) &= ~_PAGE_PROTECT;
@@ -1029,6 +1048,8 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
+ if (!MACHINE_HAS_NX)
+ pte_val(entry) &= ~_PAGE_NOEXEC;
if (mm_has_pgste(mm))
ptep_set_pte_at(mm, addr, ptep, entry);
else
@@ -1173,14 +1194,18 @@ static inline pud_t pud_mkdirty(pud_t pud)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
/*
- * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
- * Convert to segment table entry format.
+ * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
+ * (see __Pxxx / __Sxxx). Convert to segment table entry format.
*/
if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
return pgprot_val(SEGMENT_NONE);
- if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
- return pgprot_val(SEGMENT_READ);
- return pgprot_val(SEGMENT_WRITE);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
+ return pgprot_val(SEGMENT_RO);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
+ return pgprot_val(SEGMENT_RX);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
+ return pgprot_val(SEGMENT_RW);
+ return pgprot_val(SEGMENT_RWX);
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
@@ -1315,6 +1340,8 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
+ if (!MACHINE_HAS_NX)
+ pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
*pmdp = entry;
}
@@ -1330,7 +1357,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+ return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
@@ -1340,10 +1367,10 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
{
if (full) {
pmd_t pmd = *pmdp;
- *pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
+ *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
return pmd;
}
- return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+ return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1357,7 +1384,7 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+ pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1389,7 +1416,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
- return MACHINE_HAS_HPAGE ? 1 : 0;
+ return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
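How the renamed protections get selected: mmap's xwr bits index __Pxxx/__Sxxx, and with NX support every non-execute row now carries _PAGE_NOEXEC, so only the x=1 rows stay executable. A tiny demonstration of the index construction (PROT_* values as in the uapi):

#include <stdio.h>

#define PROT_READ  0x1
#define PROT_WRITE 0x2
#define PROT_EXEC  0x4

int main(void)
{
        int prot = PROT_READ | PROT_EXEC;       /* private mapping */

        /* __P101 -> PAGE_RX; PROT_READ alone would give __P001 ->
         * PAGE_RO, which now includes _PAGE_NOEXEC. */
        printf("__P%d%d%d\n", !!(prot & PROT_EXEC),
               !!(prot & PROT_WRITE), !!(prot & PROT_READ));
        return 0;
}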
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
new file mode 100644
index 000000000000..b48aef4188f6
--- /dev/null
+++ b/arch/s390/include/asm/pkey.h
@@ -0,0 +1,90 @@
+/*
+ * Kernelspace interface to the pkey device driver
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author: Harald Freudenberger <freude@de.ibm.com>
+ *
+ */
+
+#ifndef _KAPI_PKEY_H
+#define _KAPI_PKEY_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <uapi/asm/pkey.h>
+
+/*
+ * Generate (AES) random secure key.
+ * @param cardnr may be -1 (use default card)
+ * @param domain may be -1 (use default domain)
+ * @param keytype one of the PKEY_KEYTYPE values
+ * @param seckey pointer to buffer receiving the secure key
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_genseckey(__u16 cardnr, __u16 domain,
+ __u32 keytype, struct pkey_seckey *seckey);
+
+/*
+ * Generate (AES) secure key with given key value.
+ * @param cardnr may be -1 (use default card)
+ * @param domain may be -1 (use default domain)
+ * @param keytype one of the PKEY_KEYTYPE values
+ * @param clrkey pointer to buffer with clear key data
+ * @param seckey pointer to buffer receiving the secure key
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_clr2seckey(__u16 cardnr, __u16 domain, __u32 keytype,
+ const struct pkey_clrkey *clrkey,
+ struct pkey_seckey *seckey);
+
+/*
+ * Derive (AES) protected key from the (AES) secure key blob.
+ * @param cardnr may be -1 (use default card)
+ * @param domain may be -1 (use default domain)
+ * @param seckey pointer to buffer with the input secure key
+ * @param protkey pointer to buffer receiving the protected key and
+ * additional info (type, length)
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_sec2protkey(__u16 cardnr, __u16 domain,
+ const struct pkey_seckey *seckey,
+ struct pkey_protkey *protkey);
+
+/*
+ * Derive (AES) protected key from a given clear key value.
+ * @param keytype one of the PKEY_KEYTYPE values
+ * @param clrkey pointer to buffer with clear key data
+ * @param protkey pointer to buffer receiving the protected key and
+ * additional info (type, length)
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_clr2protkey(__u32 keytype,
+ const struct pkey_clrkey *clrkey,
+ struct pkey_protkey *protkey);
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
+ * @param seckey pointer to buffer with the input secure key
+ * @param cardnr pointer to cardnr, receives the card number on success
+ * @param domain pointer to domain, receives the domain number on success
+ * @param verify if set, always verify by fetching verification pattern
+ * from card
+ * @return 0 on success, negative errno value on failure. If no card could be
+ * found, -ENODEV is returned.
+ */
+int pkey_findcard(const struct pkey_seckey *seckey,
+ __u16 *cardnr, __u16 *domain, int verify);
+
+/*
+ * Find card and transform secure key to protected key.
+ * @param seckey pointer to buffer with the input secure key
+ * @param protkey pointer to buffer receiving the protected key and
+ * additional info (type, length)
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_skey2pkey(const struct pkey_seckey *seckey,
+ struct pkey_protkey *protkey);
+
+#endif /* _KAPI_PKEY_H */
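A hedged in-kernel usage sketch for the new interface: generate a random secure key on the default card/domain, then derive the protected key from it. Error handling is trimmed and kernel context is assumed; PKEY_KEYTYPE_AES_256 comes from the uapi header:

#include <asm/pkey.h>

/* Sketch only: default card/domain selected via -1 as documented. */
static int make_protected_key(struct pkey_protkey *protkey)
{
        struct pkey_seckey seckey;
        int rc;

        rc = pkey_genseckey(-1, -1, PKEY_KEYTYPE_AES_256, &seckey);
        if (rc)
                return rc;
        return pkey_sec2protkey(-1, -1, &seckey, protkey);
}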
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
new file mode 100644
index 000000000000..b0776b2c8dcf
--- /dev/null
+++ b/arch/s390/include/asm/preempt.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/current.h>
+#include <linux/thread_info.h>
+#include <asm/atomic_ops.h>
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+ return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static inline void preempt_count_set(int pc)
+{
+ int old, new;
+
+ do {
+ old = READ_ONCE(S390_lowcore.preempt_count);
+ new = (old & PREEMPT_NEED_RESCHED) |
+ (pc & ~PREEMPT_NEED_RESCHED);
+ } while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+ old, new) != old);
+}
+
+#define init_task_preempt_count(p) do { } while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+ __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+ __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+ return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+static inline void __preempt_count_add(int val)
+{
+ if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
+ __atomic_add_const(val, &S390_lowcore.preempt_count);
+ else
+ __atomic_add(val, &S390_lowcore.preempt_count);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+ __preempt_count_add(-val);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+ return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+ return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+ preempt_offset);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define PREEMPT_ENABLED (0)
+
+static inline int preempt_count(void)
+{
+ return READ_ONCE(S390_lowcore.preempt_count);
+}
+
+static inline void preempt_count_set(int pc)
+{
+ S390_lowcore.preempt_count = pc;
+}
+
+#define init_task_preempt_count(p) do { } while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+ return false;
+}
+
+static inline void __preempt_count_add(int val)
+{
+ S390_lowcore.preempt_count += val;
+}
+
+static inline void __preempt_count_sub(int val)
+{
+ S390_lowcore.preempt_count -= val;
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+ return !--S390_lowcore.preempt_count && tif_need_resched();
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
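The z196 variant folds PREEMPT_NEED_RESCHED into the lowcore count with inverted polarity (bit set means no resched wanted), so __preempt_count_dec_and_test() is a single decrement comparing the old value against 1. A runnable model; the 0x80000000 bit value mirrors the generic scheme and is an assumption here:

#include <stdio.h>

/* Assumed bit value; active-low: bit set == no resched needed. */
#define NEED_RESCHED_BIT 0x80000000u

static unsigned int pc;

static int dec_and_test(void)
{
        /* Mirrors __atomic_add(-1, ...) == 1: true only when the count
         * reaches zero AND the active-low bit is clear (resched set). */
        return pc-- == 1;
}

int main(void)
{
        pc = NEED_RESCHED_BIT + 1;      /* count 1, no resched wanted */
        printf("%d\n", dec_and_test()); /* 0: count 0, bit still set */
        pc = 1;                         /* count 1, resched pending */
        printf("%d\n", dec_and_test()); /* 1: reschedule now */
        return 0;
}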
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 602af692efdc..e4988710aa86 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,14 +14,16 @@
#include <linux/const.h>
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
-#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
-#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
-#define CIF_FPU 3 /* restore FPU registers */
-#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
-#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
+#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
+#define CIF_ASCE_SECONDARY 2 /* secondary asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY 3 /* delay HZ disable for a tick */
+#define CIF_FPU 4 /* restore FPU registers */
+#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
-#define _CIF_ASCE _BITUL(CIF_ASCE)
+#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
+#define _CIF_ASCE_SECONDARY _BITUL(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
#define _CIF_FPU _BITUL(CIF_FPU)
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
@@ -89,7 +91,8 @@ extern void execve_tail(void);
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
*/
-#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
+#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \
+ (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)
@@ -110,14 +113,23 @@ typedef struct {
struct thread_struct {
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
+ unsigned long user_timer; /* task cputime in user space */
+ unsigned long guest_timer; /* task cputime in kvm guest */
+ unsigned long system_timer; /* task cputime in kernel space */
+ unsigned long hardirq_timer; /* task cputime in hardirq context */
+ unsigned long softirq_timer; /* task cputime in softirq context */
+ unsigned long sys_call_table; /* system call table address */
mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
unsigned int gmap_write_flag; /* gmap fault write indication */
unsigned int gmap_int_code; /* int code of last gmap fault */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
+ /* Per-thread information related to debugging */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
unsigned long per_flags; /* Flags to control debug behavior */
+ unsigned int system_call; /* system call number in signal */
+ unsigned long last_break; /* last breaking-event-address. */
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
@@ -191,10 +203,12 @@ struct stack_frame {
struct task_struct;
struct mm_struct;
struct seq_file;
+struct pt_regs;
typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);
+void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);
@@ -234,9 +248,10 @@ static inline unsigned short stap(void)
/*
* Give up the time slice of the virtual PU.
*/
-void cpu_relax(void);
+#define cpu_relax_yield cpu_relax_yield
+void cpu_relax_yield(void);
-#define cpu_relax_lowlatency() barrier()
+#define cpu_relax() barrier()
#define ECAG_CACHE_ATTRIBUTE 0
#define ECAG_CPU_ATTRIBUTE 1
@@ -351,12 +366,12 @@ extern void (*s390_base_ext_handler_fn)(void);
extern int memcpy_real(void *, void *, size_t);
extern void memcpy_absolute(void *, void *, size_t);
-#define mem_assign_absolute(dest, val) { \
+#define mem_assign_absolute(dest, val) do { \
__typeof__(dest) __tmp = (val); \
\
BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
-}
+} while (0)
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2ad9c204b1a2..ace3bd315438 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -101,7 +101,13 @@ struct zpci_report_error_header {
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
-int sclp_get_core_info(struct sclp_core_info *info);
+int sclp_early_get_core_info(struct sclp_core_info *info);
+void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
+void sclp_early_detect(void);
+void sclp_early_printk(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len);
+
+int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
int sclp_sdias_blk_count(void);
@@ -109,14 +115,18 @@ int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
int sclp_chp_deconfigure(struct chp_id chpid);
int sclp_chp_read_info(struct sclp_chp_info *info);
-void sclp_get_ipl_info(struct sclp_ipl_info *info);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
-void sclp_early_detect(void);
-void _sclp_print_early(const char *);
void sclp_ocf_cpc_name_copy(char *dst);
+static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
+{
+ if (early)
+ return sclp_early_get_core_info(info);
+ return _sclp_get_core_info(info);
+}
+
#endif /* _ASM_S390_SCLP_H */
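
The new sclp_get_core_info() wrapper dispatches between the early variant and the regular SCLP implementation. A kernel-context usage sketch (not buildable standalone); the function name and its caller are illustrative:

static int example_detect_cores(struct sclp_core_info *info, int early)
{
	/* Pass early != 0 before the SCLP driver is fully initialized;
	 * the request is then served by sclp_early_get_core_info(),
	 * while later callers reach _sclp_get_core_info(). */
	return sclp_get_core_info(info, early);
}
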
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 4af99cdaddf5..17a7904f001a 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -96,7 +96,8 @@ struct tm_scsw {
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
- u32 schxs:8;
+ u32 ifob:1;
+ u32 sesq:7;
} __attribute__ ((packed));
/**
@@ -177,6 +178,9 @@ union scsw {
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
+#define SCSW_SESQ_DEV_NOFCX 3
+#define SCSW_SESQ_PATH_NOFCX 4
+
/*
* architectured values for first sense byte
*/
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 5e8d57e1cc5e..30bdb5a027f3 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -30,6 +30,7 @@
#define MACHINE_FLAG_TLB_LC _BITUL(12)
#define MACHINE_FLAG_VX _BITUL(13)
#define MACHINE_FLAG_CAD _BITUL(14)
+#define MACHINE_FLAG_NX _BITUL(15)
#define LPP_MAGIC _BITUL(31)
#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
@@ -58,9 +59,6 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
-#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
-#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
-
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
@@ -71,6 +69,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
+#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
/*
* Console mode. Override with conmode=
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 0cc383b9be7f..3deb134587b7 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
+extern void smp_detect_cpus(void);
#else /* CONFIG_SMP */
@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_fill_possible_mask(void) { }
+static inline void smp_detect_cpus(void) { }
#endif /* CONFIG_SMP */
@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
}
}
+/* Return thread 0 CPU number as base CPU */
+static inline int smp_get_base_cpu(int cpu)
+{
+ return cpu - (cpu % (smp_cpu_mtid + 1));
+}
+
#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
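
smp_get_base_cpu() rounds a CPU number down to the first hardware thread of its core; smp_cpu_mtid is the highest thread id per core. A standalone demo of the arithmetic, with smp_cpu_mtid passed as a parameter instead of the kernel global:

#include <stdio.h>

static int base_cpu(int cpu, int smp_cpu_mtid)
{
	return cpu - (cpu % (smp_cpu_mtid + 1));
}

int main(void)
{
	/* With SMT-2 (smp_cpu_mtid == 1), CPUs 4 and 5 are the two
	 * threads of one core; both map to base CPU 4. */
	printf("%d %d\n", base_cpu(4, 1), base_cpu(5, 1));	/* 4 4 */
	/* Without SMT (smp_cpu_mtid == 0) every CPU is its own base. */
	printf("%d\n", base_cpu(7, 0));				/* 7 */
	return 0;
}
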
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 7e9e09f600fa..ffc45048ea7d 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
return __sync_bool_compare_and_swap(lock, old, new);
}
+#ifndef CONFIG_SMP
+static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
+#else
+bool arch_vcpu_is_preempted(int cpu);
+#endif
+
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
@@ -55,7 +63,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
- return ACCESS_ONCE(lp->lock) != 0;
+ return READ_ONCE(lp->lock) != 0;
}
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
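
arch_vcpu_is_preempted() lets spin-wait loops stop burning cycles when the lock holder's virtual CPU has been preempted by the hypervisor. A kernel-context sketch of a typical consumer; everything except vcpu_is_preempted() and cpu_relax() is illustrative:

static bool example_spin_on_owner(int owner_cpu, bool (*still_locked)(void))
{
	while (still_locked()) {
		/* The holder cannot make progress until its vCPU runs
		 * again, so give up the optimistic spin. */
		if (vcpu_is_preempted(owner_cpu))
			return false;
		cpu_relax();
	}
	return true;
}
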
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 8662f5c8e17f..e5f5c7074f2c 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -14,6 +14,7 @@
#define __HAVE_ARCH_MEMCHR /* inline & arch function */
#define __HAVE_ARCH_MEMCMP /* arch function */
#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
@@ -32,6 +33,7 @@
extern int memcmp(const void *, const void *, size_t);
extern void *memcpy(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
+extern void *memmove(void *, const void *, size_t);
extern int strcmp(const char *,const char *);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
extern char *strrchr(const char *, int);
extern char *strstr(const char *, const char *);
-#undef __HAVE_ARCH_MEMMOVE
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
@@ -61,7 +62,7 @@ static inline void *memchr(const void * s, int c, size_t n)
" jl 1f\n"
" la %0,0\n"
"1:"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+ : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
@@ -73,7 +74,7 @@ static inline void *memscan(void *s, int c, size_t n)
asm volatile(
"0: srst %0,%1\n"
" jo 0b\n"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+ : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
@@ -114,7 +115,7 @@ static inline size_t strlen(const char *s)
asm volatile(
"0: srst %0,%1\n"
" jo 0b"
- : "+d" (r0), "+a" (tmp) : : "cc");
+ : "+d" (r0), "+a" (tmp) : : "cc", "memory");
return r0 - (unsigned long) s;
}
@@ -127,7 +128,7 @@ static inline size_t strnlen(const char * s, size_t n)
asm volatile(
"0: srst %0,%1\n"
" jo 0b"
- : "+a" (end), "+a" (tmp) : "d" (r0) : "cc");
+ : "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
return end - s;
}
#else /* IN_ARCH_STRING_C */
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2728114d5484..229326c942c7 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
char reserved_3[5];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
+ char reserved_4[3];
+ unsigned char vsne;
+ uuid_be uuid;
+ char reserved_5[160];
+ char ext_name[256];
};
#define LPAR_CHAR_DEDICATED (1 << 7)
@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
unsigned int caf;
char cpi[16];
char reserved_1[3];
- char ext_name_encoding;
+ unsigned char evmne;
unsigned int reserved_2;
uuid_be uuid;
} vm[8];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c0398c363..a5b54a445eb8 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -12,10 +12,10 @@
/*
* Size of kernel stack for each process
*/
-#define THREAD_ORDER 2
+#define THREAD_SIZE_ORDER 2
#define ASYNC_ORDER 2
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
#ifndef __ASSEMBLY__
@@ -30,15 +30,7 @@
* - if the contents of this structure are changed, the assembly constants must also be changed
*/
struct thread_info {
- struct task_struct *task; /* main task structure */
unsigned long flags; /* low level flags */
- unsigned long sys_call_table; /* System call table address */
- unsigned int cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
- unsigned int system_call;
- __u64 user_timer;
- __u64 system_timer;
- unsigned long last_break; /* last breaking-event-address. */
};
/*
@@ -46,26 +38,14 @@ struct thread_info {
*/
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
.flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- return (struct thread_info *) S390_lowcore.thread_info;
-}
-
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-#define THREAD_SIZE_ORDER THREAD_ORDER
-
#endif
/*
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 0bb08f341c09..354344dcc198 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)
void clock_comparator_work(void);
-void __init ptff_init(void);
+void __init time_early_init(void);
extern unsigned char ptff_function_mask[16];
-extern unsigned long lpar_offset;
-extern unsigned long initial_leap_seconds;
/* Function codes for the ptff instruction. */
#define PTFF_QAF 0x00 /* query available functions */
@@ -100,21 +98,28 @@ struct ptff_qui {
unsigned int pad_0x5c[41];
} __packed;
-static inline int ptff(void *ptff_block, size_t len, unsigned int func)
-{
- typedef struct { char _[len]; } addrtype;
- register unsigned int reg0 asm("0") = func;
- register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
- int rc;
-
- asm volatile(
- " .word 0x0104\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "+m" (*(addrtype *) ptff_block)
- : "d" (reg0), "d" (reg1) : "cc");
- return rc;
-}
+/*
+ * ptff - Perform timing facility function
+ * @ptff_block: Pointer to ptff parameter block
+ * @len: Length of parameter block
+ * @func: Function code
+ * Returns: Condition code (0 on success)
+ */
+#define ptff(ptff_block, len, func) \
+({ \
+ struct addrtype { char _[len]; }; \
+ register unsigned int reg0 asm("0") = func; \
+ register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+ int rc; \
+ \
+ asm volatile( \
+ " .word 0x0104\n" \
+ " ipm %0\n" \
+ " srl %0,28\n" \
+ : "=d" (rc), "+m" (*(struct addrtype *) reg1) \
+ : "d" (reg0), "d" (reg1) : "cc"); \
+ rc; \
+})
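
Rewriting ptff() as a macro apparently makes the block length a compile-time constant at each call site, so the local struct addrtype can describe exactly the memory the instruction may modify. A kernel-context usage sketch modeled on the time code; the function name, the error values, and the facility-bit check (bit 28, TOD-clock steering) are assumptions here:

static int example_ptff_query(void)
{
	static unsigned char mask[16];

	if (!test_facility(28))		/* TOD-clock-steering facility */
		return -ENODEV;
	/* Query the available PTFF functions into the 16-byte mask. */
	if (ptff(&mask, sizeof(mask), PTFF_QAF))
		return -EIO;		/* condition code != 0 */
	return 0;
}
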
static inline unsigned long long local_tick_disable(void)
{
@@ -173,14 +178,6 @@ int get_phys_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
-void tod_to_timeval(__u64 todval, struct timespec64 *xt);
-
-static inline
-void stck_to_timespec64(unsigned long long stck, struct timespec64 *ts)
-{
- tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
-}
-
extern u64 sched_clock_base_cc;
/**
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 15711de10403..853b2a3d8dee 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -104,12 +104,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page);
}
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
- struct page *page)
-{
- return __tlb_remove_page(tlb, page);
-}
-
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
@@ -162,5 +156,13 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
#define tlb_migrate_finish(mm) do { } while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+}
#endif /* _S390_TLB_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index f15f5571ca2b..fa1bfce10370 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
cpumask_t drawer_mask;
};
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
- (&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
#define mc_capable() 1
+void topology_init_early(void);
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
#else /* CONFIG_SCHED_TOPOLOGY */
+static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
static inline void topology_expect_change(void) { }
@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
#define cpu_to_node cpu_to_node
static inline int cpu_to_node(int cpu)
{
- return per_cpu(cpu_topology, cpu).node_id;
+ return cpu_topology[cpu].node_id;
}
/* Returns a pointer to the cpumask of CPUs on node 'node'. */
diff --git a/arch/s390/include/asm/trace/zcrypt.h b/arch/s390/include/asm/trace/zcrypt.h
new file mode 100644
index 000000000000..adcb77fafa9d
--- /dev/null
+++ b/arch/s390/include/asm/trace/zcrypt.h
@@ -0,0 +1,122 @@
+/*
+ * Tracepoint definitions for the s390 zcrypt device driver
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ *
+ * Two tracepoint events are currently defined here.
+ * An s390_zcrypt_req request event occurs as soon as the request is
+ * recognized by the zcrypt ioctl function. This event marks the point
+ * where processing of the request begins.
+ * The s390_zcrypt_rep event occurs as late as possible within the
+ * zcrypt ioctl function, marking the point where the request has been
+ * processed by the kernel and the result is about to be transferred
+ * back to userspace.
+ * The glue which binds together the request and reply events is the ptr
+ * parameter, which is the address of the local buffer where the ioctl
+ * function stored the request from userspace.
+ *
+ * The main purpose of this zcrypt tracepoint API is to gather data for
+ * performance measurements, together with information about which card
+ * and queue processed the request. It is not an FFDC (first failure
+ * data capture) interface; the zcrypt device driver already contains
+ * code that serves the s390 debug feature interface.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_ZCRYPT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_ZCRYPT_H
+
+#include <linux/tracepoint.h>
+
+#define TP_ICARSAMODEXPO 0x0001
+#define TP_ICARSACRT 0x0002
+#define TP_ZSECSENDCPRB 0x0003
+#define TP_ZSENDEP11CPRB 0x0004
+#define TP_HWRNGCPRB 0x0005
+
+#define show_zcrypt_tp_type(type) \
+ __print_symbolic(type, \
+ { TP_ICARSAMODEXPO, "ICARSAMODEXPO" }, \
+ { TP_ICARSACRT, "ICARSACRT" }, \
+ { TP_ZSECSENDCPRB, "ZSECSENDCPRB" }, \
+ { TP_ZSENDEP11CPRB, "ZSENDEP11CPRB" }, \
+ { TP_HWRNGCPRB, "HWRNGCPRB" })
+
+/**
+ * trace_s390_zcrypt_req - zcrypt request tracepoint function
+ * @ptr: Address of the local buffer where the request from userspace
+ * is stored. Can be used as a unique id to correlate
+ * request and reply.
+ * @type: One of the TP_ defines above.
+ *
+ * Called when a request from userspace is recognized within the ioctl
+ * function of the zcrypt device driver and may act as an entry
+ * timestamp.
+ */
+TRACE_EVENT(s390_zcrypt_req,
+ TP_PROTO(void *ptr, u32 type),
+ TP_ARGS(ptr, type),
+ TP_STRUCT__entry(
+ __field(void *, ptr)
+ __field(u32, type)),
+ TP_fast_assign(
+ __entry->ptr = ptr;
+ __entry->type = type;),
+ TP_printk("ptr=%p type=%s",
+ __entry->ptr,
+ show_zcrypt_tp_type(__entry->type))
+);
+
+/**
+ * trace_s390_zcrypt_rep - zcrypt reply tracepoint function
+ * @ptr: Address of the local buffer where the request from userspace
+ * is stored. Can be used as a unique id to correlate
+ * request and reply.
+ * @fc: Function code.
+ * @rc: The bare return code as returned by the device driver ioctl
+ * function.
+ * @dev: The adapter number where this request was actually processed.
+ * @dom: Domain id of the device where this request was processed.
+ *
+ * Called upon recognizing the reply from the crypto adapter. This
+ * event may act as the exit timestamp for the request and also
+ * carries information about which adapter processed the request
+ * and the return code from the device driver.
+ */
+TRACE_EVENT(s390_zcrypt_rep,
+ TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom),
+ TP_ARGS(ptr, fc, rc, dev, dom),
+ TP_STRUCT__entry(
+ __field(void *, ptr)
+ __field(u32, fc)
+ __field(u32, rc)
+ __field(u16, device)
+ __field(u16, domain)),
+ TP_fast_assign(
+ __entry->ptr = ptr;
+ __entry->fc = fc;
+ __entry->rc = rc;
+ __entry->device = dev;
+ __entry->domain = dom;),
+ TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx",
+ __entry->ptr,
+ (unsigned int) __entry->fc,
+ (int) __entry->rc,
+ (unsigned short) __entry->device,
+ (unsigned short) __entry->domain)
+);
+
+#endif /* _TRACE_S390_ZCRYPT_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE zcrypt
+
+#include <trace/define_trace.h>
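
A kernel-context sketch of how the ioctl path is expected to emit the event pair; the function, the request forwarding, and the function-code/card/domain values are illustrative:

static int zcrypt_example_ioctl(struct ica_rsa_modexpo *mex)
{
	u32 fc = 0;	/* function code, filled in during processing */
	int rc;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);	/* entry stamp */
	rc = 0;		/* ... forward the request to a card/queue ... */
	/* Exit stamp: the same ptr correlates the pair; the card (0)
	 * and domain (6) numbers here are made up. */
	trace_s390_zcrypt_rep(mex, fc, rc, 0, 6);
	return rc;
}
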
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 52d7c8709279..136932ff4250 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -14,6 +14,7 @@
*/
#include <linux/sched.h>
#include <linux/errno.h>
+#include <asm/processor.h>
#include <asm/ctl_reg.h>
#define VERIFY_READ 0
@@ -36,18 +37,20 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
-
-#define set_fs(x) \
-({ \
- unsigned long __pto; \
- current->thread.mm_segment = (x); \
- __pto = current->thread.mm_segment.ar4 ? \
- S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
- __ctl_load(__pto, 7, 7); \
-})
-
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
+static inline void set_fs(mm_segment_t fs)
+{
+ current->thread.mm_segment = fs;
+ if (segment_eq(fs, KERNEL_DS)) {
+ set_cpu_flag(CIF_ASCE_SECONDARY);
+ __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ } else {
+ clear_cpu_flag(CIF_ASCE_SECONDARY);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+ }
+}
+
static inline int __range_ok(unsigned long addr, unsigned long size)
{
return 1;
@@ -177,7 +180,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
(unsigned long *)x,
size, spec);
break;
- };
+ }
return rc;
}
@@ -207,7 +210,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
(unsigned long __user *)ptr,
size, spec);
break;
- };
+ }
return rc;
}
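
The rewritten set_fs() records the address-space choice in a CPU flag so the entry code (see the .Lsysc_asce / .Lio_asce changes below) can restore the secondary ASCE on the way back to user space. A kernel-context sketch of the classic pattern this must keep working; the function name is illustrative:

static void example_kernel_access(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* sets CIF_ASCE_SECONDARY, loads kernel asce */
	/* ... uaccess routines now operate on kernel addresses ... */
	set_fs(old_fs);		/* clears the flag, restores the user asce */
}
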
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index d0a2dbf2433d..88bdc477a843 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -33,6 +33,8 @@ struct vdso_data {
__u32 ectg_available; /* ECTG instruction present 0x58 */
__u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
+ __u32 ts_dir; /* TOD steering direction 0x64 */
+ __u64 ts_end; /* TOD steering end 0x68 */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index cc44b09c25fc..6848ba5c1454 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -12,6 +12,7 @@ header-y += dasd.h
header-y += debug.h
header-y += errno.h
header-y += fcntl.h
+header-y += hypfs.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
@@ -23,22 +24,23 @@ header-y += mman.h
header-y += monwriter.h
header-y += msgbuf.h
header-y += param.h
+header-y += pkey.h
header-y += poll.h
header-y += posix_types.h
header-y += ptrace.h
header-y += qeth.h
header-y += resource.h
header-y += schid.h
+header-y += sclp_ctl.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
+header-y += sie.h
header-y += sigcontext.h
header-y += siginfo.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
-header-y += sclp_ctl.h
-header-y += sie.h
header-y += stat.h
header-y += statfs.h
header-y += swab.h
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
new file mode 100644
index 000000000000..ed7f19c27ce5
--- /dev/null
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -0,0 +1,112 @@
+/*
+ * Userspace interface to the pkey device driver
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author: Harald Freudenberger <freude@de.ibm.com>
+ *
+ */
+
+#ifndef _UAPI_PKEY_H
+#define _UAPI_PKEY_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * Ioctl calls supported by the pkey device driver
+ */
+
+#define PKEY_IOCTL_MAGIC 'p'
+
+#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
+#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
+#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
+
+/* defines for the type field within the pkey_protkey struct */
+#define PKEY_KEYTYPE_AES_128 1
+#define PKEY_KEYTYPE_AES_192 2
+#define PKEY_KEYTYPE_AES_256 3
+
+/* Struct to hold a secure key blob */
+struct pkey_seckey {
+ __u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */
+};
+
+/* Struct to hold protected key and length info */
+struct pkey_protkey {
+ __u32 type; /* key type, one of the PKEY_KEYTYPE values */
+ __u32 len; /* bytes actually stored in protkey[] */
+ __u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+};
+
+/* Struct to hold a clear key value */
+struct pkey_clrkey {
+ __u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */
+};
+
+/*
+ * Generate secure key
+ */
+struct pkey_genseck {
+ __u16 cardnr; /* in: card to use or FFFF for any */
+ __u16 domain; /* in: domain or FFFF for any */
+ __u32 keytype; /* in: key type to generate */
+ struct pkey_seckey seckey; /* out: the secure key blob */
+};
+#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
+
+/*
+ * Construct secure key from clear key value
+ */
+struct pkey_clr2seck {
+ __u16 cardnr; /* in: card to use or FFFF for any */
+ __u16 domain; /* in: domain or FFFF for any */
+ __u32 keytype; /* in: key type to generate */
+ struct pkey_clrkey clrkey; /* in: the clear key value */
+ struct pkey_seckey seckey; /* out: the secure key blob */
+};
+#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
+
+/*
+ * Fabricate protected key from a secure key
+ */
+struct pkey_sec2protk {
+ __u16 cardnr; /* in: card to use or FFFF for any */
+ __u16 domain; /* in: domain or FFFF for any */
+ struct pkey_seckey seckey; /* in: the secure key blob */
+ struct pkey_protkey protkey; /* out: the protected key */
+};
+#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
+
+/*
+ * Fabricate protected key from a clear key value
+ */
+struct pkey_clr2protk {
+ __u32 keytype; /* in: key type to generate */
+ struct pkey_clrkey clrkey; /* in: the clear key value */
+ struct pkey_protkey protkey; /* out: the protected key */
+};
+#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
+
+/*
+ * Search for matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
+ */
+struct pkey_findcard {
+ struct pkey_seckey seckey; /* in: the secure key blob */
+ __u16 cardnr; /* out: card number */
+ __u16 domain; /* out: domain number */
+};
+#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
+
+/*
+ * Combined together: findcard + sec2prot
+ */
+struct pkey_skey2pkey {
+ struct pkey_seckey seckey; /* in: the secure key blob */
+ struct pkey_protkey protkey; /* out: the protected key */
+};
+#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
+
+#endif /* _UAPI_PKEY_H */
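
A minimal userspace sketch of the new ioctl interface, assuming the driver exposes a /dev/pkey misc device (the node name is an assumption here): generate an AES-256 secure key on any card/domain, then derive a protected key from it.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

int main(void)
{
	struct pkey_genseck genseck;
	struct pkey_sec2protk sec2protk;
	int fd;

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0) {
		perror("open /dev/pkey");
		return 1;
	}
	/* Generate an AES-256 secure key; 0xFFFF means "any". */
	memset(&genseck, 0, sizeof(genseck));
	genseck.cardnr = 0xFFFF;
	genseck.domain = 0xFFFF;
	genseck.keytype = PKEY_KEYTYPE_AES_256;
	if (ioctl(fd, PKEY_GENSECK, &genseck)) {
		perror("PKEY_GENSECK");
		return 1;
	}
	/* Turn the secure key into a protected key usable with CPACF. */
	memset(&sec2protk, 0, sizeof(sec2protk));
	sec2protk.cardnr = 0xFFFF;
	sec2protk.domain = 0xFFFF;
	sec2protk.seckey = genseck.seckey;
	if (ioctl(fd, PKEY_SEC2PROTK, &sec2protk)) {
		perror("PKEY_SEC2PROTK");
		return 1;
	}
	printf("protected key type %u, %u bytes\n",
	       sec2protk.protkey.type, sec2protk.protkey.len);
	close(fd);
	return 0;
}
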
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 41b51c2f4f1b..b24a64cbfeb1 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -96,4 +96,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index f2b18eacaca8..a777f87ef889 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -215,6 +215,42 @@ struct ep11_urb {
uint64_t resp;
} __attribute__((packed));
+/**
+ * struct zcrypt_device_status
+ * @hwtype: raw hardware type
+ * @qid: 6 bit device index, 8 bit domain
+ * @functions: AP device function bit field 'abcdef'
+ * a, b, c = reserved
+ * d = CCA coprocessor
+ * e = Accelerator
+ * f = EP11 coprocessor
+ * @online: online status
+ * @reserved: reserved
+ */
+struct zcrypt_device_status {
+ unsigned int hwtype:8;
+ unsigned int qid:14;
+ unsigned int online:1;
+ unsigned int functions:6;
+ unsigned int reserved:3;
+};
+
+#define MAX_ZDEV_CARDIDS 64
+#define MAX_ZDEV_DOMAINS 256
+
+/**
+ * Maximum number of zcrypt devices
+ */
+#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS)
+
+/**
+ * zcrypt_device_matrix
+ * Device matrix of all zcrypt devices
+ */
+struct zcrypt_device_matrix {
+ struct zcrypt_device_status device[MAX_ZDEV_ENTRIES];
+};
+
#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
#define ZCRYPT_IOCTL_MAGIC 'z'
@@ -321,6 +357,7 @@ struct ep11_urb {
#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
+#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
/* New status calls */
#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
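
A userspace sketch of the new ZDEVICESTATUS call, assuming the usual /dev/z90crypt device node; counting online devices is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>

int main(void)
{
	struct zcrypt_device_matrix *matrix;
	int fd, i, online = 0;

	matrix = calloc(1, sizeof(*matrix));	/* 64 KiB, keep it off the stack */
	if (!matrix)
		return 1;
	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0 || ioctl(fd, ZDEVICESTATUS, matrix)) {
		perror("ZDEVICESTATUS");
		return 1;
	}
	for (i = 0; i < MAX_ZDEV_ENTRIES; i++)
		online += matrix->device[i].online;
	printf("%d online zcrypt device(s)\n", online);
	close(fd);
	free(matrix);
	return 0;
}
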
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98f6db9..060ce548fe8b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,20 +2,41 @@
# Makefile for the linux kernel.
#
-KCOV_INSTRUMENT_early.o := n
-KCOV_INSTRUMENT_sclp.o := n
-KCOV_INSTRUMENT_als.o := n
-
ifdef CONFIG_FUNCTION_TRACER
-# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace tracer code
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace early setup code
+CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+
+endif
+
+GCOV_PROFILE_als.o := n
+GCOV_PROFILE_early.o := n
+
+KCOV_INSTRUMENT_als.o := n
+KCOV_INSTRUMENT_early.o := n
+
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+
+#
+# Use -march=z900 for als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
endif
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
-CFLAGS_smp.o := -Wno-nonnull
+CFLAGS_smp.o := -Wno-nonnull
#
# Disable tailcall optimizations for stack / callchain walking functions
@@ -30,31 +51,11 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-CFLAGS_sysinfo.o += -w
-
-#
-# Use -march=z900 for sclp.c and als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o += -march=z900
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-GCOV_PROFILE_sclp.o := n
-GCOV_PROFILE_als.o := n
-UBSAN_SANITIZE_als.o := n
-UBSAN_SANITIZE_early.o := n
-UBSAN_SANITIZE_sclp.o := n
+CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
obj-y += entry.o reipl.o relocate_kernel.o
@@ -69,7 +70,7 @@ obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
-
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
diff --git a/arch/s390/kernel/als.c b/arch/s390/kernel/als.c
index a16e9d1bf9e3..14769eb52a33 100644
--- a/arch/s390/kernel/als.c
+++ b/arch/s390/kernel/als.c
@@ -41,7 +41,8 @@ static void __init print_machine_type(void)
get_cpu_id(&id);
u16_to_hex(type_str, id.machine);
strcat(mach_str, type_str);
- _sclp_print_early(mach_str);
+ strcat(mach_str, "\n");
+ sclp_early_printk(mach_str);
}
static void __init u16_to_decimal(char *str, u16 val)
@@ -79,7 +80,8 @@ static void __init print_missing_facilities(void)
* z/VM adds a four character prefix.
*/
if (strlen(als_str) > 70) {
- _sclp_print_early(als_str);
+ strcat(als_str, "\n");
+ sclp_early_printk(als_str);
*als_str = '\0';
}
u16_to_decimal(val_str, i * BITS_PER_LONG + j);
@@ -87,13 +89,14 @@ static void __init print_missing_facilities(void)
first = 0;
}
}
- _sclp_print_early(als_str);
- _sclp_print_early("See Principles of Operations for facility bits");
+ strcat(als_str, "\n");
+ sclp_early_printk(als_str);
+ sclp_early_printk("See Principles of Operations for facility bits\n");
}
static void __init facility_mismatch(void)
{
- _sclp_print_early("The Linux kernel requires more recent processor hardware");
+ sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
print_missing_facilities();
disabled_wait(0x8badcccc);
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3df9e0a5dec..c4b3570ded5b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -25,12 +25,14 @@
int main(void)
{
/* task struct offsets */
- OFFSET(__TASK_thread_info, task_struct, stack);
+ OFFSET(__TASK_stack, task_struct, stack);
OFFSET(__TASK_thread, task_struct, thread);
OFFSET(__TASK_pid, task_struct, pid);
BLANK();
/* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp);
+ OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
+ OFFSET(__THREAD_last_break, thread_struct, last_break);
OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
@@ -39,14 +41,7 @@ int main(void)
OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
BLANK();
/* thread info offsets */
- OFFSET(__TI_task, thread_info, task);
- OFFSET(__TI_flags, thread_info, flags);
- OFFSET(__TI_sysc_table, thread_info, sys_call_table);
- OFFSET(__TI_cpu, thread_info, cpu);
- OFFSET(__TI_precount, thread_info, preempt_count);
- OFFSET(__TI_user_timer, thread_info, user_timer);
- OFFSET(__TI_system_timer, thread_info, system_timer);
- OFFSET(__TI_last_break, thread_info, last_break);
+ OFFSET(__TI_flags, task_struct, thread_info.flags);
BLANK();
/* pt_regs offsets */
OFFSET(__PT_ARGS, pt_regs, args);
@@ -79,6 +74,8 @@ int main(void)
OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+ OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+ OFFSET(__VDSO_TS_END, vdso_data, ts_end);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -159,7 +156,6 @@ int main(void)
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
- OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
@@ -173,6 +169,7 @@ int main(void)
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+ OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_PASTE, lowcore, paste);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 0f9cd90c11af..a3d14161abcb 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -28,7 +28,6 @@
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/quota.h>
-#include <linux/module.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
@@ -51,7 +50,7 @@
#include <linux/slab.h>
#include <asm/types.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/scm.h>
#include <net/sock.h>
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4af60374eba0..362350cc485c 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -23,7 +23,7 @@
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <asm/ucontext.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include "compat_linux.h"
@@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
- regs->gprs[6] = task_thread_info(current)->last_break;
+ regs->gprs[6] = current->thread.last_break;
}
return 0;
@@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
- regs->gprs[5] = task_thread_info(current)->last_break;
+ regs->gprs[5] = current->thread.last_break;
return 0;
}
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 7f48e568ac64..9f0e4a2785f7 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -9,7 +9,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f9293bfefb7f..dd1d5c62c374 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -8,7 +8,8 @@
#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
@@ -31,6 +32,7 @@ static struct memblock_type oldmem_type = {
.max = 1,
.total_size = 0,
.regions = &oldmem_region,
+ .name = "oldmem",
};
struct save_area {
@@ -329,7 +331,11 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
- return nt_init_name(buf, type, desc, d_len, KEXEC_CORE_NOTE_NAME);
+ const char *note_name = "LINUX";
+
+ if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
+ note_name = KEXEC_CORE_NOTE_NAME;
+ return nt_init_name(buf, type, desc, d_len, note_name);
}
/*
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index aa12de72fd47..530226b6cb19 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -19,8 +19,8 @@
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/sysctl.h>
-#include <asm/uaccess.h>
-#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
@@ -866,7 +866,7 @@ static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
int exception)
{
- active->id.stck = get_tod_clock_fast();
+ active->id.stck = get_tod_clock_fast() - sched_clock_base_cc;
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;
@@ -1455,23 +1455,24 @@ int
debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
int area, debug_entry_t * entry, char *out_buf)
{
- struct timespec64 time_spec;
+ unsigned long sec, usec;
char *except_str;
unsigned long caller;
int rc = 0;
unsigned int level;
level = entry->id.fields.level;
- stck_to_timespec64(entry->id.stck, &time_spec);
+ sec = (entry->id.stck >> 12) + (sched_clock_base_cc >> 12);
+ sec = sec - (TOD_UNIX_EPOCH >> 12);
+ usec = do_div(sec, USEC_PER_SEC);
if (entry->id.fields.exception)
except_str = "*";
else
except_str = "-";
caller = (unsigned long) entry->caller;
- rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p ",
- area, (long long)time_spec.tv_sec,
- time_spec.tv_nsec / 1000, level, except_str,
+ rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p ",
+ area, sec, usec, level, except_str,
entry->id.fields.cpuid, (void *)caller);
return rc;
}
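
The reworked header formatter converts the delta-stored TOD stamp directly: bit 51 of the TOD clock equals one microsecond, so shifting right by 12 yields microseconds, and the division splits off the sub-second part (the kernel uses do_div() for this). A standalone demo of the same arithmetic with made-up sample values; the epoch constant is assumed to match the kernel's TOD_UNIX_EPOCH:

#include <stdio.h>

#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL	/* assumed kernel value */

int main(void)
{
	unsigned long long stck = 0x1000ULL;	/* entry stamp minus base */
	unsigned long long base = TOD_UNIX_EPOCH + 0x1000ULL; /* sched_clock_base_cc */
	unsigned long long sec, usec;

	/* TOD bit 51 == 1 microsecond, hence the shift by 12. */
	sec = (stck >> 12) + (base >> 12) - (TOD_UNIX_EPOCH >> 12);
	usec = sec % 1000000;
	sec /= 1000000;
	printf("%llu.%06llu\n", sec, usec);	/* 0.000002 */
	return 0;
}
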
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index a97354c8c667..ac6abcd3fe6a 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -5,7 +5,8 @@
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c74c59236f44..f7e82302a71e 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -16,13 +16,13 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/dis.h>
#include <asm/io.h>
#include <linux/atomic.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2374c5b46bbc..4e65c79cc5f2 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
psw.addr = (unsigned long) s390_base_pgm_handler;
S390_lowcore.program_new_psw = psw;
s390_base_pgm_handler_fn = early_pgm_check_handler;
+ S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}
static noinline __init void setup_facility_list(void)
@@ -353,6 +354,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
__ctl_set_bit(0, 17);
}
+ if (test_facility(130)) {
+ S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
+ __ctl_set_bit(0, 20);
+ }
}
static inline void save_vector_registers(void)
@@ -363,6 +368,18 @@ static inline void save_vector_registers(void)
#endif
}
+static int __init topology_setup(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (!rc && !enabled)
+ S390_lowcore.machine_flags &= ~MACHINE_HAS_TOPOLOGY;
+ return rc;
+}
+early_param("topology", topology_setup);
+
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
@@ -371,6 +388,21 @@ static int __init disable_vector_extension(char *str)
}
early_param("novx", disable_vector_extension);
+static int __init noexec_setup(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (!rc && !enabled) {
+ /* Disable no-execute support */
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
+ __ctl_clear_bit(0, 20);
+ }
+ return rc;
+}
+early_param("noexec", noexec_setup);
+
static int __init cad_setup(char *str)
{
int val;
@@ -391,7 +423,49 @@ static int __init cad_init(void)
}
early_initcall(cad_init);
-static __init void rescue_initrd(void)
+static __init void memmove_early(void *dst, const void *src, size_t n)
+{
+ unsigned long addr;
+ long incr;
+ psw_t old;
+
+ if (!n)
+ return;
+ incr = 1;
+ if (dst > src) {
+ incr = -incr;
+ dst += n - 1;
+ src += n - 1;
+ }
+ old = S390_lowcore.program_new_psw;
+ S390_lowcore.program_new_psw.mask = __extract_psw();
+ asm volatile(
+ " larl %[addr],1f\n"
+ " stg %[addr],%[psw_pgm_addr]\n"
+ "0: mvc 0(1,%[dst]),0(%[src])\n"
+ " agr %[dst],%[incr]\n"
+ " agr %[src],%[incr]\n"
+ " brctg %[n],0b\n"
+ "1:\n"
+ : [addr] "=&d" (addr),
+ [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
+ [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
+ : [incr] "d" (incr)
+ : "cc", "memory");
+ S390_lowcore.program_new_psw = old;
+}
+
+static __init noinline void ipl_save_parameters(void)
+{
+ void *src, *dst;
+
+ src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
+ dst = (void *) IPL_PARMBLOCK_ORIGIN;
+ memmove_early(dst, src, PAGE_SIZE);
+ S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
+static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
@@ -405,7 +479,7 @@ static __init void rescue_initrd(void)
return;
if (INITRD_START >= min_initrd_addr)
return;
- memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
+ memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
#endif
}
@@ -467,7 +541,8 @@ void __init startup_init(void)
ipl_save_parameters();
rescue_initrd();
clear_bss_section();
- ptff_init();
+ ipl_verify_parameters();
+ time_early_init();
init_kernel_storage_key();
lockdep_off();
setup_lowcore_early();
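
memmove_early() copies byte-wise with MVC, choosing the copy direction from the overlap, and temporarily points the program-check new PSW past the loop so that an addressing exception during the copy simply terminates it. A standalone demo of the direction logic only (the PSW handling has no userspace equivalent):

#include <stdio.h>

static void demo_memmove(void *dest, const void *source, size_t n)
{
	unsigned char *dst = dest;
	const unsigned char *src = source;
	long incr = 1;

	if (!n)
		return;
	if (dst > src) {	/* copy backward to survive overlap */
		incr = -1;
		dst += n - 1;
		src += n - 1;
	}
	while (n--) {
		*dst = *src;
		dst += incr;
		src += incr;
	}
}

int main(void)
{
	char buf[] = "abcdef";

	demo_memmove(buf + 2, buf, 4);	/* overlapping move */
	printf("%s\n", buf);		/* "ababcd" */
	return 0;
}
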
diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c
new file mode 100644
index 000000000000..819cb15c67e8
--- /dev/null
+++ b/arch/s390/kernel/early_printk.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright IBM Corp. 2017
+ */
+
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/sclp.h>
+
+static void sclp_early_write(struct console *con, const char *s, unsigned int len)
+{
+ __sclp_early_printk(s, len);
+}
+
+static struct console sclp_early_console = {
+ .name = "earlysclp",
+ .write = sclp_early_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+static int __init setup_early_printk(char *buf)
+{
+ if (early_console)
+ return 0;
+ /* Accept only "earlyprintk" and "earlyprintk=sclp" */
+ if (buf && strncmp(buf, "sclp", 4))
+ return 0;
+ if (!sclp.has_linemode && !sclp.has_vt220)
+ return 0;
+ early_console = &sclp_early_console;
+ register_console(early_console);
+ return 0;
+}
+early_param("earlyprintk", setup_early_printk);
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index b971c6be6298..1d5392b36ad8 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -8,8 +8,8 @@
* Martin Peschke <peschke@fh-brandenburg.de>
*/
-#include <linux/module.h>
-#include <asm/types.h>
+#include <linux/types.h>
+#include <linux/export.h>
#include <asm/ebcdic.h>
/*
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a30737adde..dff2152350a7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -42,7 +42,7 @@ __PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
@@ -50,7 +50,8 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
+_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
+ _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK = (_PIF_PER_TRAP)
#define BASED(name) name-cleanup_critical(%r13)
@@ -103,8 +104,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
CHECK_STACK 1<<STACK_SHIFT,\savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
-1: LAST_BREAK %r14
- UPDATE_VTIME %r14,%r15,\timer
+1: UPDATE_VTIME %r14,%r15,\timer
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
@@ -121,12 +121,6 @@ _PIF_WORK = (_PIF_PER_TRAP)
mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
- .macro LAST_BREAK scratch
- srag \scratch,%r10,23
- jz .+10
- stg %r10,__TI_last_break(%r12)
- .endm
-
.macro REENABLE_IRQS
stg %r8,__LC_RETURN_PSW
ni __LC_RETURN_PSW,0xbf
@@ -186,14 +180,13 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lgr %r1,%r2
aghi %r1,__TASK_thread # thread_struct of prev task
- lg %r5,__TASK_thread_info(%r3) # get thread_info of next
+ lg %r5,__TASK_stack(%r3) # start of kernel stack of next
stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
lgr %r1,%r3
aghi %r1,__TASK_thread # thread_struct of next task
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
- stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
@@ -273,22 +266,22 @@ ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
+ lghi %r13,__TASK_thread
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
- LAST_BREAK %r13
.Lsysc_vtime:
- UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
+ UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
- lg %r10,__TI_sysc_table(%r12) # address of system call table
+ # load address of system call table
+ lg %r10,__THREAD_sysc_table(%r13,%r12)
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
@@ -347,8 +340,8 @@ ENTRY(system_call)
jo .Lsysc_notify_resume
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsysc_vxrs
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
- jo .Lsysc_uaccess
+ TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
+ jnz .Lsysc_asce
j .Lsysc_return # beware of critical section cleanup
#
@@ -366,12 +359,15 @@ ENTRY(system_call)
jg s390_handle_mcck # TIF bit will be cleared by handler
#
-# _CIF_ASCE is set, load user space asce
+# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
-.Lsysc_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+.Lsysc_asce:
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lsysc_return
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
+ jz .Lsysc_return
+ larl %r14,.Lsysc_return
+ jg set_fs_fixup
#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -388,14 +384,11 @@ ENTRY(system_call)
brasl %r14,do_signal
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jno .Lsysc_return
+.Lsysc_do_syscall:
+ lghi %r13,__TASK_thread
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
- lg %r10,__TI_sysc_table(%r12) # address of system call table
- lghi %r8,0 # svc 0 returns -ENOSYS
- llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
- slag %r8,%r1,2
- j .Lsysc_nr_ok # restart svc
+ lghi %r1,0 # svc 0 returns -ENOSYS
+ j .Lsysc_do_svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -457,7 +450,7 @@ ENTRY(system_call)
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
@@ -478,7 +471,7 @@ ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
@@ -498,16 +491,16 @@ ENTRY(pgm_check_handler)
1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
-2: LAST_BREAK %r14
- UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
- lg %r14,__TI_task(%r12)
+ lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 3f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stg %r10,__THREAD_last_break(%r14)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -537,6 +530,8 @@ ENTRY(pgm_check_handler)
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lsysc_restore
+ TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
+ jo .Lsysc_do_syscall
j .Lsysc_tif
#
@@ -554,6 +549,7 @@ ENTRY(pgm_check_handler)
#
.Lpgm_svcper:
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+ lghi %r13,__TASK_thread
larl %r14,.Lsysc_per
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
@@ -566,8 +562,7 @@ ENTRY(io_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -626,7 +621,7 @@ ENTRY(io_int_handler)
jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
- icm %r0,15,__TI_precount(%r12)
+ icm %r0,15,__LC_PREEMPT_COUNT
jnz .Lio_restore # preemption is disabled
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
@@ -670,8 +665,8 @@ ENTRY(io_int_handler)
jo .Lio_notify_resume
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lio_vxrs
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
- jo .Lio_uaccess
+ TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
+ jnz .Lio_asce
j .Lio_return # beware of critical section cleanup
#
@@ -684,12 +679,15 @@ ENTRY(io_int_handler)
j .Lio_return
#
-# _CIF_ASCE is set, load user space asce
+# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
-.Lio_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+.Lio_asce:
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lio_return
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
+ jz .Lio_return
+ larl %r14,.Lio_return
+ jg set_fs_fixup
#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -740,8 +738,7 @@ ENTRY(ext_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -798,13 +795,10 @@ ENTRY(save_fpu_regs)
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bor %r14
stfpc __THREAD_FPU_fpc(%r2)
-.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
-.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
-.Lsave_fpu_regs_vx_high:
VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
j .Lsave_fpu_regs_done # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
@@ -851,9 +845,7 @@ load_fpu_regs:
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz .Lload_fpu_regs_fp # -> no VX, load FP regs
-.Lload_fpu_regs_vx:
VLM %v0,%v15,0,%r4
-.Lload_fpu_regs_vx_high:
VLM %v16,%v31,256,%r4
j .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
@@ -888,8 +880,7 @@ ENTRY(mcck_int_handler)
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
@@ -948,7 +939,7 @@ ENTRY(mcck_int_handler)
.Lmcck_panic:
lg %r15,__LC_PANIC_STACK
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
#
@@ -1083,9 +1074,10 @@ cleanup_critical:
0: # check if base register setup + TIF bit load has been done
clg %r9,BASED(.Lcleanup_system_call_insn+16)
jhe 0f
- # set up saved registers r10 and r12
- stg %r10,16(%r11) # r10 last break
- stg %r12,32(%r11) # r12 thread-info pointer
+ # set up saved register r12 task struct pointer
+ stg %r12,32(%r11)
+ # set up saved register r13 __TASK_thread offset
+ mvc 40(8,%r11),BASED(.Lcleanup_system_call_const)
0: # check if the user time update has been done
clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
@@ -1102,12 +1094,7 @@ cleanup_critical:
stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- # do LAST_BREAK
- lg %r9,16(%r11)
- srag %r9,%r9,23
- jz 0f
- mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
+ # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
stg %r9,24(%r11) # r11 pt_regs pointer
@@ -1129,6 +1116,8 @@ cleanup_critical:
.quad .Lsysc_per
.quad .Lsysc_vtime+36
.quad .Lsysc_vtime+42
+.Lcleanup_system_call_const:
+ .quad __TASK_thread
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index e79f030dd276..33f901865326 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -80,5 +80,6 @@ long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
DECLARE_PER_CPU(u64, mt_cycles[8]);
void verify_facilities(void);
+void set_fs_fixup(void);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 4431905f8cfa..0b5ebf8a3d30 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -315,7 +315,7 @@ ENTRY(startup_kdump)
jg startup_continue
.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+ .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 03c2b469c472..482d3526e32b 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -32,11 +32,10 @@ ENTRY(startup_continue)
#
# Setup stack
#
- larl %r15,init_thread_union
- stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
- lg %r14,__TI_task(%r15) # cache current in lowcore
+ larl %r14,init_task
stg %r14,__LC_CURRENT
- aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+ larl %r15,init_thread_union
+ aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
#
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 7a55c29b0b33..fb07a70820af 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -12,7 +12,7 @@
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
@@ -43,7 +43,7 @@ void enabled_wait(void)
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
- account_idle_time(idle_time);
+ account_idle_time(cputime_to_nsecs(idle_time));
write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);
@@ -57,8 +57,8 @@ static ssize_t show_idle_count(struct device *dev,
do {
seq = read_seqcount_begin(&idle->seqcount);
- idle_count = ACCESS_ONCE(idle->idle_count);
- if (ACCESS_ONCE(idle->clock_idle_enter))
+ idle_count = READ_ONCE(idle->idle_count);
+ if (READ_ONCE(idle->clock_idle_enter))
idle_count++;
} while (read_seqcount_retry(&idle->seqcount, seq));
return sprintf(buf, "%llu\n", idle_count);
@@ -75,16 +75,16 @@ static ssize_t show_idle_time(struct device *dev,
do {
now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
- idle_time = ACCESS_ONCE(idle->idle_time);
- idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
- idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+ idle_time = READ_ONCE(idle->idle_time);
+ idle_enter = READ_ONCE(idle->clock_idle_enter);
+ idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
-cputime64_t arch_cpu_idle_time(int cpu)
+u64 arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit;
@@ -93,10 +93,11 @@ cputime64_t arch_cpu_idle_time(int cpu)
do {
now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
- idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
- idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+ idle_enter = READ_ONCE(idle->clock_idle_enter);
+ idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
- return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
+
+ return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
}
void arch_cpu_idle_enter(void)
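The conversions above rely on the s390 TOD/CPU-timer format, in which bit 51 corresponds to 1 microsecond, i.e. 4096 clock units per microsecond; that is also why show_idle_time() can report microseconds with a plain "idle_time >> 12". A minimal user-space sketch of the arithmetic (tod_units_to_nsecs is a hypothetical name, not the kernel's cputime_to_nsecs):

#include <stdio.h>

/* s390 TOD/CPU-timer units -> nanoseconds: 4096 units per microsecond,
 * so ns = units * 1000 / 4096 (and us = units >> 12). */
static unsigned long long tod_units_to_nsecs(unsigned long long units)
{
	return (units * 1000) >> 12;
}

int main(void)
{
	unsigned long long one_second = 4096000000ULL; /* 10^6 us * 4096 */
	printf("%llu ns\n", tod_units_to_nsecs(one_second)); /* 1000000000 */
	return 0;
}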
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 295bfb7124bc..b67dafb7b7cf 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -8,7 +8,8 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
@@ -1546,7 +1547,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
unsigned long ipib = (unsigned long) reipl_block_actual;
unsigned int csum;
- csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
+ csum = (__force unsigned int)
+ csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
mem_assign_absolute(S390_lowcore.ipib, ipib);
mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
dump_run(trigger);
@@ -1863,7 +1865,7 @@ static int __init s390_ipl_init(void)
{
char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
- sclp_get_ipl_info(&sclp_ipl_info);
+ sclp_early_get_ipl_info(&sclp_ipl_info);
/*
* Fix loadparm: There are systems where the (SCSI) LOADPARM
* returned by read SCP info is invalid (contains EBCDIC blanks)
@@ -1991,10 +1993,9 @@ void __init ipl_update_parameters(void)
diag308_set_works = 1;
}
-void __init ipl_save_parameters(void)
+void __init ipl_verify_parameters(void)
{
struct cio_iplinfo iplinfo;
- void *src, *dst;
if (cio_get_iplinfo(&iplinfo))
return;
@@ -2005,10 +2006,6 @@ void __init ipl_save_parameters(void)
if (!iplinfo.is_qdio)
return;
ipl_flags |= IPL_PARMBLOCK_VALID;
- src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
- dst = (void *)IPL_PARMBLOCK_ORIGIN;
- memmove(dst, src, PAGE_SIZE);
- S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
static LIST_HEAD(rcall);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 285d6561076d..6dca93b29bed 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -12,11 +12,12 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <asm/irq_regs.h>
@@ -168,7 +169,7 @@ void do_softirq_own_stack(void)
old = current_stack_pointer();
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
- if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+ if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
/* Need to switch to the async. stack. */
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 083b05f5f5ab..6aa630a8d24f 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -4,7 +4,6 @@
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
-#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <linux/jump_label.h>
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index fdb40424acfe..76f9eda1d7c0 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -33,7 +33,7 @@
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/dis.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
@@ -45,11 +45,17 @@ DEFINE_INSN_CACHE_OPS(dmainsn);
static void *alloc_dmainsn_page(void)
{
- return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ void *page;
+
+ page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+ if (page)
+ set_memory_x((unsigned long) page, 1);
+ return page;
}
static void free_dmainsn_page(void *page)
{
+ set_memory_nx((unsigned long) page, 1);
free_page((unsigned long)page);
}
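This hunk and the module_alloc() change further down share one theme: once kernel mappings are non-executable by default, any page that will hold generated instructions must be flipped to executable explicitly for its lifetime. A kernel-context sketch of the pattern (not a new API, just the shape introduced above):

/* allocate a page for probe instructions, make it executable, and
 * revert the protection before handing the page back */
void *insn_page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (insn_page)
	set_memory_x((unsigned long) insn_page, 1);	/* 1 page */
/* ... copy and execute out-of-line instructions ... */
set_memory_nx((unsigned long) insn_page, 1);
free_page((unsigned long) insn_page);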
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 6ea6d69339b5..ae7dff110054 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -5,7 +5,8 @@
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/facility.h>
@@ -183,4 +184,4 @@ static int __init lgr_init(void)
lgr_timer_set();
return 0;
}
-module_init(lgr_init);
+device_initcall(lgr_init);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index fbc07891f9e7..1a27f307a920 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -45,7 +45,8 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL_EXEC,
+ 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 9a32f7419d78..80c093e0c6f1 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -13,7 +13,7 @@
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/time.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/stp.h>
@@ -102,7 +102,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
{
int kill_task;
u64 zero;
- void *fpt_save_area, *fpt_creg_save_area;
+ void *fpt_save_area;
kill_task = 0;
zero = 0;
@@ -116,6 +116,19 @@ static int notrace s390_validate_registers(union mci mci, int umode)
s390_handle_damage();
kill_task = 1;
}
+ /* Validate control registers */
+ if (!mci.cr) {
+ /*
+ * Control registers have unknown contents.
+ * Can't recover, therefore stop the machine.
+ */
+ s390_handle_damage();
+ } else {
+ asm volatile(
+ " lctlg 0,15,0(%0)\n"
+ " ptlb\n"
+ : : "a" (&S390_lowcore.cregs_save_area) : "memory");
+ }
if (!mci.fp) {
/*
* Floating point registers can't be restored. If the
@@ -130,7 +143,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
kill_task = 1;
}
fpt_save_area = &S390_lowcore.floating_pt_save_area;
- fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
if (!mci.fc) {
/*
* Floating point control register can't be restored.
@@ -142,11 +154,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
if (S390_lowcore.fpu_flags & KERNEL_FPC)
s390_handle_damage();
- asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
+ asm volatile("lfpc %0" : : "Q" (zero));
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
- } else
- asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+ } else {
+ asm volatile("lfpc %0"
+ : : "Q" (S390_lowcore.fpt_creg_save_area));
+ }
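The constraint change in this hunk deserves a note. A hedged illustration of the difference, assuming a local u32 variable fpc (not code from the patch):

u32 fpc = 0;
/* old style: the asm receives only an address register; the memory
 * access is invisible to the compiler unless an extra "m" operand or a
 * "memory" clobber is supplied */
asm volatile("lfpc 0(%0)" : : "a" (&fpc), "m" (fpc));
/* new style: "Q" is a memory operand with base register and short
 * displacement; the access itself is the operand, so the dependency is
 * explicit and no separate address setup is needed */
asm volatile("lfpc %0" : : "Q" (fpc));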
if (!MACHINE_HAS_VX) {
/* Validate floating point registers */
@@ -167,7 +181,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
" ld 13,104(%0)\n"
" ld 14,112(%0)\n"
" ld 15,120(%0)\n"
- : : "a" (fpt_save_area));
+ : : "a" (fpt_save_area) : "memory");
} else {
/* Validate vector registers */
union ctlreg0 cr0;
@@ -207,18 +221,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
kill_task = 1;
}
- /* Validate control registers */
- if (!mci.cr) {
- /*
- * Control registers have unknown contents.
- * Can't recover and therefore stopping machine.
- */
- s390_handle_damage();
- } else {
- asm volatile(
- " lctlg 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
- }
/*
* We don't even try to validate the TOD register, since we simply
* can't write something sensible into that register.
@@ -234,9 +236,9 @@ static int notrace s390_validate_registers(union mci mci, int umode)
: : : "0", "cc");
else
asm volatile(
- " l 0,0(%0)\n"
+ " l 0,%0\n"
" sckpf"
- : : "a" (&S390_lowcore.tod_progreg_save_area)
+ : : "Q" (S390_lowcore.tod_progreg_save_area)
: "0", "cc");
/* Validate clock comparator register */
set_clock_comparator(S390_lowcore.clock_comparator);
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index 87f05e475ae8..753ba63182b9 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -26,7 +26,7 @@ static struct os_info os_info __page_aligned_data;
u32 os_info_csum(struct os_info *os_info)
{
int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
- return csum_partial(&os_info->version_major, size, 0);
+ return (__force u32)csum_partial(&os_info->version_major, size, 0);
}
/*
@@ -46,7 +46,7 @@ void os_info_entry_add(int nr, void *ptr, u64 size)
{
os_info.entry[nr].addr = (u64)(unsigned long)ptr;
os_info.entry[nr].size = size;
- os_info.entry[nr].csum = csum_partial(ptr, size, 0);
+ os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0);
os_info.csum = os_info_csum(&os_info);
}
@@ -93,7 +93,7 @@ static void os_info_old_alloc(int nr, int align)
msg = "copy failed";
goto fail_free;
}
- csum = csum_partial(buf_align, size, 0);
+ csum = (__force u32)csum_partial(buf_align, size, 0);
if (csum != os_info_old->entry[nr].csum) {
msg = "checksum failed";
goto fail_free;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 037c2a253ae4..1aba10e90906 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -711,7 +711,7 @@ static int __init cpumf_pmu_init(void)
return rc;
}
return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
- "AP_PERF_S390_CF_ONLINE",
+ "perf/s390/cf:online",
s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 4554a4bae39e..c343ac2cf6c5 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -309,7 +309,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
default:
model = NULL;
break;
- };
+ }
if (!model)
goto out;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c1479a..1c0b58545c04 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
- regs.psw.addr = sfr->basic.ia;
- if (sfr->basic.T)
- regs.psw.mask |= PSW_MASK_DAT;
- if (sfr->basic.W)
- regs.psw.mask |= PSW_MASK_WAIT;
- if (sfr->basic.P)
- regs.psw.mask |= PSW_MASK_PSTATE;
- switch (sfr->basic.AS) {
- case 0x0:
- regs.psw.mask |= PSW_ASC_PRIMARY;
- break;
- case 0x1:
- regs.psw.mask |= PSW_ASC_ACCREG;
- break;
- case 0x2:
- regs.psw.mask |= PSW_ASC_SECONDARY;
- break;
- case 0x3:
- regs.psw.mask |= PSW_ASC_HOME;
- break;
- }
+ psw_bits(regs.psw).ia = sfr->basic.ia;
+ psw_bits(regs.psw).t = sfr->basic.T;
+ psw_bits(regs.psw).w = sfr->basic.W;
+ psw_bits(regs.psw).p = sfr->basic.P;
+ psw_bits(regs.psw).as = sfr->basic.AS;
/*
- * A non-zero guest program parameter indicates a guest
- * sample.
- * Note that some early samples or samples from guests without
+ * Use the hardware provided configuration level to decide if the
+ * sample belongs to a guest or host. If that is not available,
+ * fall back to the following heuristics:
+ * A non-zero guest program parameter always indicates a guest
+ * sample. Some early samples or samples from guests without
* lpp usage would be misaccounted to the host. We use the asn
- * value as a heuristic to detect most of these guest samples.
- * If the value differs from the host hpp value, we assume
- * it to be a KVM guest.
+ * value as an additional heuristic to detect most of these guest samples.
+ * If the value differs from the host hpp value, we assume it to be a
+ * KVM guest.
*/
- if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
+ switch (sfr->basic.CL) {
+ case 1: /* logical partition */
+ sde_regs->in_guest = 0;
+ break;
+ case 2: /* virtual machine */
sde_regs->in_guest = 1;
+ break;
+ default: /* old machine, use heuristics */
+ if (sfr->basic.gpp ||
+ sfr->basic.prim_asn != (u16)sfr->basic.hpp)
+ sde_regs->in_guest = 1;
+ break;
+ }
overflow = 0;
if (perf_exclude_event(event, &regs, sde_regs))
@@ -1626,7 +1623,7 @@ static int __init init_cpum_sampling_pmu(void)
goto out;
}
- cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
+ cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
return err;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bba4fa74b321..54281660582c 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -23,7 +23,7 @@
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init_task.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -100,10 +100,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
-int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
- unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
{
- struct thread_info *ti;
struct fake_frame
{
struct stack_frame sf;
@@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
- ti = task_thread_info(p);
- ti->user_timer = 0;
- ti->system_timer = 0;
+ p->thread.user_timer = 0;
+ p->thread.system_timer = 0;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
@@ -158,7 +156,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
- unsigned long tls = frame->childregs.gprs[6];
if (is_compat_task()) {
p->thread.acrs[0] = (unsigned int)tls;
} else {
@@ -236,3 +233,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
+
+void set_fs_fixup(void)
+{
+ struct pt_regs *regs = current_pt_regs();
+ static bool warned;
+
+ set_fs(USER_DS);
+ if (warned)
+ return;
+ WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
+ show_registers(regs);
+ warned = true;
+}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 81d0808085e6..21004aaac69b 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -32,7 +32,7 @@ static bool machine_has_cpu_mhz;
void __init cpu_detect_mhz_feature(void)
{
if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
- machine_has_cpu_mhz = 1;
+ machine_has_cpu_mhz = true;
}
static void update_cpu_mhz(void *arg)
@@ -53,7 +53,7 @@ void s390_update_cpu_mhz(void)
on_each_cpu(update_cpu_mhz, NULL, 0);
}
-void notrace cpu_relax(void)
+void notrace cpu_relax_yield(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
diag_stat_inc(DIAG_STAT_X044);
@@ -61,7 +61,7 @@ void notrace cpu_relax(void)
}
barrier();
}
-EXPORT_SYMBOL(cpu_relax);
+EXPORT_SYMBOL(cpu_relax_yield);
/*
* cpu_init - initializes state that is per-CPU.
@@ -92,7 +92,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
- "edat", "etf3eh", "highgprs", "te", "vx"
+ "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe"
};
static const char * const int_hwcap_str[] = {
"sie"
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9336e824e2db..12020b55887b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -26,7 +26,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include "entry.h"
@@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(task_thread_info(child)->last_break,
+ put_user(child->thread.last_break,
(unsigned long __user *) data);
return 0;
case PTRACE_ENABLE_TE:
@@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(task_thread_info(child)->last_break,
+ put_user(child->thread.last_break,
(unsigned int __user *) data);
return 0;
}
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+ else
+ memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -997,10 +1002,10 @@ static int s390_last_break_get(struct task_struct *target,
if (count > 0) {
if (kbuf) {
unsigned long *k = kbuf;
- *k = task_thread_info(target)->last_break;
+ *k = target->thread.last_break;
} else {
unsigned long __user *u = ubuf;
- if (__put_user(task_thread_info(target)->last_break, u))
+ if (__put_user(target->thread.last_break, u))
return -EFAULT;
}
}
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
@@ -1113,7 +1121,7 @@ static int s390_system_call_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- unsigned int *data = &task_thread_info(target)->system_call;
+ unsigned int *data = &target->thread.system_call;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@@ -1123,7 +1131,7 @@ static int s390_system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- unsigned int *data = &task_thread_info(target)->system_call;
+ unsigned int *data = &target->thread.system_call;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@@ -1327,7 +1335,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
compat_ulong_t last_break;
if (count > 0) {
- last_break = task_thread_info(target)->last_break;
+ last_break = target->thread.last_break;
if (kbuf) {
unsigned long *k = kbuf;
*k = last_break;
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
deleted file mode 100644
index f08af675f36f..000000000000
--- a/arch/s390/kernel/sclp.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright IBM Corp. 2015
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-#include <linux/kernel.h>
-#include <asm/ebcdic.h>
-#include <asm/irq.h>
-#include <asm/lowcore.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-
-#define EVTYP_VT220MSG_MASK 0x00000040
-#define EVTYP_MSG_MASK 0x40000000
-
-static char _sclp_work_area[4096] __aligned(PAGE_SIZE) __section(data);
-static bool have_vt220 __section(data);
-static bool have_linemode __section(data);
-
-static void _sclp_wait_int(void)
-{
- unsigned long cr0, cr0_new, psw_mask, addr;
- psw_t psw_ext_save, psw_wait;
-
- __ctl_store(cr0, 0, 0);
- cr0_new = cr0 | 0x200;
- __ctl_load(cr0_new, 0, 0);
-
- psw_ext_save = S390_lowcore.external_new_psw;
- psw_mask = __extract_psw();
- S390_lowcore.external_new_psw.mask = psw_mask;
- psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
- S390_lowcore.ext_int_code = 0;
-
- do {
- asm volatile(
- " larl %[addr],0f\n"
- " stg %[addr],%[psw_wait_addr]\n"
- " stg %[addr],%[psw_ext_addr]\n"
- " lpswe %[psw_wait]\n"
- "0:\n"
- : [addr] "=&d" (addr),
- [psw_wait_addr] "=Q" (psw_wait.addr),
- [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
- : [psw_wait] "Q" (psw_wait)
- : "cc", "memory");
- } while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
-
- __ctl_load(cr0, 0, 0);
- S390_lowcore.external_new_psw = psw_ext_save;
-}
-
-static int _sclp_servc(unsigned int cmd, char *sccb)
-{
- unsigned int cc;
-
- do {
- asm volatile(
- " .insn rre,0xb2200000,%1,%2\n"
- " ipm %0\n"
- : "=d" (cc) : "d" (cmd), "a" (sccb)
- : "cc", "memory");
- cc >>= 28;
- if (cc == 3)
- return -EINVAL;
- _sclp_wait_int();
- } while (cc != 0);
- return (*(unsigned short *)(sccb + 6) == 0x20) ? 0 : -EIO;
-}
-
-static int _sclp_setup(int disable)
-{
- static unsigned char init_sccb[] = {
- 0x00, 0x1c,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x04,
- 0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- };
- unsigned int *masks;
- int rc;
-
- memcpy(_sclp_work_area, init_sccb, 28);
- masks = (unsigned int *)(_sclp_work_area + 12);
- if (disable)
- memset(masks, 0, 16);
- /* SCLP write mask */
- rc = _sclp_servc(0x00780005, _sclp_work_area);
- if (rc)
- return rc;
- have_vt220 = masks[2] & EVTYP_VT220MSG_MASK;
- have_linemode = masks[2] & EVTYP_MSG_MASK;
- return 0;
-}
-
-/* Output multi-line text using SCLP Message interface. */
-static void _sclp_print_lm(const char *str)
-{
- static unsigned char write_head[] = {
- /* sccb header */
- 0x00, 0x52, /* 0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */
- /* evbuf */
- 0x00, 0x4a, /* 8 */
- 0x02, 0x00, 0x00, 0x00, /* 10 */
- /* mdb */
- 0x00, 0x44, /* 14 */
- 0x00, 0x01, /* 16 */
- 0xd4, 0xc4, 0xc2, 0x40, /* 18 */
- 0x00, 0x00, 0x00, 0x01, /* 22 */
- /* go */
- 0x00, 0x38, /* 26 */
- 0x00, 0x01, /* 28 */
- 0x00, 0x00, 0x00, 0x00, /* 30 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */
- 0x00, 0x00, 0x00, 0x00, /* 50 */
- 0x00, 0x00, /* 54 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */
- 0x00, 0x00, /* 80 */
- };
- static unsigned char write_mto[] = {
- /* mto */
- 0x00, 0x0a, /* 0 */
- 0x00, 0x04, /* 2 */
- 0x10, 0x00, /* 4 */
- 0x00, 0x00, 0x00, 0x00 /* 6 */
- };
- unsigned char *ptr, ch;
- unsigned int count;
-
- memcpy(_sclp_work_area, write_head, sizeof(write_head));
- ptr = _sclp_work_area + sizeof(write_head);
- do {
- memcpy(ptr, write_mto, sizeof(write_mto));
- for (count = sizeof(write_mto); (ch = *str++) != 0; count++) {
- if (ch == 0x0a)
- break;
- ptr[count] = _ascebc[ch];
- }
- /* Update length fields in mto, mdb, evbuf and sccb */
- *(unsigned short *) ptr = count;
- *(unsigned short *)(_sclp_work_area + 14) += count;
- *(unsigned short *)(_sclp_work_area + 8) += count;
- *(unsigned short *)(_sclp_work_area + 0) += count;
- ptr += count;
- } while (ch != 0);
-
- /* SCLP write data */
- _sclp_servc(0x00760005, _sclp_work_area);
-}
-
-/* Output multi-line text (plus a newline) using SCLP VT220
- * interface.
- */
-static void _sclp_print_vt220(const char *str)
-{
- static unsigned char const write_head[] = {
- /* sccb header */
- 0x00, 0x0e,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* evbuf header */
- 0x00, 0x06,
- 0x1a, 0x00, 0x00, 0x00,
- };
- size_t len = strlen(str);
-
- if (sizeof(write_head) + len >= sizeof(_sclp_work_area))
- len = sizeof(_sclp_work_area) - sizeof(write_head) - 1;
-
- memcpy(_sclp_work_area, write_head, sizeof(write_head));
- memcpy(_sclp_work_area + sizeof(write_head), str, len);
- _sclp_work_area[sizeof(write_head) + len] = '\n';
-
- /* Update length fields in evbuf and sccb headers */
- *(unsigned short *)(_sclp_work_area + 8) += len + 1;
- *(unsigned short *)(_sclp_work_area + 0) += len + 1;
-
- /* SCLP write data */
- (void)_sclp_servc(0x00760005, _sclp_work_area);
-}
-
-/* Output one or more lines of text on the SCLP console (VT220 and /
- * or line-mode). All lines get terminated; no need for a trailing LF.
- */
-void _sclp_print_early(const char *str)
-{
- if (_sclp_setup(0) != 0)
- return;
- if (have_linemode)
- _sclp_print_lm(str);
- if (have_vt220)
- _sclp_print_vt220(str);
- _sclp_setup(1);
-}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7f7ba5f23f13..e4d811f17971 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -35,6 +35,7 @@
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
+#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
@@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
- lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
+ lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
- __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
- __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->current_task = (unsigned long) init_thread_union.thread_info.task;
- lc->thread_info = (unsigned long) &init_thread_union;
+ lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
+ lc->preempt_count = S390_lowcore.preempt_count;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
@@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
- restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+ restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
restart_stack += ASYNC_SIZE;
/*
@@ -412,7 +413,7 @@ static void __init setup_resources(void)
bss_resource.end = (unsigned long) &__bss_stop - 1;
for_each_memblock(memory, reg) {
- res = alloc_bootmem_low(sizeof(*res));
+ res = memblock_virt_alloc(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -426,7 +427,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
- sub_res = alloc_bootmem_low(sizeof(*sub_res));
+ sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -445,7 +446,7 @@ static void __init setup_resources(void)
* part of the System RAM resource.
*/
if (crashk_res.end) {
- memblock_add(crashk_res.start, resource_size(&crashk_res));
+ memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
}
@@ -484,7 +485,7 @@ static void __init setup_memory_end(void)
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
memblock_remove(memory_end, ULONG_MAX);
- pr_notice("Max memory size: %luMB\n", memory_end >> 20);
+ pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}
static void __init setup_vmcoreinfo(void)
@@ -635,6 +636,8 @@ static void __init reserve_crashkernel(void)
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
+ if (!INITRD_START || !INITRD_SIZE)
+ return;
initrd_start = INITRD_START;
initrd_end = initrd_start + INITRD_SIZE;
memblock_reserve(INITRD_START, INITRD_SIZE);
@@ -649,7 +652,7 @@ static void __init check_initrd(void)
#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE &&
!memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
- pr_err("initrd does not fit memory.\n");
+ pr_err("The initial RAM disk does not fit into the memory\n");
memblock_free(INITRD_START, INITRD_SIZE);
initrd_start = initrd_end = 0;
}
@@ -746,7 +749,7 @@ static int __init setup_hwcaps(void)
/*
* Huge page support HWCAP_S390_HPAGE is bit 7.
*/
- if (MACHINE_HAS_HPAGE)
+ if (MACHINE_HAS_EDAT1)
elf_hwcap |= HWCAP_S390_HPAGE;
/*
@@ -766,8 +769,14 @@ static int __init setup_hwcaps(void)
* can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
* instead of facility bit 129.
*/
- if (MACHINE_HAS_VX)
+ if (MACHINE_HAS_VX) {
elf_hwcap |= HWCAP_S390_VXRS;
+ if (test_facility(134))
+ elf_hwcap |= HWCAP_S390_VXRS_EXT;
+ if (test_facility(135))
+ elf_hwcap |= HWCAP_S390_VXRS_BCD;
+ }
+
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
@@ -819,10 +828,10 @@ static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;
- vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
- if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
- add_device_randomness(&vmms, vmms->count);
- free_page((unsigned long) vmms);
+ vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+ add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
+ memblock_free((unsigned long) vmms, PAGE_SIZE);
}
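The interesting part of this hunk is not the allocator switch but the arguments: the old call fed add_device_randomness() the address of the local vmms pointer, and passed vmms->count, which counts VM description blocks rather than bytes. Side by side:

/* old: address of a stack variable, length in entries - almost no
 * entropy contributed */
add_device_randomness(&vmms, vmms->count);
/* new: the VM description blocks themselves, length in bytes */
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);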
/*
@@ -903,6 +912,7 @@ void __init setup_arch(char **cmdline_p)
setup_memory_end();
setup_memory();
+ dma_contiguous_reserve(memory_end);
check_initrd();
reserve_crashkernel();
@@ -921,6 +931,8 @@ void __init setup_arch(char **cmdline_p)
cpu_detect_mhz_feature();
cpu_init();
numa_setup();
+ smp_detect_cpus();
+ topology_init_early();
/*
* Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d82562cf0a0e..62a4c263e887 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -26,7 +26,7 @@
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <asm/ucontext.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include "entry.h"
@@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
- regs->gprs[6] = task_thread_info(current)->last_break;
+ regs->gprs[6] = current->thread.last_break;
}
return 0;
}
@@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
- regs->gprs[5] = task_thread_info(current)->last_break;
+ regs->gprs[5] = current->thread.last_break;
return 0;
}
@@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
* the debugger may change all our registers, including the system
* call information.
*/
- current_thread_info()->system_call =
+ current->thread.system_call =
test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
- if (current_thread_info()->system_call) {
- regs->int_code = current_thread_info()->system_call;
+ if (current->thread.system_call) {
+ regs->int_code = current->thread.system_call;
/* Check for system call restarting. */
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
@@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- if (current_thread_info()->system_call) {
- regs->int_code = current_thread_info()->system_call;
+ if (current->thread.system_call) {
+ regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe1c5ea..d0a74d7ce433 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -19,7 +19,8 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
-#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
@@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct lowcore *lc = pcpu->lowcore;
- struct thread_info *ti = task_thread_info(tsk);
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
- lc->user_timer = ti->user_timer;
- lc->system_timer = ti->system_timer;
+ lc->user_timer = tsk->thread.user_timer;
+ lc->system_timer = tsk->thread.system_timer;
lc->steal_timer = 0;
}
@@ -368,10 +367,15 @@ int smp_find_processor_id(u16 address)
return -1;
}
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
{
- return pcpu_running(pcpu_devices + cpu);
+ if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+ return false;
+ if (pcpu_running(pcpu_devices + cpu))
+ return false;
+ return true;
}
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
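The symbol is exported so that generic locking code can poll it through the vcpu_is_preempted() hook. A hypothetical caller (lock_still_held() and owner_cpu are stand-in names, not from this patch):

/* kernel-context sketch: stop burning cycles on a lock whose holder's
 * vCPU currently has no physical CPU behind it */
while (lock_still_held(lock)) {
	if (vcpu_is_preempted(owner_cpu))
		break;		/* better to yield than to keep spinning */
	cpu_relax();
}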
void smp_yield_cpu(int cpu)
{
@@ -657,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
return pcpu_devices[cpu].polarization;
}
-static struct sclp_core_info *smp_get_core_info(void)
+static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
static int use_sigp_detection;
- struct sclp_core_info *info;
int address;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info && (use_sigp_detection || sclp_get_core_info(info))) {
+ if (use_sigp_detection || sclp_get_core_info(info, early)) {
use_sigp_detection = 1;
for (address = 0;
address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
@@ -678,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
}
info->combined = info->configured;
}
- return info;
}
static int smp_add_present_cpu(int cpu);
@@ -719,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
return nr;
}
-static void __init smp_detect_cpus(void)
+void __init smp_detect_cpus(void)
{
unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_core_info *info;
u16 address;
/* Get CPU information */
- info = smp_get_core_info();
- if (!info)
- panic("smp_detect_cpus failed to allocate memory\n");
-
+ info = memblock_virt_alloc(sizeof(*info), 8);
+ smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
address = stap();
@@ -765,7 +764,7 @@ static void __init smp_detect_cpus(void)
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
- kfree(info);
+ memblock_free_early((unsigned long)info, sizeof(*info));
}
/*
@@ -802,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- base = cpu - (cpu % (smp_cpu_mtid + 1));
+ base = smp_get_base_cpu(cpu);
for (i = 0; i <= smp_cpu_mtid; i++) {
if (base + i < nr_cpu_ids)
if (cpu_online(base + i))
@@ -902,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
- smp_detect_cpus();
}
void __init smp_prepare_boot_cpu(void)
@@ -968,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
- cpu -= cpu % (smp_cpu_mtid + 1);
+ cpu = smp_get_base_cpu(cpu);
if (cpu == 0)
goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
@@ -1047,22 +1045,18 @@ static struct attribute_group cpu_online_attr_group = {
.attrs = cpu_online_attrs,
};
-static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
+static int smp_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned int)(long)hcpu;
struct device *s = &per_cpu(cpu_device, cpu)->dev;
- int err = 0;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
- break;
- case CPU_DEAD:
- sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
- break;
- }
- return notifier_from_errno(err);
+ return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+}
+static int smp_cpu_pre_down(unsigned int cpu)
+{
+ struct device *s = &per_cpu(cpu_device, cpu)->dev;
+
+ sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+ return 0;
}
static int smp_add_present_cpu(int cpu)
@@ -1083,20 +1077,12 @@ static int smp_add_present_cpu(int cpu)
rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
if (rc)
goto out_cpu;
- if (cpu_online(cpu)) {
- rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
- if (rc)
- goto out_online;
- }
rc = topology_cpu_init(c);
if (rc)
goto out_topology;
return 0;
out_topology:
- if (cpu_online(cpu))
- sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
-out_online:
sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
@@ -1113,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
struct sclp_core_info *info;
int nr;
- info = smp_get_core_info();
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
nr = __smp_rescan_cpus(info, 1);
@@ -1149,17 +1136,15 @@ static int __init s390_smp_init(void)
if (rc)
return rc;
#endif
- cpu_notifier_register_begin();
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)
goto out;
}
- __hotcpu_notifier(smp_cpu_notify, 0);
-
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
+ smp_cpu_online, smp_cpu_pre_down);
out:
- cpu_notifier_register_done();
return rc;
}
subsys_initcall(s390_smp_init);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 355db9db8210..0085b2d8ed7d 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -8,7 +8,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
-#include <linux/module.h>
+#include <linux/export.h>
static int __save_address(void *data, unsigned long address, int nosched)
{
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e81f812..6bebc935e9c2 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -194,9 +194,9 @@ pgm_check_entry:
/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
larl %r2,.Lpanic_string
- larl %r3,_sclp_print_early
+ larl %r3,sclp_early_printk
lghi %r1,0
sam31
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
@@ -273,7 +273,7 @@ restore_registers:
.Ldisabled_wait_31:
.long 0x000a0000,0x00000000
.Lpanic_string:
- .asciz "Resume not possible because suspend CPU is no longer available"
+ .asciz "Resume not possible because suspend CPU is no longer available\n"
.align 8
.Lrestart_diag308_psw:
.long 0x00080000,0x80000000
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index f145490cce54..b7af452978ca 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -27,7 +27,7 @@
#include <linux/personality.h>
#include <linux/unistd.h>
#include <linux/ipc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "entry.h"
/*
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index bfda6aa40280..12b6b138e354 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -10,7 +10,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
@@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
+static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
+{
+ switch (encoding) {
+ case 1: /* EBCDIC */
+ EBCASC(name, len);
+ break;
+ case 2: /* UTF-8 */
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
int i;
@@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid);
seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid);
}
+ if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
+ seq_printf(m, "LPAR Extended Name: %-.256s\n", info->ext_name);
+ seq_printf(m, "LPAR UUID: %pUb\n", &info->uuid);
+ }
}
static void print_ext_name(struct seq_file *m, int lvl,
struct sysinfo_3_2_2 *info)
{
- if (info->vm[lvl].ext_name_encoding == 0)
- return;
- if (info->ext_names[lvl][0] == 0)
- return;
- switch (info->vm[lvl].ext_name_encoding) {
- case 1: /* EBCDIC */
- EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
- break;
- case 2: /* UTF-8 */
- break;
- default:
+ size_t len = sizeof(info->ext_names[lvl]);
+
+ if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
return;
- }
seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
info->ext_names[lvl]);
}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0bfcc492987e..de66abb479c9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -16,7 +16,7 @@
#include <linux/kernel_stat.h>
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
@@ -38,7 +38,7 @@
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
@@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
-unsigned long lpar_offset;
-unsigned long initial_leap_seconds;
+
+static unsigned long long lpar_offset;
+static unsigned long long initial_leap_seconds;
+static unsigned long long tod_steering_end;
+static long long tod_steering_delta;
/*
* Get time offsets with PTFF
*/
-void __init ptff_init(void)
+void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
+ /* Initialize TOD steering parameters */
+ tod_steering_end = sched_clock_base_cc;
+ vdso_data->ts_end = tod_steering_end;
+
if (!test_facility(28))
return;
+
ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
/* get LPAR offset */
@@ -80,7 +88,7 @@ void __init ptff_init(void)
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
- initial_leap_seconds = (unsigned long)
+ initial_leap_seconds = (unsigned long long)
((long) qui.old_leap * 4096000000L);
}
@@ -102,7 +110,7 @@ unsigned long long monotonic_clock(void)
}
EXPORT_SYMBOL(monotonic_clock);
-void tod_to_timeval(__u64 todval, struct timespec64 *xt)
+static void tod_to_timeval(__u64 todval, struct timespec64 *xt)
{
unsigned long long sec;
@@ -112,7 +120,6 @@ void tod_to_timeval(__u64 todval, struct timespec64 *xt)
todval -= (sec * 1000000) << 12;
xt->tv_nsec = ((todval * 1000) >> 12);
}
-EXPORT_SYMBOL(tod_to_timeval);
void clock_comparator_work(void)
{
@@ -123,18 +130,6 @@ void clock_comparator_work(void)
cd->event_handler(cd);
}
-/*
- * Fixup the clock comparator.
- */
-static void fixup_clock_comparator(unsigned long long delta)
-{
- /* If nobody is waiting there's nothing to fix. */
- if (S390_lowcore.clock_comparator == -1ULL)
- return;
- S390_lowcore.clock_comparator += delta;
- set_clock_comparator(S390_lowcore.clock_comparator);
-}
-
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -213,9 +208,23 @@ void read_boot_clock64(struct timespec64 *ts)
tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
}
-static cycle_t read_tod_clock(struct clocksource *cs)
+static u64 read_tod_clock(struct clocksource *cs)
{
- return get_tod_clock();
+ unsigned long long now, adj;
+
+ preempt_disable(); /* protect from changes to steering parameters */
+ now = get_tod_clock();
+ adj = tod_steering_end - now;
+ if (unlikely((s64) adj >= 0))
+ /*
+ * Manually steer by 1 cycle every 2^15 cycles. This
+ * corresponds to shifting the tod delta by 15. 1s is
+ * therefore steered in ~9h. The adjustment will decrease
+ * over time, until it finally reaches 0.
+ */
+ now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
+ preempt_enable();
+ return now;
}
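A quick back-of-the-envelope check of the comment above (illustrative only): the returned clock is offset by adj >> 15, and adj shrinks by one per elapsed TOD unit, so the effective slew rate is 1 unit per 2^15 units of real time.

#include <stdio.h>

int main(void)
{
	/* absorbing 1 s of delta at a rate of 1/2^15 takes 2^15 seconds */
	double secs = 1 << 15;				/* 32768 s */
	printf("~%.1f hours\n", secs / 3600.0);		/* ~9.1 h */
	return 0;
}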
static struct clocksource clocksource_tod = {
@@ -384,6 +393,55 @@ static inline int check_sync_clock(void)
return rc;
}
+/*
+ * Apply clock delta to the global data structures.
+ * This is called once on the CPU that performed the clock sync.
+ */
+static void clock_sync_global(unsigned long long delta)
+{
+ unsigned long now, adj;
+ struct ptff_qto qto;
+
+ /* Fixup the monotonic sched clock. */
+ sched_clock_base_cc += delta;
+ /* Adjust TOD steering parameters. */
+ vdso_data->tb_update_count++;
+ now = get_tod_clock();
+ adj = tod_steering_end - now;
+ if (unlikely((s64) adj >= 0))
+ /* Calculate how much of the old adjustment is left. */
+ tod_steering_delta = (tod_steering_delta < 0) ?
+ -(adj >> 15) : (adj >> 15);
+ tod_steering_delta += delta;
+ if ((abs(tod_steering_delta) >> 48) != 0)
+ panic("TOD clock sync offset %lli is too large to drift\n",
+ tod_steering_delta);
+ tod_steering_end = now + (abs(tod_steering_delta) << 15);
+ vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
+ vdso_data->ts_end = tod_steering_end;
+ vdso_data->tb_update_count++;
+ /* Update LPAR offset. */
+ if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+ lpar_offset = qto.tod_epoch_difference;
+ /* Call the TOD clock change notifier. */
+ atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
+}
+
+/*
+ * Apply clock delta to the per-CPU data structures of this CPU.
+ * This is called for each online CPU after the call to clock_sync_global.
+ */
+static void clock_sync_local(unsigned long long delta)
+{
+ /* Add the delta to the clock comparator. */
+ if (S390_lowcore.clock_comparator != -1ULL) {
+ S390_lowcore.clock_comparator += delta;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ }
+ /* Adjust the last_update_clock time-stamp. */
+ S390_lowcore.last_update_clock += delta;
+}
+
/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
@@ -397,31 +455,9 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
- unsigned long long fixup_cc;
+ unsigned long long clock_delta;
};
-static void clock_sync_cpu(struct clock_sync_data *sync)
-{
- atomic_dec(&sync->cpus);
- enable_sync_clock();
- while (sync->in_sync == 0) {
- __udelay(1);
- /*
- * A different cpu changes *in_sync. Therefore use
- * barrier() to force memory access.
- */
- barrier();
- }
- if (sync->in_sync != 1)
- /* Didn't work. Clear per-cpu in sync bit again. */
- disable_sync_clock(NULL);
- /*
- * This round of TOD syncing is done. Set the clock comparator
- * to the next tick and let the processor continue.
- */
- fixup_clock_comparator(sync->fixup_cc);
-}
-
/*
* Server Time Protocol (STP) code.
*/
@@ -455,7 +491,7 @@ static void __init stp_reset(void)
pr_warn("The real or virtual hardware system does not provide an STP interface\n");
free_page((unsigned long) stp_page);
stp_page = NULL;
- stp_online = 0;
+ stp_online = false;
}
}
@@ -523,54 +559,46 @@ void stp_queue_work(void)
static int stp_sync_clock(void *data)
{
- static int first;
+ struct clock_sync_data *sync = data;
unsigned long long clock_delta;
- struct clock_sync_data *stp_sync;
- struct ptff_qto qto;
+ static int first;
int rc;
- stp_sync = data;
-
- if (xchg(&first, 1) == 1) {
- /* Slave */
- clock_sync_cpu(stp_sync);
- return 0;
- }
-
- /* Wait until all other cpus entered the sync function. */
- while (atomic_read(&stp_sync->cpus) != 0)
- cpu_relax();
-
enable_sync_clock();
-
- rc = 0;
- if (stp_info.todoff[0] || stp_info.todoff[1] ||
- stp_info.todoff[2] || stp_info.todoff[3] ||
- stp_info.tmd != 2) {
- rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
- if (rc == 0) {
- /* fixup the monotonic sched clock */
- sched_clock_base_cc += clock_delta;
- if (ptff_query(PTFF_QTO) &&
- ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
- /* Update LPAR offset */
- lpar_offset = qto.tod_epoch_difference;
- atomic_notifier_call_chain(&s390_epoch_delta_notifier,
- 0, &clock_delta);
- stp_sync->fixup_cc = clock_delta;
- fixup_clock_comparator(clock_delta);
- rc = chsc_sstpi(stp_page, &stp_info,
- sizeof(struct stp_sstpi));
- if (rc == 0 && stp_info.tmd != 2)
- rc = -EAGAIN;
+ if (xchg(&first, 1) == 0) {
+ /* Wait until all other cpus entered the sync function. */
+ while (atomic_read(&sync->cpus) != 0)
+ cpu_relax();
+ rc = 0;
+ if (stp_info.todoff[0] || stp_info.todoff[1] ||
+ stp_info.todoff[2] || stp_info.todoff[3] ||
+ stp_info.tmd != 2) {
+ rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
+ &clock_delta);
+ if (rc == 0) {
+ sync->clock_delta = clock_delta;
+ clock_sync_global(clock_delta);
+ rc = chsc_sstpi(stp_page, &stp_info,
+ sizeof(struct stp_sstpi));
+ if (rc == 0 && stp_info.tmd != 2)
+ rc = -EAGAIN;
+ }
}
+ sync->in_sync = rc ? -EAGAIN : 1;
+ xchg(&first, 0);
+ } else {
+ /* Slave */
+ atomic_dec(&sync->cpus);
+ /* Wait for in_sync to be set. */
+ while (READ_ONCE(sync->in_sync) == 0)
+ __udelay(1);
}
- if (rc) {
+ if (sync->in_sync != 1)
+ /* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
- stp_sync->in_sync = -EAGAIN;
- } else
- stp_sync->in_sync = 1;
- xchg(&first, 0);
+ /* Apply clock delta to per-CPU fields of this CPU. */
+ clock_sync_local(sync->clock_delta);
+
return 0;
}
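The rework above folds the old clock_sync_cpu() slave loop into a single stop_machine callback: the first CPU through the xchg() becomes the master and performs the actual sync, everyone else reports in and waits for the verdict, and all CPUs finish by applying the delta locally. A kernel-context sketch of that rendezvous shape (do_hw_sync() is a stand-in name for the chsc_sstpc/chsc_sstpi sequence):

static int first;

static int sync_rendezvous(struct clock_sync_data *sync)
{
	if (xchg(&first, 1) == 0) {		/* first in: master */
		while (atomic_read(&sync->cpus) != 0)
			cpu_relax();		/* wait for all slaves */
		sync->in_sync = do_hw_sync(sync) ? -EAGAIN : 1;
		xchg(&first, 0);
	} else {				/* slave */
		atomic_dec(&sync->cpus);	/* report arrival */
		while (READ_ONCE(sync->in_sync) == 0)
			__udelay(1);		/* wait for the verdict */
	}
	return sync->in_sync == 1 ? 0 : -EAGAIN;
}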
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e959c02e0cac..2cd5f4f1013c 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
+#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -37,26 +38,27 @@ static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
* protected by "sched_domains_mutex".
*/
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+cpumask_t cpus_with_topology;
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
cpumask_t mask;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ if (!MACHINE_HAS_TOPOLOGY)
return mask;
for (; info; info = info->next) {
if (cpumask_test_cpu(cpu, &info->mask))
@@ -71,7 +73,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
int i;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ if (!MACHINE_HAS_TOPOLOGY)
return mask;
cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++)
@@ -97,7 +99,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
if (lcpu < 0)
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
- topo = &per_cpu(cpu_topology, lcpu + i);
+ topo = &cpu_topology[lcpu + i];
topo->drawer_id = drawer->id;
topo->book_id = book->id;
topo->socket_id = socket->id;
@@ -106,6 +108,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
+ cpumask_set_cpu(lcpu + i, &cpus_with_topology);
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
}
@@ -220,7 +223,7 @@ static void update_cpu_masks(void)
int cpu;
for_each_possible_cpu(cpu) {
- topo = &per_cpu(cpu_topology, cpu);
+ topo = &cpu_topology[cpu];
topo->thread_mask = cpu_thread_map(cpu);
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -231,6 +234,8 @@ static void update_cpu_masks(void)
topo->socket_id = cpu;
topo->book_id = cpu;
topo->drawer_id = cpu;
+ if (cpu_present(cpu))
+ cpumask_set_cpu(cpu, &cpus_with_topology);
}
}
numa_update_cpu_topology();
@@ -241,12 +246,12 @@ void store_topology(struct sysinfo_15_1_x *info)
stsi(info, 15, 1, min(topology_max_mnest, 4));
}
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
- struct device *dev;
- int cpu, rc = 0;
+ int rc = 0;
+ cpumask_clear(&cpus_with_topology);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
store_topology(info);
@@ -255,6 +260,15 @@ int arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
+ return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+ struct device *dev;
+ int cpu, rc;
+
+ rc = __arch_update_cpu_topology();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -394,30 +408,24 @@ int topology_cpu_init(struct cpu *cpu)
static const struct cpumask *cpu_thread_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).thread_mask;
+ return &cpu_topology[cpu].thread_mask;
}
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).core_mask;
+ return &cpu_topology[cpu].core_mask;
}
static const struct cpumask *cpu_book_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).book_mask;
+ return &cpu_topology[cpu].book_mask;
}
static const struct cpumask *cpu_drawer_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).drawer_mask;
-}
-
-static int __init early_parse_topology(char *p)
-{
- return kstrtobool(p, &topology_enabled);
+ return &cpu_topology[cpu].drawer_mask;
}
-early_param("topology", early_parse_topology);
static struct sched_domain_topology_level s390_topology[] = {
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
@@ -438,32 +446,30 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
- mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+ mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
mask = mask->next;
}
}
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
{
struct sysinfo_15_1_x *info;
- int i;
+ set_sched_topology(s390_topology);
if (!MACHINE_HAS_TOPOLOGY)
- return 0;
- tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+ goto out;
+ tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
- pr_info("The CPU configuration topology of the machine is:");
- for (i = 0; i < TOPOLOGY_NR_MAG; i++)
- printk(KERN_CONT " %d", info->mag[i]);
- printk(KERN_CONT " / %d\n", info->mnest);
+ pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
+ info->mag[0], info->mag[1], info->mag[2], info->mag[3],
+ info->mag[4], info->mag[5], info->mnest);
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
- set_sched_topology(s390_topology);
- return 0;
+out:
+ __arch_update_cpu_topology();
}
-early_initcall(s390_topology_init);
static int __init topology_init(void)
{
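Two things happen in the topology.c hunks above: cpu_topology moves from per-CPU data into a plain NR_CPUS array so it can be filled in before the per-CPU areas exist, and the mask chain is allocated from memblock because topology_init_early() now runs before the slab allocator is usable. A condensed, illustrative sketch of the early allocation (types simplified; the 8-byte alignment matches the hunk):

#include <linux/bootmem.h>
#include <linux/cpumask.h>

/* Simplified stand-in for the kernel's mask_info. */
struct mask_info {
        struct mask_info *next;
        unsigned char id;
        cpumask_t mask;
};

/* Sketch of alloc_masks() after the change: one mask_info per entry
 * of the given topology level, carved out of memblock since
 * kzalloc() is not available this early in boot. */
static void __init alloc_mask_chain(struct mask_info *mask,
                                    unsigned long nr_masks)
{
        while (nr_masks--) {
                mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
                mask = mask->next;
        }
}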
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index d0539f76fd24..283ad7840335 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/fpu/api.h>
#include "entry.h"
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 5904abf6b1ae..10516ae3b55e 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -9,7 +9,7 @@
* as published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 5eec9afbb5b5..a5769b83d90e 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -99,8 +99,27 @@ __kernel_clock_gettime:
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,1(%r15)
- s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
+ s %r0,1(%r15) /* now - ts_steering_end */
+ sl %r1,5(%r15)
+ brc 3,22f
+ ahi %r0,-1
+22: ltr %r0,%r0 /* past end of steering? */
+ jm 24f
+ srdl %r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 23f
+ lcr %r0,%r0 /* negative TOD offset */
+ lcr %r1,%r1
+ je 23f
+ ahi %r0,-1
+23: a %r0,1(%r15) /* add TOD timestamp */
+ al %r1,5(%r15)
+ brc 12,25f
+ ahi %r0,1
+ j 25f
+24: lm %r0,%r1,1(%r15) /* load TOD timestamp */
+25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 719de6186b20..63b86dceb0bf 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -31,8 +31,27 @@ __kernel_gettimeofday:
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,1(%r15)
- s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
+ s %r0,1(%r15)
+ sl %r1,5(%r15)
+ brc 3,14f
+ ahi %r0,-1
+14: ltr %r0,%r0 /* past end of steering? */
+ jm 16f
+ srdl %r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 15f
+ lcr %r0,%r0 /* negative TOD offset */
+ lcr %r1,%r1
+ je 15f
+ ahi %r0,-1
+15: a %r0,1(%r15) /* add TOD timestamp */
+ al %r1,5(%r15)
+ brc 12,17f
+ ahi %r0,1
+ j 17f
+16: lm %r0,%r1,1(%r15) /* load TOD timestamp */
+17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 61541fb93dc6..9c3b12626dba 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -83,8 +83,17 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stcke 0(%r15) /* Store TOD clock */
- lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,1(%r15)
+ lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
+ slgr %r0,%r1 /* now - ts_steering_end */
+ ltgr %r0,%r0 /* past end of steering ? */
+ jm 17f
+ srlg %r0,%r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 18f
+ lcgr %r0,%r0 /* negative TOD offset */
+18: algr %r1,%r0 /* add steering offset */
+17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 6ce46707663c..b02e62f3bc12 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,16 @@ __kernel_gettimeofday:
jnz 0b
stcke 0(%r15) /* Store TOD clock */
lg %r1,1(%r15)
- sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
+ slgr %r0,%r1 /* now - ts_steering_end */
+ ltgr %r0,%r0 /* past end of steering ? */
+ jm 6f
+ srlg %r0,%r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 7f
+ lcgr %r0,%r0 /* negative TOD offset */
+7: algr %r1,%r0 /* add steering offset */
+6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
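All four vDSO hunks above apply the same TOD-clock steering fix-up before the usual cycle_last/mult/shift arithmetic; the 32-bit variants are only longer because the 64-bit TOD value lives in a register pair. A C model of the fix-up (names are mine; __VDSO_TS_END and __VDSO_TS_DIR are the data-page fields the assembly reads):

#include <stdint.h>

/* While the steering phase is active, a fraction of the remaining
 * steering time (right shift by 15, per the assembly) is added to or
 * subtracted from the raw TOD value. */
static uint64_t steer_tod(uint64_t tod, uint64_t ts_end,
                          int steer_negative)
{
        int64_t remaining = (int64_t)(ts_end - tod);
        uint64_t offset;

        if (remaining < 0)              /* past end of steering */
                return tod;
        offset = (uint64_t)remaining >> 15;
        return steer_negative ? tod - offset : tod + offset;
}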
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 3667d20e997f..5ccf95396251 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -44,6 +44,7 @@ SECTIONS
*(.gnu.warning)
} :text = 0x0700
+ . = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */
NOTES :text :note
@@ -79,7 +80,13 @@ SECTIONS
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE)
+ . = ALIGN(PAGE_SIZE);
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+ VMLINUX_SYMBOL(_sinittext) = . ;
+ INIT_TEXT
+ . = ALIGN(PAGE_SIZE);
+ VMLINUX_SYMBOL(_einittext) = . ;
+ }
/*
* .exit.text is discarded at runtime, not link time,
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 856e30d8463f..b4a3e9e06ef2 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -6,13 +6,13 @@
*/
#include <linux/kernel_stat.h>
+#include <linux/cputime.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
-#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
@@ -90,15 +90,41 @@ static void update_mt_scaling(void)
__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}
+static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
+{
+ u64 delta;
+
+ delta = new - *tsk_vtime;
+ *tsk_vtime = new;
+ return delta;
+}
+
+static inline u64 scale_vtime(u64 vtime)
+{
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ if (smp_cpu_mtid)
+ return vtime * mult / div;
+ return vtime;
+}
+
+static void account_system_index_scaled(struct task_struct *p,
+ cputime_t cputime, cputime_t scaled,
+ enum cpu_usage_stat index)
+{
+ p->stimescaled += cputime_to_nsecs(scaled);
+ account_system_index_time(p, cputime_to_nsecs(cputime), index);
+}
+
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk)
{
- struct thread_info *ti = task_thread_info(tsk);
- u64 timer, clock, user, system, steal;
- u64 user_scaled, system_scaled;
+ u64 timer, clock, user, guest, system, hardirq, softirq, steal;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -111,55 +137,76 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
#endif
: "=m" (S390_lowcore.last_update_timer),
"=m" (S390_lowcore.last_update_clock));
- S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
- S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+ clock = S390_lowcore.last_update_clock - clock;
+ timer -= S390_lowcore.last_update_timer;
+
+ if (hardirq_count())
+ S390_lowcore.hardirq_timer += timer;
+ else
+ S390_lowcore.system_timer += timer;
/* Update MT utilization calculation */
if (smp_cpu_mtid &&
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
- user = S390_lowcore.user_timer - ti->user_timer;
- S390_lowcore.steal_timer -= user;
- ti->user_timer = S390_lowcore.user_timer;
-
- system = S390_lowcore.system_timer - ti->system_timer;
- S390_lowcore.steal_timer -= system;
- ti->system_timer = S390_lowcore.system_timer;
-
- user_scaled = user;
- system_scaled = system;
- /* Do MT utilization scaling */
- if (smp_cpu_mtid) {
- u64 mult = __this_cpu_read(mt_scaling_mult);
- u64 div = __this_cpu_read(mt_scaling_div);
+ /* Calculate cputime delta */
+ user = update_tsk_timer(&tsk->thread.user_timer,
+ READ_ONCE(S390_lowcore.user_timer));
+ guest = update_tsk_timer(&tsk->thread.guest_timer,
+ READ_ONCE(S390_lowcore.guest_timer));
+ system = update_tsk_timer(&tsk->thread.system_timer,
+ READ_ONCE(S390_lowcore.system_timer));
+ hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
+ READ_ONCE(S390_lowcore.hardirq_timer));
+ softirq = update_tsk_timer(&tsk->thread.softirq_timer,
+ READ_ONCE(S390_lowcore.softirq_timer));
+ S390_lowcore.steal_timer +=
+ clock - user - guest - system - hardirq - softirq;
+
+ /* Push account value */
+ if (user) {
+ account_user_time(tsk, cputime_to_nsecs(user));
+ tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
+ }
- user_scaled = (user_scaled * mult) / div;
- system_scaled = (system_scaled * mult) / div;
+ if (guest) {
+ account_guest_time(tsk, cputime_to_nsecs(guest));
+ tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
}
- account_user_time(tsk, user, user_scaled);
- account_system_time(tsk, hardirq_offset, system, system_scaled);
+
+ if (system)
+ account_system_index_scaled(tsk, system, scale_vtime(system),
+ CPUTIME_SYSTEM);
+ if (hardirq)
+ account_system_index_scaled(tsk, hardirq, scale_vtime(hardirq),
+ CPUTIME_IRQ);
+ if (softirq)
+ account_system_index_scaled(tsk, softirq, scale_vtime(softirq),
+ CPUTIME_SOFTIRQ);
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
- account_steal_time(steal);
+ account_steal_time(cputime_to_nsecs(steal));
}
- return virt_timer_forward(user + system);
+ return virt_timer_forward(user + guest + system + hardirq + softirq);
}
void vtime_task_switch(struct task_struct *prev)
{
- struct thread_info *ti;
-
- do_account_vtime(prev, 0);
- ti = task_thread_info(prev);
- ti->user_timer = S390_lowcore.user_timer;
- ti->system_timer = S390_lowcore.system_timer;
- ti = task_thread_info(current);
- S390_lowcore.user_timer = ti->user_timer;
- S390_lowcore.system_timer = ti->system_timer;
+ do_account_vtime(prev);
+ prev->thread.user_timer = S390_lowcore.user_timer;
+ prev->thread.guest_timer = S390_lowcore.guest_timer;
+ prev->thread.system_timer = S390_lowcore.system_timer;
+ prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
+ prev->thread.softirq_timer = S390_lowcore.softirq_timer;
+ S390_lowcore.user_timer = current->thread.user_timer;
+ S390_lowcore.guest_timer = current->thread.guest_timer;
+ S390_lowcore.system_timer = current->thread.system_timer;
+ S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
+ S390_lowcore.softirq_timer = current->thread.softirq_timer;
}
/*
@@ -167,9 +214,9 @@ void vtime_task_switch(struct task_struct *prev)
* accounting system time in order to correctly compute
* the stolen time accounting.
*/
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
{
- if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+ if (do_account_vtime(tsk))
virt_timer_expire();
}
@@ -179,32 +226,22 @@ void vtime_account_user(struct task_struct *tsk)
*/
void vtime_account_irq_enter(struct task_struct *tsk)
{
- struct thread_info *ti = task_thread_info(tsk);
- u64 timer, system, system_scaled;
+ u64 timer;
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
- S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-
- /* Update MT utilization calculation */
- if (smp_cpu_mtid &&
- time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
- update_mt_scaling();
-
- system = S390_lowcore.system_timer - ti->system_timer;
- S390_lowcore.steal_timer -= system;
- ti->system_timer = S390_lowcore.system_timer;
- system_scaled = system;
- /* Do MT utilization scaling */
- if (smp_cpu_mtid) {
- u64 mult = __this_cpu_read(mt_scaling_mult);
- u64 div = __this_cpu_read(mt_scaling_div);
-
- system_scaled = (system_scaled * mult) / div;
- }
- account_system_time(tsk, 0, system, system_scaled);
-
- virt_timer_forward(system);
+ timer -= S390_lowcore.last_update_timer;
+
+ if ((tsk->flags & PF_VCPU) && (irq_count() == 0))
+ S390_lowcore.guest_timer += timer;
+ else if (hardirq_count())
+ S390_lowcore.hardirq_timer += timer;
+ else if (in_serving_softirq())
+ S390_lowcore.softirq_timer += timer;
+ else
+ S390_lowcore.system_timer += timer;
+
+ virt_timer_forward(timer);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
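The vtime.c rework replaces the old two-bucket user/system accounting with five lowcore buckets (user, guest, system, hardirq, softirq); steal time then falls out as whatever wall-clock delta none of the buckets claims. A condensed, illustrative model of one flush (the real code also applies the MT scaling factor via scale_vtime()):

#include <linux/types.h>

struct vtime_buckets {
        u64 user, guest, system, hardirq, softirq;
};

/* Illustrative only: the deltas are taken against per-task snapshots
 * the way update_tsk_timer() does above; all values are in CPU-timer
 * units. */
static u64 steal_from_deltas(u64 clock_delta,
                             const struct vtime_buckets *d)
{
        /* wall-clock time no bucket accounts for was stolen
         * by the hypervisor */
        return clock_delta - d->user - d->guest - d->system -
               d->hardirq - d->softirq;        /* -> steal_timer */
}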
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 4aa8a7e2a1da..4492c9363178 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
ipte_unlock_simple(vcpu);
}
-static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
enum gacc_mode mode)
{
union alet alet;
@@ -465,7 +465,9 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
struct trans_exc_code_bits {
unsigned long addr : 52; /* Translation-exception Address */
unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
- unsigned long : 6;
+ unsigned long : 2;
+ unsigned long b56 : 1;
+ unsigned long : 3;
unsigned long b60 : 1;
unsigned long b61 : 1;
unsigned long as : 2; /* ASCE Identifier */
@@ -485,7 +487,7 @@ enum prot_type {
};
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
- ar_t ar, enum gacc_mode mode, enum prot_type prot)
+ u8 ar, enum gacc_mode mode, enum prot_type prot)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
struct trans_exc_code_bits *tec;
@@ -497,14 +499,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
switch (code) {
case PGM_PROTECTION:
switch (prot) {
+ case PROT_TYPE_LA:
+ tec->b56 = 1;
+ break;
+ case PROT_TYPE_KEYC:
+ tec->b60 = 1;
+ break;
case PROT_TYPE_ALC:
tec->b60 = 1;
/* FALL THROUGH */
case PROT_TYPE_DAT:
tec->b61 = 1;
break;
- default: /* LA and KEYC set b61 to 0, other params undefined */
- return code;
}
/* FALL THROUGH */
case PGM_ASCE_TYPE:
@@ -539,7 +545,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
}
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
- unsigned long ga, ar_t ar, enum gacc_mode mode)
+ unsigned long ga, u8 ar, enum gacc_mode mode)
{
int rc;
struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
@@ -771,7 +777,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
return 1;
}
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *pages, unsigned long nr_pages,
const union asce asce, enum gacc_mode mode)
{
@@ -803,7 +809,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
return 0;
}
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -877,7 +883,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long *gpa, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -910,7 +916,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
/**
* check_gva_range - test a range of guest virtual addresses for accessibility
*/
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode)
{
unsigned long gpa;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 8756569ad938..7ce47fd36f28 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -162,11 +162,11 @@ enum gacc_mode {
};
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
- ar_t ar, unsigned long *gpa, enum gacc_mode mode);
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ u8 ar, unsigned long *gpa, enum gacc_mode mode);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode);
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len, enum gacc_mode mode);
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -218,7 +218,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* if data has been changed in guest space in case of an exception.
*/
static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len)
{
return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
@@ -238,7 +238,7 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
* data will be copied from guest space to kernel space.
*/
static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len)
{
return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
@@ -247,10 +247,11 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
/**
* read_guest_instr - copy instruction data from guest space to kernel space
* @vcpu: virtual cpu
+ * @ga: guest address
* @data: destination address in kernel space
* @len: number of bytes to copy
*
- * Copy @len bytes from the current psw address (guest space) to @data (kernel
+ * Copy @len bytes from the given address (guest space) to @data (kernel
* space).
*
* The behaviour of read_guest_instr is identical to read_guest, except that
@@ -258,10 +259,10 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
* address-space mode.
*/
static inline __must_check
-int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len)
+int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+ unsigned long len)
{
- return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len,
- GACC_IFETCH);
+ return access_guest(vcpu, ga, 0, data, len, GACC_IFETCH);
}
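With the extra ga parameter, callers of read_guest_instr() that previously got the current PSW address implicitly now pass it themselves; the kvm-s390.c hunk further down does exactly that:

u8 opcode;

/* fetch one byte at the current PSW to derive the instruction length */
rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);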
/**
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index d7c6a7f53ced..23d9a4e12da1 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -388,14 +388,13 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
#define per_write_wp_event(code) \
(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
-static int debug_exit_required(struct kvm_vcpu *vcpu)
+static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
+ unsigned long peraddr)
{
- u8 perc = vcpu->arch.sie_block->perc;
struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
- unsigned long peraddr = vcpu->arch.sie_block->peraddr;
if (guestdbg_hw_bp_enabled(vcpu)) {
if (per_write_wp_event(perc) &&
@@ -437,36 +436,118 @@ exit_required:
return 1;
}
+static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
+{
+ u8 exec_ilen = 0;
+ u16 opcode[3];
+ int rc;
+
+ if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
+ /* PER address references the fetched or the execute instr */
+ *addr = vcpu->arch.sie_block->peraddr;
+ /*
+ * Manually detect if we have an EXECUTE instruction. As
+ * instructions are always 2-byte aligned, we can read the
+ * first two bytes unconditionally.
+ */
+ rc = read_guest_instr(vcpu, *addr, &opcode, 2);
+ if (rc)
+ return rc;
+ if (opcode[0] >> 8 == 0x44)
+ exec_ilen = 4;
+ if ((opcode[0] & 0xff0f) == 0xc600)
+ exec_ilen = 6;
+ } else {
+ /* instr was suppressed, calculate the responsible instr */
+ *addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
+ kvm_s390_get_ilen(vcpu));
+ if (vcpu->arch.sie_block->icptstatus & 0x01) {
+ exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
+ if (!exec_ilen)
+ exec_ilen = 4;
+ }
+ }
+
+ if (exec_ilen) {
+ /* read the complete EXECUTE instr to detect the fetched addr */
+ rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
+ if (rc)
+ return rc;
+ if (exec_ilen == 6) {
+ /* EXECUTE RELATIVE LONG - RIL-b format */
+ s32 rl = *((s32 *) (opcode + 1));
+
+ /* rl is a _signed_ 32 bit value specifying halfwords */
+ *addr += (u64)(s64) rl * 2;
+ } else {
+ /* EXECUTE - RX-a format */
+ u32 base = (opcode[1] & 0xf000) >> 12;
+ u32 disp = opcode[1] & 0x0fff;
+ u32 index = opcode[0] & 0x000f;
+
+ *addr = base ? vcpu->run->s.regs.gprs[base] : 0;
+ *addr += index ? vcpu->run->s.regs.gprs[index] : 0;
+ *addr += disp;
+ }
+ *addr = kvm_s390_logical_to_effective(vcpu, *addr);
+ }
+ return 0;
+}
+
#define guest_per_enabled(vcpu) \
(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
+ const u64 cr10 = vcpu->arch.sie_block->gcr[10];
+ const u64 cr11 = vcpu->arch.sie_block->gcr[11];
const u8 ilen = kvm_s390_get_ilen(vcpu);
struct kvm_s390_pgm_info pgm_info = {
.code = PGM_PER,
.per_code = PER_CODE_IFETCH,
.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
};
+ unsigned long fetched_addr;
+ int rc;
/*
* The PSW points to the next instruction, therefore the intercepted
* instruction generated a PER i-fetch event. PER address therefore
* points at the previous PSW address (could be an EXECUTE function).
*/
- return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+ if (!guestdbg_enabled(vcpu))
+ return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+
+ if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
+ vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
+
+ if (!guest_per_enabled(vcpu) ||
+ !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
+ return 0;
+
+ rc = per_fetched_addr(vcpu, &fetched_addr);
+ if (rc < 0)
+ return rc;
+ if (rc)
+ /* instruction-fetching exceptions */
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+ if (in_addr_range(fetched_addr, cr10, cr11))
+ return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+ return 0;
}
-static void filter_guest_per_event(struct kvm_vcpu *vcpu)
+static int filter_guest_per_event(struct kvm_vcpu *vcpu)
{
const u8 perc = vcpu->arch.sie_block->perc;
- u64 peraddr = vcpu->arch.sie_block->peraddr;
u64 addr = vcpu->arch.sie_block->gpsw.addr;
u64 cr9 = vcpu->arch.sie_block->gcr[9];
u64 cr10 = vcpu->arch.sie_block->gcr[10];
u64 cr11 = vcpu->arch.sie_block->gcr[11];
/* filter all events, demanded by the guest */
u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
+ unsigned long fetched_addr;
+ int rc;
if (!guest_per_enabled(vcpu))
guest_perc = 0;
@@ -478,9 +559,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
guest_perc &= ~PER_CODE_BRANCH;
/* filter "instruction-fetching" events */
- if (guest_perc & PER_CODE_IFETCH &&
- !in_addr_range(peraddr, cr10, cr11))
- guest_perc &= ~PER_CODE_IFETCH;
+ if (guest_perc & PER_CODE_IFETCH) {
+ rc = per_fetched_addr(vcpu, &fetched_addr);
+ if (rc < 0)
+ return rc;
+ /*
+ * Don't inject an irq on exceptions. This would make handling
+ * of icpt code 8 very complex (as the PSW was already rewound).
+ */
+ if (rc || !in_addr_range(fetched_addr, cr10, cr11))
+ guest_perc &= ~PER_CODE_IFETCH;
+ }
/* All other PER events will be given to the guest */
/* TODO: Check altered address/address space */
@@ -489,6 +578,7 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
if (!guest_perc)
vcpu->arch.sie_block->iprcc &= ~PGM_PER;
+ return 0;
}
#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
@@ -496,14 +586,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
- int new_as;
+ int rc, new_as;
- if (debug_exit_required(vcpu))
+ if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
+ vcpu->arch.sie_block->peraddr))
vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
- filter_guest_per_event(vcpu);
+ rc = filter_guest_per_event(vcpu);
+ if (rc)
+ return rc;
/*
* Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
@@ -532,4 +625,5 @@ void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
(pssec(vcpu) || old_ssec(vcpu)))
vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
}
+ return 0;
}
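per_fetched_addr() has to look through EXECUTE: opcode 0x44 is EXECUTE (RX-a format, 4 bytes) and 0xc6 with a zero low nibble is EXECUTE RELATIVE LONG (RIL-b format, 6 bytes). Two made-up encodings, worked through the address computation in the hunk (halfwords read big-endian, as on s390):

/* Hypothetical examples for the two EXECUTE forms:
 *
 * EX %r1,0x100(%r2,%r3)  ->  opcode[] = { 0x4412, 0x3100 }
 *     index = opcode[0] & 0x000f           = 2
 *     base  = (opcode[1] & 0xf000) >> 12   = 3
 *     disp  = opcode[1] & 0x0fff           = 0x100
 *     target = gprs[3] + gprs[2] + 0x100
 *
 * EXRL %r1,.-8           ->  opcode[] = { 0xc610, 0xffff, 0xfffc }
 *     rl = *(s32 *)(opcode + 1) = -4       (signed halfwords)
 *     target = addr + 2 * rl = addr - 8
 *
 * followed by kvm_s390_logical_to_effective() to wrap the result
 * into the current addressing mode, exactly as the hunk does.
 */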
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 7a27eebab28a..59920f96ebc0 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -238,7 +238,9 @@ static int handle_prog(struct kvm_vcpu *vcpu)
vcpu->stat.exit_program_interruption++;
if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
- kvm_s390_handle_per_event(vcpu);
+ rc = kvm_s390_handle_per_event(vcpu);
+ if (rc)
+ return rc;
/* the interrupt might have been filtered out completely */
if (vcpu->arch.sie_block->iprcc == 0)
return 0;
@@ -359,6 +361,9 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
static int handle_operexc(struct kvm_vcpu *vcpu)
{
+ psw_t oldpsw, newpsw;
+ int rc;
+
vcpu->stat.exit_operation_exception++;
trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
vcpu->arch.sie_block->ipb);
@@ -369,6 +374,24 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
return -EOPNOTSUPP;
+ rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
+ if (rc)
+ return rc;
+ /*
+ * Avoid endless loops of operation exceptions if the pgm new
+ * PSW will cause a new operation exception.
+ * The heuristic checks whether the pgm new PSW is within 6 bytes
+ * before the faulting PSW address (with the same DAT and AS
+ * settings), the new PSW is not a wait PSW, and the fault was not
+ * triggered from problem state.
+ */
+ oldpsw = vcpu->arch.sie_block->gpsw;
+ if (oldpsw.addr - newpsw.addr <= 6 &&
+ !(newpsw.mask & PSW_MASK_WAIT) &&
+ !(oldpsw.mask & PSW_MASK_PSTATE) &&
+ (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
+ (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
+ return -EOPNOTSUPP;
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
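The loop-avoidance check relies on unsigned wrap-around: a single compare covers "pgm new PSW at most 6 bytes before (or at) the old address", because a new PSW beyond the old one makes the subtraction wrap to a huge value. Distilled:

#include <stdbool.h>
#include <stdint.h>

/* "new PSW within 6 bytes before the old one" in one unsigned compare */
static bool operexc_loops(uint64_t oldaddr, uint64_t newaddr)
{
        return oldaddr - newaddr <= 6;  /* wraps if newaddr > oldaddr */
}

/* operexc_loops(0x10008, 0x10004) -> true  (4 bytes before)
 * operexc_loops(0x10008, 0x10010) -> false ((u64)-8 after wrap)   */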
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index be4db07f70d3..0f8f14199734 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -20,7 +20,7 @@
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
@@ -415,7 +415,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
int rc;
mci.val = mchk->mcic;
- /* take care of lazy register loading via vcpu load/put */
+ /* take care of lazy register loading */
save_fpu_regs();
save_access_regs(vcpu->run->s.regs.acrs);
@@ -1019,7 +1019,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
return 0;
__set_cpu_idle(vcpu);
- hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+ hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9c7a1ecfe6bd..f5694838234d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -23,6 +23,7 @@
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -217,7 +218,7 @@ static void allow_cpu_feat(unsigned long nr)
static inline int plo_test_bit(unsigned char nr)
{
register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
- int cc = 3; /* subfunction not available */
+ int cc;
asm volatile(
/* Parameter registers are ignored for "test bit" */
@@ -370,6 +371,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_S390_INJECT_IRQ:
case KVM_CAP_S390_USER_SIGP:
case KVM_CAP_S390_USER_STSI:
@@ -442,6 +444,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int is_dirty = 0;
+ if (kvm_is_ucontrol(kvm))
+ return -EINVAL;
+
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
@@ -505,6 +510,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
} else if (MACHINE_HAS_VX) {
set_kvm_facility(kvm->arch.model.fac_mask, 129);
set_kvm_facility(kvm->arch.model.fac_list, 129);
+ if (test_facility(134)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 134);
+ set_kvm_facility(kvm->arch.model.fac_list, 134);
+ }
+ if (test_facility(135)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 135);
+ set_kvm_facility(kvm->arch.model.fac_list, 135);
+ }
r = 0;
} else
r = -EINVAL;
@@ -821,6 +834,13 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
}
memcpy(kvm->arch.model.fac_list, proc->fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE);
+ VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+ kvm->arch.model.ibc,
+ kvm->arch.model.cpuid);
+ VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+ kvm->arch.model.fac_list[0],
+ kvm->arch.model.fac_list[1],
+ kvm->arch.model.fac_list[2]);
} else
ret = -EFAULT;
kfree(proc);
@@ -894,6 +914,13 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
proc->ibc = kvm->arch.model.ibc;
memcpy(&proc->fac_list, kvm->arch.model.fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE);
+ VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+ kvm->arch.model.ibc,
+ kvm->arch.model.cpuid);
+ VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+ kvm->arch.model.fac_list[0],
+ kvm->arch.model.fac_list[1],
+ kvm->arch.model.fac_list[2]);
if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
ret = -EFAULT;
kfree(proc);
@@ -916,7 +943,18 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
+ VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
+ kvm->arch.model.ibc,
+ kvm->arch.model.cpuid);
+ VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
+ mach->fac_mask[0],
+ mach->fac_mask[1],
+ mach->fac_mask[2]);
+ VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+ mach->fac_list[0],
+ mach->fac_list[1],
+ mach->fac_list[2]);
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
@@ -1437,7 +1475,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size())
kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
@@ -1812,22 +1850,7 @@ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- /* Save host register state */
- save_fpu_regs();
- vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
- vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
- if (MACHINE_HAS_VX)
- current->thread.fpu.regs = vcpu->run->s.regs.vrs;
- else
- current->thread.fpu.regs = vcpu->run->s.regs.fprs;
- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
- if (test_fp_ctl(current->thread.fpu.fpc))
- /* User space provided an invalid FPC, let's clear it */
- current->thread.fpu.fpc = 0;
-
- save_access_regs(vcpu->arch.host_acrs);
- restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.enabled_gmap);
atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
@@ -1844,16 +1867,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.enabled_gmap = gmap_get_enabled();
gmap_disable(vcpu->arch.enabled_gmap);
- /* Save guest register state */
- save_fpu_regs();
- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-
- /* Restore host register state */
- current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
- current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
-
- save_access_regs(vcpu->run->s.regs.acrs);
- restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
@@ -1963,6 +1976,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
vcpu->arch.sie_block->ecb2 |= 0x08;
+ if (test_kvm_facility(vcpu->kvm, 130))
+ vcpu->arch.sie_block->ecb2 |= 0x20;
vcpu->arch.sie_block->eca = 0x1002000U;
if (sclp.has_cei)
vcpu->arch.sie_block->eca |= 0x80000000U;
@@ -2243,7 +2258,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
{
memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
- restore_access_regs(vcpu->run->s.regs.acrs);
return 0;
}
@@ -2257,11 +2271,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- /* make sure the new values will be lazily loaded */
- save_fpu_regs();
if (test_fp_ctl(fpu->fpc))
return -EINVAL;
- current->thread.fpu.fpc = fpu->fpc;
+ vcpu->run->s.regs.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
(freg_t *) fpu->fprs);
@@ -2279,7 +2291,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
(__vector128 *) vcpu->run->s.regs.vrs);
else
memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
- fpu->fpc = current->thread.fpu.fpc;
+ fpu->fpc = vcpu->run->s.regs.fpc;
return 0;
}
@@ -2606,7 +2618,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
* to look up the current opcode to get the length of the instruction
* to be able to forward the PSW.
*/
- rc = read_guest_instr(vcpu, &opcode, 1);
+ rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
ilen = insn_length(opcode);
if (rc < 0) {
return rc;
@@ -2740,6 +2752,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (riccb->valid)
vcpu->arch.sie_block->ecb3 |= 0x01;
}
+ save_access_regs(vcpu->arch.host_acrs);
+ restore_access_regs(vcpu->run->s.regs.acrs);
+ /* save host (userspace) fprs/vrs */
+ save_fpu_regs();
+ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+ if (MACHINE_HAS_VX)
+ current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+ else
+ current->thread.fpu.regs = vcpu->run->s.regs.fprs;
+ current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ if (test_fp_ctl(current->thread.fpu.fpc))
+ /* User space provided an invalid FPC, let's clear it */
+ current->thread.fpu.fpc = 0;
kvm_run->kvm_dirty_regs = 0;
}
@@ -2758,6 +2784,15 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ save_access_regs(vcpu->run->s.regs.acrs);
+ restore_access_regs(vcpu->arch.host_acrs);
+ /* Save guest register state */
+ save_fpu_regs();
+ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ /* Restore will be done lazily at return */
+ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -2765,6 +2800,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int rc;
sigset_t sigsaved;
+ if (kvm_run->immediate_exit)
+ return -EINTR;
+
if (guestdbg_exit_pending(vcpu)) {
kvm_s390_prepare_debug_exit(vcpu);
return 0;
@@ -2874,7 +2912,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
/*
* The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
- * copying in vcpu load/put. Lets update our copies before we save
+ * switch in the run ioctl. Let's update our copies before we save
* it into the save area
*/
save_fpu_regs();
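The net effect of the kvm-s390.c hunks: the guest/host switch of access and floating-point registers no longer happens in vcpu_load()/vcpu_put(), which also bracket every other vCPU ioctl, but only around an actual KVM_RUN. An outline of the new ownership protocol, pieced together from the hunks above (condensed, not verbatim):

/* kvm_arch_vcpu_ioctl_run()
 *   sync_regs():  save host acrs, restore guest acrs;
 *                 save_fpu_regs(), then point current->thread.fpu.regs
 *                 at vcpu->run->s.regs.{vrs,fprs} and load the fpc
 *   ... SIE, guest executes ...
 *   store_regs(): save guest acrs and fpc back to vcpu->run->s.regs;
 *                 point current->thread.fpu at the host buffers again
 *                 (the actual restore stays lazy)
 *
 * KVM_{GET,SET}_FPU therefore operate on vcpu->run->s.regs.fpc
 * directly instead of current->thread.fpu.fpc.
 */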
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 3a4e97f1a9e6..af9fa91a0c91 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -86,9 +86,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
-typedef u8 __bitwise ar_t;
-
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -101,7 +99,7 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
u64 *address1, u64 *address2,
- ar_t *ar_b1, ar_t *ar_b2)
+ u8 *ar_b1, u8 *ar_b2)
{
u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -125,7 +123,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -140,7 +138,7 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -379,7 +377,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
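For reference, the base/displacement decode these helpers share, worked with a made-up IPB value (the change itself only replaces the ar_t typedef with plain u8, since an access-register number is just a small integer):

/* Hypothetical S-format decode, as in kvm_s390_get_base_disp_s():
 *   ipb   = 0x5c340000
 *   base2 = ipb >> 28                 = 5
 *   disp2 = (ipb & 0x0fff0000) >> 16  = 0xc34
 *   ea    = gprs[5] + 0xc34, and *ar = 5
 * base2 == 0 means "no base register": it contributes 0 and the AR
 * number is meaningless, which the callers already handle.
 */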
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e18435355c16..fb4b494cde9b 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -54,7 +54,7 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
int rc;
- ar_t ar;
+ u8 ar;
u64 op2, val;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -79,7 +79,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_spx++;
@@ -117,7 +117,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_stpx++;
@@ -147,7 +147,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
u16 vcpu_id = vcpu->vcpu_id;
u64 ga;
int rc;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_stap++;
@@ -311,7 +311,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
if (rc < 0)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
start += PAGE_SIZE;
- };
+ }
if (m3 & (SSKE_MC | SSKE_MR)) {
if (m3 & SSKE_MB) {
@@ -380,7 +380,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
u32 tpi_data[3];
int rc;
u64 addr;
- ar_t ar;
+ u8 ar;
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
@@ -548,7 +548,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
psw_compat_t new_psw;
u64 addr;
int rc;
- ar_t ar;
+ u8 ar;
if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -575,7 +575,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
psw_t new_psw;
u64 addr;
int rc;
- ar_t ar;
+ u8 ar;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -597,7 +597,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
u64 stidp_data = vcpu->kvm->arch.model.cpuid;
u64 operand2;
int rc;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_stidp++;
@@ -644,7 +644,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
ASCEBC(mem->vm[0].cpi, 16);
}
-static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
u8 fc, u8 sel1, u16 sel2)
{
vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
@@ -663,7 +663,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
unsigned long mem = 0;
u64 operand2;
int rc = 0;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
@@ -970,7 +970,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_lctl++;
@@ -1009,7 +1009,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_stctl++;
@@ -1043,7 +1043,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_lctlg++;
@@ -1081,7 +1081,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_stctg++;
@@ -1132,7 +1132,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
unsigned long hva, gpa;
int ret = 0, cc = 0;
bool writable;
- ar_t ar;
+ u8 ar;
vcpu->stat.instruction_tprot++;
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d8673e243f13..38556e395915 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -324,6 +324,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* Run-time-Instrumentation */
if (test_kvm_facility(vcpu->kvm, 64))
scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+ /* Instruction Execution Prevention */
+ if (test_kvm_facility(vcpu->kvm, 130))
+ scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
scb_s->eca |= scb_o->eca & 0x00000001U;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
@@ -899,7 +902,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (rc || scb_s->icptcode || signal_pending(current) ||
kvm_s390_vcpu_has_irq(vcpu, 0))
break;
- };
+ }
if (rc == -EFAULT) {
/*
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 501dcd4ca4a0..92e90e40b6fb 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -9,7 +9,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/timex.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index be9fa65bfac4..7ff79a4ff00c 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -8,6 +8,43 @@
#include <asm/export.h>
/*
+ * void *memmove(void *dest, const void *src, size_t n)
+ */
+ENTRY(memmove)
+ ltgr %r4,%r4
+ lgr %r1,%r2
+ bzr %r14
+ aghi %r4,-1
+ clgr %r2,%r3
+ jnh .Lmemmove_forward
+ la %r5,1(%r4,%r3)
+ clgr %r2,%r5
+ jl .Lmemmove_reverse
+.Lmemmove_forward:
+ srlg %r0,%r4,8
+ ltgr %r0,%r0
+ jz .Lmemmove_forward_remainder
+.Lmemmove_forward_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+ la %r3,256(%r3)
+ brctg %r0,.Lmemmove_forward_loop
+.Lmemmove_forward_remainder:
+ larl %r5,.Lmemmove_mvc
+ ex %r4,0(%r5)
+ br %r14
+.Lmemmove_reverse:
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+ brctg %r4,.Lmemmove_reverse
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+ br %r14
+.Lmemmove_mvc:
+ mvc 0(1,%r1),0(%r3)
+EXPORT_SYMBOL(memmove)
+
+/*
* memset implementation
*
* This code corresponds to the C construct below. We do distinguish
@@ -31,12 +68,12 @@ ENTRY(memset)
srlg %r3,%r4,8
ltgr %r3,%r3
lgr %r1,%r2
- jz .Lmemset_clear_rest
+ jz .Lmemset_clear_remainder
.Lmemset_clear_loop:
xc 0(256,%r1),0(%r1)
la %r1,256(%r1)
brctg %r3,.Lmemset_clear_loop
-.Lmemset_clear_rest:
+.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
br %r14
@@ -48,12 +85,12 @@ ENTRY(memset)
aghi %r4,-2
srlg %r3,%r4,8
ltgr %r3,%r3
- jz .Lmemset_fill_rest
+ jz .Lmemset_fill_remainder
.Lmemset_fill_loop:
mvc 1(256,%r1),0(%r1)
la %r1,256(%r1)
brctg %r3,.Lmemset_fill_loop
-.Lmemset_fill_rest:
+.Lmemset_fill_remainder:
larl %r3,.Lmemset_mvc
ex %r4,0(%r3)
br %r14
@@ -76,7 +113,7 @@ ENTRY(memcpy)
ltgr %r5,%r5
lgr %r1,%r2
jnz .Lmemcpy_loop
-.Lmemcpy_rest:
+.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
br %r14
@@ -85,7 +122,7 @@ ENTRY(memcpy)
la %r1,256(%r1)
la %r3,256(%r3)
brctg %r5,.Lmemcpy_loop
- j .Lmemcpy_rest
+ j .Lmemcpy_remainder
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memcpy)
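The new assembler memmove picks its copy direction the way a portable C implementation would: the forward (mvc-chunked) copy is safe unless the destination starts inside the source. A C model of just that decision:

#include <stddef.h>

/* Portable model of the direction choice in the assembler memmove
 * above (the real code additionally copies forward in 256-byte mvc
 * chunks). */
void *memmove_model(void *dest, const void *src, size_t n)
{
        unsigned char *d = dest;
        const unsigned char *s = src;

        if (d <= s || d >= s + n) {     /* no forward overlap */
                while (n--)
                        *d++ = *s++;
        } else {                        /* dest inside src: go backwards */
                while (n--)
                        d[n] = s[n];
        }
        return dest;
}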
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7d2f4e..ba427eb6f14c 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -6,7 +6,7 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
-static inline int cpu_is_preempted(int cpu)
-{
- if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
- return 0;
- if (smp_vcpu_scheduled(cpu))
- return 0;
- return 1;
-}
-
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
continue;
}
/* First iteration: check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
continue;
}
/* Check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -142,7 +133,7 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
int count;
for (count = spin_retry; count > 0; count--) {
- owner = ACCESS_ONCE(lp->lock);
+ owner = READ_ONCE(lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
if (_raw_compare_and_swap(&lp->lock, 0, cpu))
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
{
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
return;
smp_yield_cpu(~cpu);
}
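cpu_is_preempted() is not lost; it is replaced by the generic vcpu_is_preempted() hook. The s390 backend is part of the same series but not shown in this diff, so the following is an assumed counterpart that keeps the deleted helper's logic:

/* Assumption: same two tests as the deleted helper, inverted into
 * "is preempted" form behind the generic interface. */
static inline bool arch_vcpu_is_preempted(int cpu)
{
        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
                return false;   /* target is idle-waiting, not preempted */
        if (smp_vcpu_scheduled(cpu))
                return false;   /* backing host vCPU is running */
        return true;
}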
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 48352bffbc92..4ee27339c792 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -9,7 +9,8 @@
#define IN_ARCH_STRING_C 1
#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/export.h>
/*
* Helper functions to find the end of a string
@@ -20,7 +21,7 @@ static inline char *__strend(const char *s)
asm volatile ("0: srst %0,%1\n"
" jo 0b"
- : "+d" (r0), "+a" (s) : : "cc" );
+ : "+d" (r0), "+a" (s) : : "cc", "memory");
return (char *) r0;
}
@@ -31,7 +32,7 @@ static inline char *__strnend(const char *s, size_t n)
asm volatile ("0: srst %0,%1\n"
" jo 0b"
- : "+d" (p), "+a" (s) : "d" (r0) : "cc" );
+ : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
return (char *) p;
}
@@ -213,7 +214,7 @@ int strcmp(const char *cs, const char *ct)
" sr %0,%1\n"
"1:"
: "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
- : : "cc" );
+ : : "cc", "memory");
return ret;
}
EXPORT_SYMBOL(strcmp);
@@ -250,7 +251,7 @@ static inline int clcle(const char *s1, unsigned long l1,
" ipm %0\n"
" srl %0,28"
: "=&d" (cc), "+a" (r2), "+a" (r3),
- "+a" (r4), "+a" (r5) : : "cc");
+ "+a" (r4), "+a" (r5) : : "cc", "memory");
return cc;
}
@@ -298,7 +299,7 @@ void *memchr(const void *s, int c, size_t n)
" jl 1f\n"
" la %0,0\n"
"1:"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+ : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
EXPORT_SYMBOL(memchr);
@@ -336,7 +337,7 @@ void *memscan(void *s, int c, size_t n)
asm volatile ("0: srst %0,%1\n"
" jo 0b\n"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+ : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
EXPORT_SYMBOL(memscan);
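The added "memory" clobbers tell the compiler that the SRST/CLCLE-based asm reads memory it cannot see through the operands; without them, a prior store into the scanned buffer may legally be reordered past (or cached across) the asm. The __strend() pattern with the fixed constraint list, for illustration:

/* As in the patched __strend(): r0 = 0 makes SRST scan for the NUL
 * byte; the "memory" clobber orders the asm against earlier stores. */
static inline char *strend_example(const char *s)
{
        register unsigned long r0 asm("0") = 0;

        asm volatile("0: srst %0,%1\n"
                     "   jo   0b"
                     : "+d" (r0), "+a" (s)
                     :
                     : "cc", "memory");
        return (char *) r0;
}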
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index 7d94e3ec34a9..b4fd05c36151 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -6,7 +6,7 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/raid/xor.h>
static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 79ddd580d605..829c63dbc81a 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 861880df12c7..1b553d847140 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -1,6 +1,6 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -49,8 +49,8 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
seq_printf(m, "I\n");
return;
}
- seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
- seq_putc(m, '\n');
+ seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
+ seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}
static void note_page(struct seq_file *m, struct pg_state *st,
@@ -117,7 +117,8 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
st->current_address = addr;
pte = pte_offset_kernel(pmd, addr);
- prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
+ prot = pte_val(*pte) &
+ (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
note_page(m, st, prot, 4);
addr += PAGE_SIZE;
}
@@ -135,7 +136,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
- prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
+ prot = pmd_val(*pmd) &
+ (_SEGMENT_ENTRY_PROTECT |
+ _SEGMENT_ENTRY_NOEXEC);
note_page(m, st, prot, 3);
} else
walk_pte_level(m, st, pmd, addr);
@@ -157,7 +160,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
- prot = pud_val(*pud) & _REGION_ENTRY_PROTECT;
+ prot = pud_val(*pud) &
+ (_REGION_ENTRY_PROTECT |
+ _REGION_ENTRY_NOEXEC);
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
@@ -183,6 +188,7 @@ static void walk_pgd_level(struct seq_file *m)
else
note_page(m, &st, _PAGE_INVALID, 1);
addr += PGDIR_SIZE;
+ cond_resched();
}
/* Flush out the last page */
st.current_address = max_addr;
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 02042b6b66bf..9f118629b55f 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -14,7 +14,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
@@ -122,7 +122,7 @@ dcss_set_subcodes(void)
"1: la %2,3\n"
"2:\n"
EX_TABLE(0b, 1b)
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory");
kfree(name);
/* Diag x'64' new subcodes are supported, set to new subcodes */
@@ -154,7 +154,7 @@ dcss_mkname(char *name, char *dcss_name)
if (name[i] == '\0')
break;
dcss_name[i] = toupper(name[i]);
- };
+ }
for (; i < 8; i++)
dcss_name[i] = ' ';
ASCEBC(dcss_name, 8);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 661d9fe63c43..bb5560eb2435 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -311,12 +311,34 @@ static noinline void do_sigbus(struct pt_regs *regs)
force_sig_info(SIGBUS, &si, tsk);
}
-static noinline void do_fault_error(struct pt_regs *regs, int fault)
+static noinline int signal_return(struct pt_regs *regs)
+{
+ u16 instruction;
+ int rc;
+
+ rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
+ if (rc)
+ return rc;
+ if (instruction == 0x0a77) {
+ set_pt_regs_flag(regs, PIF_SYSCALL);
+ regs->int_code = 0x00040077;
+ return 0;
+ } else if (instruction == 0x0aad) {
+ set_pt_regs_flag(regs, PIF_SYSCALL);
+ regs->int_code = 0x000400ad;
+ return 0;
+ }
+ return -EACCES;
+}
+
+static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
{
int si_code;
switch (fault) {
case VM_FAULT_BADACCESS:
+ if (access == VM_EXEC && signal_return(regs) == 0)
+ break;
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
@@ -324,7 +346,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
do_sigsegv(regs, si_code);
- return;
+ break;
}
case VM_FAULT_BADCONTEXT:
case VM_FAULT_PFAULT:
@@ -525,7 +547,7 @@ out:
void do_protection_exception(struct pt_regs *regs)
{
unsigned long trans_exc_code;
- int fault;
+ int access, fault;
trans_exc_code = regs->int_parm_long;
/*
@@ -544,9 +566,17 @@ void do_protection_exception(struct pt_regs *regs)
do_low_address(regs);
return;
}
- fault = do_exception(regs, VM_WRITE);
+ if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
+ regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
+ (regs->psw.addr & PAGE_MASK);
+ access = VM_EXEC;
+ fault = VM_FAULT_BADACCESS;
+ } else {
+ access = VM_WRITE;
+ fault = do_exception(regs, access);
+ }
if (unlikely(fault))
- do_fault_error(regs, fault);
+ do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
@@ -557,7 +587,7 @@ void do_dat_exception(struct pt_regs *regs)
access = VM_READ | VM_EXEC | VM_WRITE;
fault = do_exception(regs, access);
if (unlikely(fault))
- do_fault_error(regs, fault);
+ do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);
@@ -733,6 +763,7 @@ block:
* return to userspace schedule() to block. */
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
+ set_preempt_need_resched();
}
}
out:
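
The new signal_return() helper lets an execute-protection fault on the signal trampoline still enter the kernel: 0x0a is the svc opcode, so 0x0a77 is svc 119 (sigreturn) and 0x0aad is svc 173 (rt_sigreturn) on s390. A standalone sketch of the same decode:

    #include <stdint.h>
    #include <stdio.h>

    /* 0x0a is the s390 svc opcode; the low byte is the syscall number. */
    static int is_sigreturn_svc(uint16_t insn)
    {
    	return insn == 0x0a77 ||	/* svc 119: sigreturn */
    	       insn == 0x0aad;		/* svc 173: rt_sigreturn */
    }

    int main(void)
    {
    	printf("%d %d\n", is_sigreturn_svc(0x0a77), is_sigreturn_svc(0x0a00));
    	return 0;
    }
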
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 3ba622702ce4..a07b1ec1391d 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -359,8 +359,8 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
spin_lock(&gmap->guest_table_lock);
entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
- flush = (*entry != _SEGMENT_ENTRY_INVALID);
- *entry = _SEGMENT_ENTRY_INVALID;
+ flush = (*entry != _SEGMENT_ENTRY_EMPTY);
+ *entry = _SEGMENT_ENTRY_EMPTY;
}
spin_unlock(&gmap->guest_table_lock);
return flush;
@@ -589,7 +589,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
return rc;
ptl = pmd_lock(mm, pmd);
spin_lock(&gmap->guest_table_lock);
- if (*table == _SEGMENT_ENTRY_INVALID) {
+ if (*table == _SEGMENT_ENTRY_EMPTY) {
rc = radix_tree_insert(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT, table);
if (!rc)
@@ -687,7 +687,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
- zap_page_range(vma, vmaddr, size, NULL);
+ zap_page_range(vma, vmaddr, size);
}
up_read(&gmap->mm->mmap_sem);
}
@@ -1015,7 +1015,7 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
if (slot) {
rmap->next = radix_tree_deref_slot_protected(slot,
&sg->guest_table_lock);
- radix_tree_replace_slot(slot, rmap);
+ radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
} else {
rmap->next = NULL;
radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
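
radix_tree_replace_slot() grew a tree-root argument in this kernel generation, which is why the gmap call above now passes &sg->host_to_rmap. A hedged fragment of the updated calling pattern (not standalone kernel code):

    /* Replace an entry in place while holding the tree's lock. */
    static void rmap_replace(struct radix_tree_root *root,
    			 unsigned long index, void *new)
    {
    	void **slot;

    	slot = radix_tree_lookup_slot(root, index);
    	if (slot)
    		radix_tree_replace_slot(root, slot, new);
    }
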
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 4a0c5bce3552..9b4050caa4e9 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -59,8 +59,10 @@ static inline unsigned long __pte_to_rste(pte_t pte)
rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
_SEGMENT_ENTRY_SOFT_DIRTY);
#endif
+ rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
+ _SEGMENT_ENTRY_NOEXEC);
} else
- rste = _SEGMENT_ENTRY_INVALID;
+ rste = _SEGMENT_ENTRY_EMPTY;
return rste;
}
@@ -113,6 +115,8 @@ static inline pte_t __rste_to_pte(unsigned long rste)
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
_PAGE_DIRTY);
#endif
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
+ _PAGE_NOEXEC);
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
@@ -121,7 +125,11 @@ static inline pte_t __rste_to_pte(unsigned long rste)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- unsigned long rste = __pte_to_rste(pte);
+ unsigned long rste;
+
+ rste = __pte_to_rste(pte);
+ if (!MACHINE_HAS_NX)
+ rste &= ~_SEGMENT_ENTRY_NOEXEC;
/* Set correct table type for 2G hugepages */
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
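
move_set_bit() itself is outside this hunk; a plausible minimal definition, consistent with how it is used above to translate a pte bit into the corresponding segment-entry bit, would be:

    /* Assumed helper: return @mask if @bit is set in @x, otherwise 0. */
    static inline unsigned long move_set_bit(unsigned long x, unsigned long bit,
    					 unsigned long mask)
    {
    	return (x & bit) ? mask : 0;
    }
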
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index b3e9d18f2ec6..ee5066718b21 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -29,7 +29,7 @@
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
@@ -137,6 +137,9 @@ void __init mem_init(void)
void free_initmem(void)
{
+ __set_memory((unsigned long) _sinittext,
+ (_einittext - _sinittext) >> PAGE_SHIFT,
+ SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}
@@ -148,6 +151,15 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
+unsigned long memory_block_size_bytes(void)
+{
+ /*
+ * Make sure the memory block size is always greater than
+ * or equal to the memory increment size.
+ */
+ return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
@@ -191,15 +203,6 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
return rc;
}
-unsigned long memory_block_size_bytes(void)
-{
- /*
- * Make sure the memory block size is always greater than
- * or equal to the memory increment size.
- */
- return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
-}
-
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index d612cc3eec6a..e58dca05b962 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -5,7 +5,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/debugfs.h>
@@ -19,6 +18,8 @@
static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
+ memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
+ start, start + size - 1);
memblock_add_range(&memblock.memory, start, size, 0, 0);
memblock_add_range(&memblock.physmem, start, size, 0, 0);
}
@@ -39,7 +40,8 @@ void __init detect_memory_memblock(void)
memblock_set_bottom_up(true);
do {
size = 0;
- type = tprot(addr);
+ /* assume lowcore is writable */
+ type = addr ? tprot(addr) : CHUNK_READ_WRITE;
do {
size += rzm;
if (max_physmem_end && addr + size >= max_physmem_end)
@@ -55,4 +57,5 @@ void __init detect_memory_memblock(void)
memblock_set_bottom_up(false);
if (!max_physmem_end)
max_physmem_end = memblock_end_of_DRAM();
+ memblock_dump_all();
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index eb9df2822da1..7ae1282d5be9 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -26,11 +26,11 @@
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
+#include <asm/elf.h>
static unsigned long stack_maxrandom_size(void)
{
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 44f150312a16..fc5dc33bb141 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -3,7 +3,6 @@
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/hugetlb.h>
-#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@@ -81,24 +80,24 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
}
}
-struct cpa {
- unsigned int set_ro : 1;
- unsigned int clear_ro : 1;
-};
-
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
- struct cpa cpa)
+ unsigned long flags)
{
pte_t *ptep, new;
ptep = pte_offset(pmdp, addr);
do {
- if (pte_none(*ptep))
+ new = *ptep;
+ if (pte_none(new))
return -EINVAL;
- if (cpa.set_ro)
- new = pte_wrprotect(*ptep);
- else if (cpa.clear_ro)
- new = pte_mkwrite(pte_mkdirty(*ptep));
+ if (flags & SET_MEMORY_RO)
+ new = pte_wrprotect(new);
+ else if (flags & SET_MEMORY_RW)
+ new = pte_mkwrite(pte_mkdirty(new));
+ if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
+ pte_val(new) |= _PAGE_NOEXEC;
+ else if (flags & SET_MEMORY_X)
+ pte_val(new) &= ~_PAGE_NOEXEC;
pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
ptep++;
addr += PAGE_SIZE;
@@ -112,14 +111,17 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
unsigned long pte_addr, prot;
pte_t *pt_dir, *ptep;
pmd_t new;
- int i, ro;
+ int i, ro, nx;
pt_dir = vmem_pte_alloc();
if (!pt_dir)
return -ENOMEM;
pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
+ nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+ if (!nx)
+ prot &= ~_PAGE_NOEXEC;
ptep = pt_dir;
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_val(*ptep) = pte_addr | prot;
@@ -133,19 +135,24 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
return 0;
}
-static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
+static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
+ unsigned long flags)
{
- pmd_t new;
-
- if (cpa.set_ro)
- new = pmd_wrprotect(*pmdp);
- else if (cpa.clear_ro)
- new = pmd_mkwrite(pmd_mkdirty(*pmdp));
+ pmd_t new = *pmdp;
+
+ if (flags & SET_MEMORY_RO)
+ new = pmd_wrprotect(new);
+ else if (flags & SET_MEMORY_RW)
+ new = pmd_mkwrite(pmd_mkdirty(new));
+ if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
+ pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
+ else if (flags & SET_MEMORY_X)
+ pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
- struct cpa cpa)
+ unsigned long flags)
{
unsigned long next;
pmd_t *pmdp;
@@ -163,9 +170,9 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
return rc;
continue;
}
- modify_pmd_page(pmdp, addr, cpa);
+ modify_pmd_page(pmdp, addr, flags);
} else {
- rc = walk_pte_level(pmdp, addr, next, cpa);
+ rc = walk_pte_level(pmdp, addr, next, flags);
if (rc)
return rc;
}
@@ -181,14 +188,17 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
unsigned long pmd_addr, prot;
pmd_t *pm_dir, *pmdp;
pud_t new;
- int i, ro;
+ int i, ro, nx;
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
return -ENOMEM;
pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
+ nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
+ if (!nx)
+ prot &= ~_SEGMENT_ENTRY_NOEXEC;
pmdp = pm_dir;
for (i = 0; i < PTRS_PER_PMD; i++) {
pmd_val(*pmdp) = pmd_addr | prot;
@@ -202,19 +212,24 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
return 0;
}
-static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
+static void modify_pud_page(pud_t *pudp, unsigned long addr,
+ unsigned long flags)
{
- pud_t new;
-
- if (cpa.set_ro)
- new = pud_wrprotect(*pudp);
- else if (cpa.clear_ro)
- new = pud_mkwrite(pud_mkdirty(*pudp));
+ pud_t new = *pudp;
+
+ if (flags & SET_MEMORY_RO)
+ new = pud_wrprotect(new);
+ else if (flags & SET_MEMORY_RW)
+ new = pud_mkwrite(pud_mkdirty(new));
+ if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
+ pud_val(new) |= _REGION_ENTRY_NOEXEC;
+ else if (flags & SET_MEMORY_X)
+ pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
- struct cpa cpa)
+ unsigned long flags)
{
unsigned long next;
pud_t *pudp;
@@ -232,9 +247,9 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
break;
continue;
}
- modify_pud_page(pudp, addr, cpa);
+ modify_pud_page(pudp, addr, flags);
} else {
- rc = walk_pmd_level(pudp, addr, next, cpa);
+ rc = walk_pmd_level(pudp, addr, next, flags);
}
pudp++;
addr = next;
@@ -246,7 +261,7 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
static DEFINE_MUTEX(cpa_mutex);
static int change_page_attr(unsigned long addr, unsigned long end,
- struct cpa cpa)
+ unsigned long flags)
{
unsigned long next;
int rc = -EINVAL;
@@ -262,7 +277,7 @@ static int change_page_attr(unsigned long addr, unsigned long end,
if (pgd_none(*pgdp))
break;
next = pgd_addr_end(addr, end);
- rc = walk_pud_level(pgdp, addr, next, cpa);
+ rc = walk_pud_level(pgdp, addr, next, flags);
if (rc)
break;
cond_resched();
@@ -271,35 +286,10 @@ static int change_page_attr(unsigned long addr, unsigned long end,
return rc;
}
-int set_memory_ro(unsigned long addr, int numpages)
+int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
- struct cpa cpa = {
- .set_ro = 1,
- };
-
addr &= PAGE_MASK;
- return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
-}
-
-int set_memory_rw(unsigned long addr, int numpages)
-{
- struct cpa cpa = {
- .clear_ro = 1,
- };
-
- addr &= PAGE_MASK;
- return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
-}
-
-/* not possible */
-int set_memory_nx(unsigned long addr, int numpages)
-{
- return 0;
-}
-
-int set_memory_x(unsigned long addr, int numpages)
-{
- return 0;
+ return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -339,7 +329,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
nr = min(numpages - i, nr);
if (enable) {
for (j = 0; j < nr; j++) {
- pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
+ pte_val(*pte) &= ~_PAGE_INVALID;
address += PAGE_SIZE;
pte++;
}
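
With struct cpa gone, callers pass an OR of SET_MEMORY_* flags into __set_memory(), and the old set_memory_ro()/set_memory_rw()/set_memory_nx()/set_memory_x() entry points presumably become thin wrappers in asm/cacheflush.h. A hedged sketch of that interface (names and flag values assumed):

    /* Assumed shape of the new asm/cacheflush.h interface. */
    #define SET_MEMORY_RO	1UL
    #define SET_MEMORY_RW	2UL
    #define SET_MEMORY_NX	4UL
    #define SET_MEMORY_X	8UL

    int __set_memory(unsigned long addr, int numpages, unsigned long flags);

    static inline int set_memory_ro(unsigned long addr, int numpages)
    {
    	return __set_memory(addr, numpages, SET_MEMORY_RO);
    }

    static inline int set_memory_nx(unsigned long addr, int numpages)
    {
    	return __set_memory(addr, numpages, SET_MEMORY_NX);
    }
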
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c51c54..b48dc5f1900b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
return pgste;
}
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pgste_t pgste, pte_t old, pte_t new)
{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
} else {
*ptep = new;
}
+ return old;
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_direct(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -274,6 +275,8 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
{
pgste_t pgste;
+ if (!MACHINE_HAS_NX)
+ pte_val(pte) &= ~_PAGE_NOEXEC;
if (mm_has_pgste(mm)) {
pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte, mm);
@@ -741,7 +744,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
pgste_set_unlock(ptep, new);
pte_unmap_unlock(ptep, ptl);
- return 0;
+ return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1848292766ef..60d38993f232 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -6,7 +6,7 @@
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
@@ -34,7 +34,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
- return alloc_bootmem_align(size, size);
+ return (void *) memblock_alloc(size, size);
}
static inline pud_t *vmem_pud_alloc(void)
@@ -61,17 +61,16 @@ pmd_t *vmem_pmd_alloc(void)
pte_t __ref *vmem_pte_alloc(void)
{
+ unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
pte_t *pte;
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
- pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
- PTRS_PER_PTE * sizeof(pte_t));
+ pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_INVALID,
- PTRS_PER_PTE * sizeof(pte_t));
+ clear_table((unsigned long *) pte, _PAGE_INVALID, size);
return pte;
}
@@ -80,6 +79,7 @@ pte_t __ref *vmem_pte_alloc(void)
*/
static int vmem_add_mem(unsigned long start, unsigned long size)
{
+ unsigned long pgt_prot, sgt_prot, r3_prot;
unsigned long pages4k, pages1m, pages2g;
unsigned long end = start + size;
unsigned long address = start;
@@ -89,6 +89,14 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
pte_t *pt_dir;
int ret = -ENOMEM;
+ pgt_prot = pgprot_val(PAGE_KERNEL);
+ sgt_prot = pgprot_val(SEGMENT_KERNEL);
+ r3_prot = pgprot_val(REGION3_KERNEL);
+ if (!MACHINE_HAS_NX) {
+ pgt_prot &= ~_PAGE_NOEXEC;
+ sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
+ r3_prot &= ~_REGION_ENTRY_NOEXEC;
+ }
pages4k = pages1m = pages2g = 0;
while (address < end) {
pg_dir = pgd_offset_k(address);
@@ -102,7 +110,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
- pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
+ pud_val(*pu_dir) = address | r3_prot;
address += PUD_SIZE;
pages2g++;
continue;
@@ -117,7 +125,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
- pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
+ pmd_val(*pm_dir) = address | sgt_prot;
address += PMD_SIZE;
pages1m++;
continue;
@@ -130,7 +138,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
}
pt_dir = pte_offset_kernel(pm_dir, address);
- pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
+ pte_val(*pt_dir) = address | pgt_prot;
address += PAGE_SIZE;
pages4k++;
}
@@ -201,6 +209,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
*/
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
+ unsigned long pgt_prot, sgt_prot;
unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
@@ -208,6 +217,12 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pte_t *pt_dir;
int ret = -ENOMEM;
+ pgt_prot = pgprot_val(PAGE_KERNEL);
+ sgt_prot = pgprot_val(SEGMENT_KERNEL);
+ if (!MACHINE_HAS_NX) {
+ pgt_prot &= ~_PAGE_NOEXEC;
+ sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
+ }
for (address = start; address < end;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -239,8 +254,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page = vmemmap_alloc_block(PMD_SIZE, node);
if (!new_page)
goto out;
- pmd_val(*pm_dir) = __pa(new_page) |
- _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
+ pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -260,8 +274,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page = vmemmap_alloc_block(PAGE_SIZE, node);
if (!new_page)
goto out;
- pte_val(*pt_dir) =
- __pa(new_page) | pgprot_val(PAGE_KERNEL);
+ pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
}
address += PAGE_SIZE;
}
@@ -373,13 +386,21 @@ out:
*/
void __init vmem_map_init(void)
{
- unsigned long size = _eshared - _stext;
struct memblock_region *reg;
for_each_memblock(memory, reg)
vmem_add_mem(reg->base, reg->size);
- set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
- pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
+ __set_memory((unsigned long) _stext,
+ (_etext - _stext) >> PAGE_SHIFT,
+ SET_MEMORY_RO | SET_MEMORY_X);
+ __set_memory((unsigned long) _etext,
+ (_eshared - _etext) >> PAGE_SHIFT,
+ SET_MEMORY_RO);
+ __set_memory((unsigned long) _sinittext,
+ (_einittext - _sinittext) >> PAGE_SHIFT,
+ SET_MEMORY_RO | SET_MEMORY_X);
+ pr_info("Write protected kernel read-only data: %luk\n",
+ (_eshared - _stext) >> 10);
}
/*
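
The same !MACHINE_HAS_NX masking appears in vmem_add_mem(), vmemmap_populate() and the hugetlb and pgtable paths: protection values are built with the NX bit included and the bit is stripped once, up front, on machines without the instruction-execution-protection facility. Distilled into one hedged helper:

    /* Sketch: build a kernel page protection value, dropping
     * _PAGE_NOEXEC when facility 130 is not installed. */
    static unsigned long kernel_pgt_prot(void)
    {
    	unsigned long prot = pgprot_val(PAGE_KERNEL);

    	if (!MACHINE_HAS_NX)
    		prot &= ~_PAGE_NOEXEC;
    	return prot;
    }
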
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bee281f3163d..4ecf6d687509 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -981,7 +981,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
EMIT2(0x0d00, REG_14, REG_W1);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
- if (bpf_helper_changes_skb_data((void *)func)) {
+ if (bpf_helper_changes_pkt_data((void *)func)) {
jit->seen |= SEEN_SKB_CHANGE;
/* lg %b1,ST_OFF_SKBP(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
@@ -1263,14 +1263,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
}
/*
- * Classic BPF function stub. BPF programs will be converted into
- * eBPF and then bpf_int_jit_compile() will be called.
- */
-void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
-/*
* Compile eBPF program "fp"
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
@@ -1331,14 +1323,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
}
if (bpf_jit_enable > 1) {
bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
- if (jit.prg_buf)
- print_fn_code(jit.prg_buf, jit.size_prg);
- }
- if (jit.prg_buf) {
- set_memory_ro((unsigned long)header, header->pages);
- fp->bpf_func = (void *) jit.prg_buf;
- fp->jited = 1;
+ print_fn_code(jit.prg_buf, jit.size_prg);
}
+ bpf_jit_binary_lock_ro(header);
+ fp->bpf_func = (void *) jit.prg_buf;
+ fp->jited = 1;
free_addrs:
kfree(jit.addrs);
out:
@@ -1347,21 +1336,3 @@ out:
tmp : orig_fp);
return fp;
}
-
-/*
- * Free eBPF program
- */
-void bpf_jit_free(struct bpf_prog *fp)
-{
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *header = (void *)addr;
-
- if (!fp->jited)
- goto free_filter;
-
- set_memory_rw(addr, header->pages);
- bpf_jit_binary_free(header);
-
-free_filter:
- bpf_prog_unlock_free(fp);
-}
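
bpf_jit_compile() and bpf_jit_free() disappear here because the generic eBPF core now provides them; locking the image read-only goes through bpf_jit_binary_lock_ro(). In generic code of this era that helper is, roughly (hedged sketch):

    /* Assumed shape of the generic helper this JIT now relies on. */
    static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
    {
    	set_memory_ro((unsigned long)hdr, hdr->pages);
    }
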
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 37e0bb835516..cfd08384f0ab 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
+#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
/*
* Allocate and initialize core to node mapping
*/
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
{
int i;
- emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
- if (emu_cores == NULL)
- panic("Could not allocate cores to node memory");
+ emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
@@ -354,13 +353,13 @@ static struct toptree *toptree_from_topology(void)
phys = toptree_new(TOPTREE_ID_PHYS, 1);
- for_each_online_cpu(cpu) {
- top = &per_cpu(cpu_topology, cpu);
+ for_each_cpu(cpu, &cpus_with_topology) {
+ top = &cpu_topology[cpu];
node = toptree_get_child(phys, 0);
drawer = toptree_get_child(node, top->drawer_id);
book = toptree_get_child(drawer, top->book_id);
mc = toptree_get_child(book, top->socket_id);
- core = toptree_get_child(mc, top->core_id);
+ core = toptree_get_child(mc, smp_get_base_cpu(cpu));
if (!drawer || !book || !mc || !core)
panic("NUMA emulation could not allocate memory");
cpumask_set_cpu(cpu, &core->mask);
@@ -378,7 +377,7 @@ static void topology_add_core(struct toptree *core)
int cpu;
for_each_cpu(cpu, &core->mask) {
- top = &per_cpu(cpu_topology, cpu);
+ top = &cpu_topology[cpu];
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
@@ -425,6 +424,27 @@ static void print_node_to_core_map(void)
}
}
+static void pin_all_possible_cpus(void)
+{
+ int core_id, node_id, cpu;
+ static int initialized;
+
+ if (initialized)
+ return;
+ print_node_to_core_map();
+ node_id = 0;
+ for_each_possible_cpu(cpu) {
+ core_id = smp_get_base_cpu(cpu);
+ if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
+ continue;
+ pin_core_to_node(core_id, node_id);
+ cpu_topology[cpu].node_id = node_id;
+ node_id = (node_id + 1) % emu_nodes;
+ }
+ print_node_to_core_map();
+ initialized = 1;
+}
+
/*
* Transfer physical topology into a NUMA topology and modify CPU masks
* according to the NUMA topology.
@@ -442,7 +462,7 @@ static void emu_update_cpu_topology(void)
toptree_free(phys);
toptree_to_topology(numa);
toptree_free(numa);
- print_node_to_core_map();
+ pin_all_possible_cpus();
}
/*
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350d859a..26f622b1cd11 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
@@ -25,10 +26,14 @@
* RETURNS:
* Pointer to the new tree node or NULL on error
*/
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
{
- struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+ struct toptree *res;
+ if (slab_is_available())
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ else
+ res = memblock_virt_alloc(sizeof(*res), 8);
if (!res)
return res;
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
* cleanly using toptree_remove. Possible children are freed
* recursively. In the end @cand itself is freed.
*/
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
{
struct toptree *child, *tmp;
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
toptree_remove(cand);
toptree_for_each_child_safe(child, tmp, cand)
toptree_free(child);
- kfree(cand);
+ if (slab_is_available())
+ kfree(cand);
+ else
+ memblock_free_early((unsigned long)cand, sizeof(*cand));
}
/**
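
toptree_alloc() and toptree_free() now work both before and after the slab allocator is up, hence the __ref annotations. The pattern, reduced to a hedged sketch:

    /* Boot-time vs. run-time allocation: memblock before slab is up. */
    static void __ref *alloc_early_or_late(size_t size)
    {
    	if (slab_is_available())
    		return kzalloc(size, GFP_KERNEL);
    	return memblock_virt_alloc(size, 8);
    }

    static void __ref free_early_or_late(void *p, size_t size)
    {
    	if (slab_is_available())
    		kfree(p);
    	else
    		memblock_free_early((unsigned long)p, size);
    }
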
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 15ffc19c8c0c..364b9d824be3 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -180,7 +180,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
struct mod_pci_args args = { 0, 0, 0, 0 };
- if (zdev->fmb)
+ if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
return -EINVAL;
zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
@@ -224,8 +224,8 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
rc = zpci_load(&data, req, offset);
if (!rc) {
- data = data << ((8 - len) * 8);
- data = le64_to_cpu(data);
+ data = le64_to_cpu((__force __le64) data);
+ data >>= (8 - len) * 8;
*val = (u32) data;
} else
*val = 0xffffffff;
@@ -238,8 +238,8 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
u64 data = val;
int rc;
- data = cpu_to_le64(data);
- data = data >> ((8 - len) * 8);
+ data <<= (8 - len) * 8;
+ data = (__force u64) cpu_to_le64(data);
rc = zpci_store(data, req, offset);
return rc;
}
@@ -641,7 +641,7 @@ int pcibios_add_device(struct pci_dev *pdev)
int i;
pdev->dev.groups = zpci_attr_groups;
- pdev->dev.archdata.dma_ops = &s390_pci_dma_ops;
+ pdev->dev.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
@@ -722,6 +722,11 @@ struct dev_pm_ops pcibios_pm_ops = {
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
+ if (zpci_unique_uid) {
+ zdev->domain = (u16) zdev->uid;
+ return 0;
+ }
+
spin_lock(&zpci_domain_lock);
zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
if (zdev->domain == ZPCI_NR_DEVICES) {
@@ -735,6 +740,9 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
static void zpci_free_domain(struct zpci_dev *zdev)
{
+ if (zpci_unique_uid)
+ return;
+
spin_lock(&zpci_domain_lock);
clear_bit(zdev->domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
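
The zpci_cfg_load/store fix above reorders the shift and the byte swap: on store the value is moved into the high-order bytes first and only then converted to little endian, and the load side mirrors this (swap, then shift down). A standalone sketch of the store direction, with __builtin_bswap64 standing in for cpu_to_le64 on big-endian s390:

    #include <stdint.h>
    #include <stdio.h>

    /* Hedged mirror of the store path: shift first, then swap. */
    static uint64_t cfg_store_data(uint32_t val, unsigned int len)
    {
    	uint64_t data = val;

    	data <<= (8 - len) * 8;
    	return __builtin_bswap64(data);
    }

    int main(void)
    {
    	printf("%016llx\n", (unsigned long long)cfg_store_data(0x12345678, 4));
    	return 0;
    }
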
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1a4512c8544a..1c3332ac1957 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -22,6 +22,8 @@
#include <asm/clp.h>
#include <uapi/asm/clp.h>
+bool zpci_unique_uid;
+
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
struct {
@@ -146,6 +148,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
zdev->pft = response->pft;
zdev->vfn = response->vfn;
zdev->uid = response->uid;
+ zdev->fmb_length = sizeof(u32) * response->fmb_len;
memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
if (response->util_str_avail) {
@@ -315,6 +318,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
goto out;
}
+ zpci_unique_uid = rrb->response.uid_checking;
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 38993b156924..c2f786f0ea06 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
int i;
for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
- seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+ seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
atomic64_read(counter));
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 6b2f72f523b9..9081a57fa340 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
/*
* With zdev->tlb_refresh == 0, rpcit is not required to establish new
* translations when previously invalid translation-table entries are
- * validated. With lazy unmap, it also is skipped for previously valid
+ * validated. With lazy unmap, rpcit is skipped for previously valid
* entries, but a global rpcit is then required before any address can
* be re-used, i.e. after each iommu bitmap wrap-around.
*/
- if (!zdev->tlb_refresh &&
- (!s390_iommu_strict ||
- ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
- return 0;
+ if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
+ if (!zdev->tlb_refresh)
+ return 0;
+ } else {
+ if (!s390_iommu_strict)
+ return 0;
+ }
return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
PAGE_ALIGN(size));
@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
- if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ if (!s390_iommu_strict) {
/* global flush before DMA addresses are reused */
if (zpci_refresh_global(zdev))
goto out_error;
@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
if (!zdev->iommu_bitmap)
goto out;
- if (zdev->tlb_refresh || s390_iommu_strict)
+ if (s390_iommu_strict)
bitmap_clear(zdev->iommu_bitmap, offset, size);
else
bitmap_set(zdev->lazy_bitmap, offset, size);
@@ -388,8 +391,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL;
pa = page_to_phys(page);
- memset((void *) pa, 0, size);
-
map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
@@ -419,6 +420,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
enum dma_data_direction dir)
{
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
@@ -426,8 +428,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long pa = 0;
int ret;
- size = PAGE_ALIGN(size);
- dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+ dma_addr_base = dma_alloc_address(dev, nr_pages);
if (dma_addr_base == DMA_ERROR_CODE)
return -ENOMEM;
@@ -436,26 +437,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
flags |= ZPCI_TABLE_PROTECTED;
for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
- pa = page_to_phys(sg_page(s)) + s->offset;
- ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+ pa = page_to_phys(sg_page(s));
+ ret = __dma_update_trans(zdev, pa, dma_addr,
+ s->offset + s->length, flags);
if (ret)
goto unmap;
- dma_addr += s->length;
+ dma_addr += s->offset + s->length;
}
ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
if (ret)
goto unmap;
*handle = dma_addr_base;
- atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+ atomic64_add(nr_pages, &zdev->mapped_pages);
return ret;
unmap:
dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
ZPCI_PTE_INVALID);
- dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+ dma_free_address(dev, dma_addr_base, nr_pages);
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
return ret;
@@ -564,7 +566,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
rc = -ENOMEM;
goto free_dma_table;
}
- if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ if (!s390_iommu_strict) {
zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
@@ -648,7 +650,7 @@ static int __init dma_debug_do_init(void)
}
fs_initcall(dma_debug_do_init);
-struct dma_map_ops s390_pci_dma_ops = {
+const struct dma_map_ops s390_pci_dma_ops = {
.alloc = s390_dma_alloc,
.free = s390_dma_free,
.map_sg = s390_dma_map_sg,
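
Rewritten as a plain decision function, the new __dma_purge_tlb() policy above is: when entries become valid, flush only if the device demands TLB refresh; when entries become invalid, flush only in strict IOMMU mode, since lazy mode defers to one global flush per iommu bitmap wrap-around. A hedged distillation (fragment, not standalone):

    /* true if rpcit (zpci_refresh_trans) must be issued. */
    static bool need_tlb_flush(bool validating, bool tlb_refresh, bool strict)
    {
    	if (validating)
    		return tlb_refresh;
    	return strict;
    }
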
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index 6d9814c9df2b..4b5e1e499527 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -9,7 +9,5 @@ define filechk_facilities.h
$(obj)/gen_facilities
endef
-$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
-
include/generated/facilities.h: $(obj)/gen_facilities FORCE
$(call filechk,facilities.h)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index fe4e6c910dd7..0cf802de52a1 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -7,13 +7,85 @@
*
*/
-#define S390_GEN_FACILITIES_C
-
#include <strings.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
-#include <asm/facilities_src.h>
+
+struct facility_def {
+ char *name;
+ int *bits;
+};
+
+static struct facility_def facility_defs[] = {
+ {
+ /*
+ * FACILITIES_ALS contains the list of facilities that are
+ * required to run a kernel that is compiled e.g. with
+ * -march=<machine>.
+ */
+ .name = "FACILITIES_ALS",
+ .bits = (int[]){
+#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+ 18, /* long displacement facility */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ 7, /* stfle */
+ 17, /* message security assist */
+ 21, /* extended-immediate facility */
+ 25, /* store clock fast */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ 27, /* mvcos */
+ 32, /* compare and swap and store */
+ 33, /* compare and swap and store 2 */
+ 34, /* general extension facility */
+ 35, /* execute extensions */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ 45, /* fast-BCR, etc. */
+#endif
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ 49, /* misc-instruction-extensions */
+ 52, /* interlocked facility 2 */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+ 53, /* load-and-zero-rightmost-byte, etc. */
+#endif
+ -1 /* END */
+ }
+ },
+ {
+ .name = "FACILITIES_KVM",
+ .bits = (int[]){
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+ 2, /* z/Arch mode active */
+ 3, /* DAT-enhancement */
+ 4, /* idte segment table */
+ 5, /* idte region table */
+ 6, /* ASN-and-LX reuse */
+ 7, /* stfle */
+ 8, /* enhanced-DAT 1 */
+ 9, /* sense-running-status */
+ 10, /* conditional sske */
+ 13, /* ipte-range */
+ 14, /* nonquiescing key-setting */
+ 73, /* transactional execution */
+ 75, /* access-exception-fetch/store indication */
+ 76, /* msa extension 3 */
+ 77, /* msa extension 4 */
+ 78, /* enhanced-DAT 2 */
+ 130, /* instruction-execution-protection */
+ 131, /* enhanced-SOP 2 and side-effect */
+ -1 /* END */
+ }
+ },
+};
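
STFLE facility bits are numbered from the most significant bit of each 64-bit word, which is what the generator has to account for when it turns these lists into bit masks; each list is terminated by the -1 sentinel. A standalone sketch of that packing:

    #include <stdio.h>

    /* Facility bit n lives in word n / 64, at position 63 - (n % 64). */
    static void pack_facility_bits(const int *bits, unsigned long long *words)
    {
    	int i;

    	for (i = 0; bits[i] != -1; i++)
    		words[bits[i] / 64] |= 1ULL << (63 - (bits[i] % 64));
    }

    int main(void)
    {
    	unsigned long long words[3] = { 0 };
    	static const int bits[] = { 0, 1, 7, 130, -1 };

    	pack_facility_bits(bits, words);
    	printf("%016llx %016llx %016llx\n", words[0], words[1], words[2]);
    	return 0;
    }
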
static void print_facility_list(struct facility_def *def)
{