From f4661d293eb2d01dfc742982761a36fafe456d46 Mon Sep 17 00:00:00 2001 From: Jacek Tomaka Date: Sat, 25 Aug 2018 11:50:39 +0800 Subject: [PATCH 01/14] x86/microcode: Make revision and processor flags world-readable The microcode revision is already readable for non-root users via /proc/cpuinfo. Thus, there's no reason to keep the same information readable by root only in /sys/devices/system/cpu/cpuX/microcode/. Make .../processor_flags world-readable too, while at it. Reported-by: Tim Burgess Signed-off-by: Jacek Tomaka Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/r/20180825035039.14409-1-jacekt@dugeo.com --- arch/x86/kernel/cpu/microcode/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b9bc8a1a584e..2637ff09d6a0 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -666,8 +666,8 @@ static ssize_t pf_show(struct device *dev, } static DEVICE_ATTR_WO(reload); -static DEVICE_ATTR(version, 0400, version_show, NULL); -static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL); +static DEVICE_ATTR(version, 0444, version_show, NULL); +static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL); static struct attribute *mc_default_attrs[] = { &dev_attr_version.attr, From 4a60aa05a0634241ce17f957bf9fb5ac1eed6576 Mon Sep 17 00:00:00 2001 From: Allan Xavier Date: Fri, 7 Sep 2018 08:12:01 -0500 Subject: [PATCH 02/14] objtool: Support per-function rodata sections Add support for processing switch jump tables in objects with multiple .rodata sections, such as those created by '-ffunction-sections' and '-fdata-sections'. Currently, objtool always looks in .rodata for jump table information, which results in many "sibling call from callable instruction with modified stack frame" warnings with objects compiled using those flags. The fix consists of three parts:

1. Flagging all .rodata sections when importing ELF information for easier checking later.
2. Keeping a reference to the section each relocation is from in order to get the list_head for the other relocations in that section.
3. Finding jump tables by following relocations to .rodata sections, rather than always referencing a single global .rodata section.

The patch has been tested without data sections enabled and no differences in the resulting ORC unwind information were seen. Note that as objtool adds terminators to the end of each .text section, the unwind information generated for a function+data sections build and for a normal build isn't directly comparable. Manual inspection suggests that objtool is now generating the correct information, or at least making more of an effort to do so than it did previously.
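To picture the kind of code this affects, here is a sketch (a made-up example, not taken from the kernel) of a switch that GCC typically lowers to an anonymous jump table:

	/*
	 * Dispatching to distinct functions keeps GCC from converting the
	 * switch into a plain value-lookup table, so at -O2 it usually
	 * emits an indirect jump through an anonymous table.  Built
	 * normally the table lands in .rodata; built with
	 * -ffunction-sections/-fdata-sections it lands in a per-function
	 * rodata section, which is what the new relocation-following
	 * lookup handles.
	 */
	extern void op0(void); extern void op1(void); extern void op2(void);
	extern void op3(void); extern void op4(void); extern void op5(void);

	void dispatch(int op)
	{
		switch (op) {
		case 0: op0(); break;
		case 1: op1(); break;
		case 2: op2(); break;
		case 3: op3(); break;
		case 4: op4(); break;
		case 5: op5(); break;
		}
	}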
Signed-off-by: Allan Xavier Signed-off-by: Josh Poimboeuf Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/099bdc375195c490dda04db777ee0b95d566ded1.1536325914.git.jpoimboe@redhat.com --- tools/objtool/check.c | 38 ++++++++++++++++++++++++++++++++------ tools/objtool/check.h | 4 ++-- tools/objtool/elf.c | 1 + tools/objtool/elf.h | 3 ++- 4 files changed, 37 insertions(+), 9 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 2928939b98ec..0414a0d52262 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -836,7 +836,7 @@ static int add_switch_table(struct objtool_file *file, struct instruction *insn, struct symbol *pfunc = insn->func->pfunc; unsigned int prev_offset = 0; - list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { + list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) { if (rela == next_table) break; @@ -926,6 +926,7 @@ static struct rela *find_switch_table(struct objtool_file *file, { struct rela *text_rela, *rodata_rela; struct instruction *orig_insn = insn; + struct section *rodata_sec; unsigned long table_offset; /* @@ -953,10 +954,13 @@ static struct rela *find_switch_table(struct objtool_file *file, /* look for a relocation which references .rodata */ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (!text_rela || text_rela->sym != file->rodata->sym) + if (!text_rela || text_rela->sym->type != STT_SECTION || + !text_rela->sym->sec->rodata) continue; table_offset = text_rela->addend; + rodata_sec = text_rela->sym->sec; + if (text_rela->type == R_X86_64_PC32) table_offset += 4; @@ -964,10 +968,10 @@ static struct rela *find_switch_table(struct objtool_file *file, * Make sure the .rodata address isn't associated with a * symbol. gcc jump tables are anonymous data. */ - if (find_symbol_containing(file->rodata, table_offset)) + if (find_symbol_containing(rodata_sec, table_offset)) continue; - rodata_rela = find_rela_by_dest(file->rodata, table_offset); + rodata_rela = find_rela_by_dest(rodata_sec, table_offset); if (rodata_rela) { /* * Use of RIP-relative switch jumps is quite rare, and @@ -1052,7 +1056,7 @@ static int add_switch_table_alts(struct objtool_file *file) struct symbol *func; int ret; - if (!file->rodata || !file->rodata->rela) + if (!file->rodata) return 0; for_each_sec(file, sec) { @@ -1198,10 +1202,33 @@ static int read_retpoline_hints(struct objtool_file *file) return 0; } +static void mark_rodata(struct objtool_file *file) +{ + struct section *sec; + bool found = false; + + /* + * This searches for the .rodata section or multiple .rodata.func_name + * sections if -fdata-sections is being used. The .str.1.1 and .str.1.8 + * rodata sections are ignored as they don't contain jump tables. 
+ */ + for_each_sec(file, sec) { + if (!strncmp(sec->name, ".rodata", 7) && + !strstr(sec->name, ".str1.")) { + sec->rodata = true; + found = true; + } + } + + file->rodata = found; +} + static int decode_sections(struct objtool_file *file) { int ret; + mark_rodata(file); + ret = decode_instructions(file); if (ret) return ret; @@ -2171,7 +2198,6 @@ int check(const char *_objname, bool orc) INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); - file.rodata = find_section_by_name(file.elf, ".rodata"); file.c_file = find_section_by_name(file.elf, ".comment"); file.ignore_unreachables = no_unreachable; file.hints = false; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 95700a2bcb7c..e6e8a655b556 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -60,8 +60,8 @@ struct objtool_file { struct elf *elf; struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 16); - struct section *rodata, *whitelist; - bool ignore_unreachables, c_file, hints; + struct section *whitelist; + bool ignore_unreachables, c_file, hints, rodata; }; int check(const char *objname, bool orc); diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 7ec85d567598..f7082de1ee82 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -379,6 +379,7 @@ static int read_relas(struct elf *elf) rela->offset = rela->rela.r_offset; symndx = GELF_R_SYM(rela->rela.r_info); rela->sym = find_symbol_by_index(elf, symndx); + rela->rela_sec = sec; if (!rela->sym) { WARN("can't find rela entry symbol %d for %s", symndx, sec->name); diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index de5cd2ddded9..bc97ed86b9cd 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -48,7 +48,7 @@ struct section { char *name; int idx; unsigned int len; - bool changed, text; + bool changed, text, rodata; }; struct symbol { @@ -68,6 +68,7 @@ struct rela { struct list_head list; struct hlist_node hash; GElf_Rela rela; + struct section *rela_sec; struct symbol *sym; unsigned int type; unsigned long offset; From d2266bbfa9e3e32e3b642965088ca461bd24a94f Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Wed, 3 Oct 2018 00:49:21 +0800 Subject: [PATCH 03/14] x86/earlyprintk: Add a force option for pciserial device The "pciserial" earlyprintk variant helps much on many modern x86 platforms, but unfortunately there are still some platforms with PCI UART devices which have the wrong PCI class code. In that case, the current class code check does not allow for them to be used for logging. Add a sub-option "force" which overrides the class code check and thus the use of such device can be enforced. [ bp: massage formulations. ] Suggested-by: Borislav Petkov Signed-off-by: Feng Tang Signed-off-by: Borislav Petkov Cc: "H. Peter Anvin" Cc: "Stuart R . 
Anderson" Cc: Bjorn Helgaas Cc: David Rientjes Cc: Feng Tang Cc: Frederic Weisbecker Cc: Greg Kroah-Hartman Cc: H Peter Anvin Cc: Ingo Molnar Cc: Jiri Kosina Cc: Jonathan Corbet Cc: Kai-Heng Feng Cc: Kate Stewart Cc: Konrad Rzeszutek Wilk Cc: Peter Zijlstra Cc: Philippe Ombredanne Cc: Thomas Gleixner Cc: Thymo van Beers Cc: alan@linux.intel.com Cc: linux-doc@vger.kernel.org Link: http://lkml.kernel.org/r/20181002164921.25833-1-feng.tang@intel.com --- .../admin-guide/kernel-parameters.txt | 6 +++- arch/x86/kernel/early_printk.c | 29 ++++++++++++------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 92eb1f42240d..34e6800dea0e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1063,7 +1063,7 @@ earlyprintk=serial[,0x...[,baudrate]] earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] - earlyprintk=pciserial,bus:device.function[,baudrate] + earlyprintk=pciserial[,force],bus:device.function[,baudrate] earlyprintk=xdbc[xhciController#] earlyprintk is useful when the kernel crashes before @@ -1095,6 +1095,10 @@ The sclp output can only be used on s390. + The optional "force" to "pciserial" enables use of a + PCI device even when its classcode is not of the + UART class. + edac_report= [HW,EDAC] Control how to report EDAC event Format: {"on" | "off" | "force"} on: enable EDAC to report H/W event. May be overridden diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7..374a52fa5296 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset) * early_pci_serial_init() * * This function is invoked when the early_printk param starts with "pciserial" - * The rest of the param should be ",B:D.F,baud" where B, D & F describe the - * location of a PCI device that must be a UART device. + * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe + * the location of a PCI device that must be a UART device. "force" is optional + * and overrides the use of an UART device with a wrong PCI class code. 
*/ static __init void early_pci_serial_init(char *s) { @@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s) u32 classcode, bar0; u16 cmdreg; char *e; + int force = 0; - - /* - * First, part the param to get the BDF values - */ if (*s == ',') ++s; if (*s == 0) return; + /* Force the use of an UART device with wrong class code */ + if (!strncmp(s, "force,", 6)) { + force = 1; + s += 6; + } + + /* + * Part the param to get the BDF values + */ bus = (u8)simple_strtoul(s, &e, 16); s = e; if (*s != ':') @@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s) s++; /* - * Second, find the device from the BDF + * Find the device from the BDF */ cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND); classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); @@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s) */ if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) && (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) || - (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ - return; + (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ { + if (!force) + return; + } /* * Determine if it is IO or memory mapped @@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s) } /* - * Lastly, initialize the hardware + * Initialize the hardware */ if (*s) { if (strcmp(s, "nocfg") == 0) From 33823f4d63f7a010653d219800539409a78ef4be Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Wed, 24 Oct 2018 14:57:16 -0700 Subject: [PATCH 04/14] x86/cpufeatures: Enumerate MOVDIRI instruction MOVDIRI moves doubleword or quadword from register to memory through direct store which is implemented by using write combining (WC) for writing data directly into memory without caching the data. Programmable agents can handle streaming offload (e.g. high speed packet processing in network). Hardware implements a doorbell (tail pointer) register that is updated by software when adding new work-elements to the streaming offload work-queue. MOVDIRI can be used as the doorbell write which is a 4-byte or 8-byte uncachable write to MMIO. MOVDIRI has lower overhead than other ways to write the doorbell. Availability of the MOVDIRI instruction is indicated by the presence of the CPUID feature flag MOVDIRI(CPUID.0x07.0x0:ECX[bit 27]). Please check the latest Intel Architecture Instruction Set Extensions and Future Features Programming Reference for more details on the CPUID feature MOVDIRI flag. Signed-off-by: Fenghua Yu Cc: Andy Lutomirski Cc: Ashok Raj Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Ravi V Shankar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1540418237-125817-2-git-send-email-fenghua.yu@intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 89a048c2faec..90934ee7b79a 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -331,6 +331,7 @@ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ +#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ From ace6485a03266cc3c198ce8e927a1ce0ce139699 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Wed, 24 Oct 2018 14:57:17 -0700 Subject: [PATCH 05/14] x86/cpufeatures: Enumerate MOVDIR64B instruction MOVDIR64B moves 64-bytes as direct-store with 64-bytes write atomicity. Direct store is implemented by using write combining (WC) for writing data directly into memory without caching the data. In low latency offload (e.g. Non-Volatile Memory, etc), MOVDIR64B writes work descriptors (and data in some cases) to device-hosted work-queues atomically without cache pollution. Availability of the MOVDIR64B instruction is indicated by the presence of the CPUID feature flag MOVDIR64B (CPUID.0x07.0x0:ECX[bit 28]). Please check the latest Intel Architecture Instruction Set Extensions and Future Features Programming Reference for more details on the CPUID feature MOVDIR64B flag. Signed-off-by: Fenghua Yu Cc: Andy Lutomirski Cc: Ashok Raj Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Ravi V Shankar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1540418237-125817-3-git-send-email-fenghua.yu@intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 90934ee7b79a..28c4a502b419 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -332,6 +332,7 @@ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ +#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ From 0e96f31ea4249b1e94e266fe4dff908c2983a9b3 Mon Sep 17 00:00:00 2001 From: Jordan Borgner Date: Sun, 28 Oct 2018 12:58:28 +0000 Subject: [PATCH 06/14] x86: Clean up 'sizeof x' => 'sizeof(x)' "sizeof(x)" is the canonical coding style used in arch/x86 most of the time. Fix the few places that didn't follow the convention. (Also do some whitespace cleanups in a few places while at it.) [ mingo: Rewrote the changelog. ] Signed-off-by: Jordan Borgner Cc: Borislav Petkov Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20181028125828.7rgammkgzep2wpam@JordanDesktop Signed-off-by: Ingo Molnar --- arch/x86/boot/cpucheck.c | 2 +- arch/x86/boot/early_serial_console.c | 4 +-- arch/x86/boot/edd.c | 6 ++-- arch/x86/boot/main.c | 4 +-- arch/x86/boot/memory.c | 2 +- arch/x86/boot/regs.c | 2 +- arch/x86/boot/video-vesa.c | 6 ++-- arch/x86/boot/video.c | 2 +- arch/x86/events/intel/core.c | 2 +- arch/x86/kernel/cpu/common.c | 4 +-- arch/x86/kernel/cpu/mcheck/mce.c | 2 +- arch/x86/kernel/cpu/mtrr/generic.c | 2 +- arch/x86/kernel/cpu/mtrr/if.c | 6 ++-- arch/x86/kernel/head64.c | 2 +- arch/x86/kernel/msr.c | 8 +++--- arch/x86/kvm/emulate.c | 22 +++++++-------- arch/x86/kvm/lapic.c | 2 +- arch/x86/kvm/x86.c | 42 ++++++++++++++-------------- arch/x86/tools/relocs.c | 4 +-- arch/x86/um/asm/elf.h | 2 +- 20 files changed, 63 insertions(+), 63 deletions(-) diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 8f0c4c9fc904..51079fc9298f 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c @@ -113,7 +113,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) { int err; - memset(&cpu.flags, 0, sizeof cpu.flags); + memset(&cpu.flags, 0, sizeof(cpu.flags)); cpu.level = 3; if (has_eflag(X86_EFLAGS_AC)) diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c index b25c53527a94..023bf1c3de8b 100644 --- a/arch/x86/boot/early_serial_console.c +++ b/arch/x86/boot/early_serial_console.c @@ -50,7 +50,7 @@ static void parse_earlyprintk(void) int pos = 0; int port = 0; - if (cmdline_find_option("earlyprintk", arg, sizeof arg) > 0) { + if (cmdline_find_option("earlyprintk", arg, sizeof(arg)) > 0) { char *e; if (!strncmp(arg, "serial", 6)) { @@ -124,7 +124,7 @@ static void parse_console_uart8250(void) * console=uart8250,io,0x3f8,115200n8 * need to make sure it is last one console ! 
*/ - if (cmdline_find_option("console", optstr, sizeof optstr) <= 0) + if (cmdline_find_option("console", optstr, sizeof(optstr)) <= 0) return; options = optstr; diff --git a/arch/x86/boot/edd.c b/arch/x86/boot/edd.c index 223e42527077..6c176b6a42ad 100644 --- a/arch/x86/boot/edd.c +++ b/arch/x86/boot/edd.c @@ -76,7 +76,7 @@ static int get_edd_info(u8 devno, struct edd_info *ei) { struct biosregs ireg, oreg; - memset(ei, 0, sizeof *ei); + memset(ei, 0, sizeof(*ei)); /* Check Extensions Present */ @@ -133,7 +133,7 @@ void query_edd(void) struct edd_info ei, *edp; u32 *mbrptr; - if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) { + if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) { if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) { do_edd = 1; do_mbr = 0; @@ -166,7 +166,7 @@ void query_edd(void) */ if (!get_edd_info(devno, &ei) && boot_params.eddbuf_entries < EDDMAXNR) { - memcpy(edp, &ei, sizeof ei); + memcpy(edp, &ei, sizeof(ei)); edp++; boot_params.eddbuf_entries++; } diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c index 9bcea386db65..73532543d689 100644 --- a/arch/x86/boot/main.c +++ b/arch/x86/boot/main.c @@ -36,8 +36,8 @@ static void copy_boot_params(void) const struct old_cmdline * const oldcmd = (const struct old_cmdline *)OLD_CL_ADDRESS; - BUILD_BUG_ON(sizeof boot_params != 4096); - memcpy(&boot_params.hdr, &hdr, sizeof hdr); + BUILD_BUG_ON(sizeof(boot_params) != 4096); + memcpy(&boot_params.hdr, &hdr, sizeof(hdr)); if (!boot_params.hdr.cmd_line_ptr && oldcmd->cl_magic == OLD_CL_MAGIC) { diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index d9c28c87e477..7df2b28207be 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c @@ -26,7 +26,7 @@ static int detect_memory_e820(void) initregs(&ireg); ireg.ax = 0xe820; - ireg.cx = sizeof buf; + ireg.cx = sizeof(buf); ireg.edx = SMAP; ireg.di = (size_t)&buf; diff --git a/arch/x86/boot/regs.c b/arch/x86/boot/regs.c index c0fb356a3092..2fe3616ba161 100644 --- a/arch/x86/boot/regs.c +++ b/arch/x86/boot/regs.c @@ -21,7 +21,7 @@ void initregs(struct biosregs *reg) { - memset(reg, 0, sizeof *reg); + memset(reg, 0, sizeof(*reg)); reg->eflags |= X86_EFLAGS_CF; reg->ds = ds(); reg->es = ds(); diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index ba3e100654db..3ecc11a9c440 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -62,7 +62,7 @@ static int vesa_probe(void) if (mode & ~0x1ff) continue; - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ ireg.ax = 0x4f01; ireg.cx = mode; @@ -109,7 +109,7 @@ static int vesa_set_mode(struct mode_info *mode) int is_graphic; u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA; - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ initregs(&ireg); ireg.ax = 0x4f01; @@ -241,7 +241,7 @@ void vesa_store_edid(void) struct biosregs ireg, oreg; /* Apparently used as a nonsense token... 
*/ - memset(&boot_params.edid_info, 0x13, sizeof boot_params.edid_info); + memset(&boot_params.edid_info, 0x13, sizeof(boot_params.edid_info)); if (vginfo.version < 0x0200) return; /* EDID requires VBE 2.0+ */ diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 77780e386e9b..ac89b6624a40 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -115,7 +115,7 @@ static unsigned int get_entry(void) } else if ((key >= '0' && key <= '9') || (key >= 'A' && key <= 'Z') || (key >= 'a' && key <= 'z')) { - if (len < sizeof entry_buf) { + if (len < sizeof(entry_buf)) { entry_buf[len++] = key; putchar(key); } diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 0fb8659b20d8..273c62e81546 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4535,7 +4535,7 @@ __init int intel_pmu_init(void) } } - snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name); + snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); if (version >= 2 && extra_attr) { x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 660d0b22e962..3a4eb2b25d71 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1074,7 +1074,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) #endif c->x86_cache_alignment = c->x86_clflush_size; - memset(&c->x86_capability, 0, sizeof c->x86_capability); + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); c->extended_cpuid_level = 0; if (!have_cpuid_p()) @@ -1317,7 +1317,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) c->x86_virt_bits = 32; #endif c->x86_cache_alignment = c->x86_clflush_size; - memset(&c->x86_capability, 0, sizeof c->x86_capability); + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); generic_identify(c); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8cb3c02980cf..8c66d2fc8f81 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -2215,7 +2215,7 @@ static int mce_device_create(unsigned int cpu) if (dev) return 0; - dev = kzalloc(sizeof *dev, GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->id = cpu; diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index e12ee86906c6..86e277f8daf4 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -798,7 +798,7 @@ static void generic_set_all(void) local_irq_restore(flags); /* Use the atomic bitops to update the global mask */ - for (count = 0; count < sizeof mask * 8; ++count) { + for (count = 0; count < sizeof(mask) * 8; ++count) { if (mask & 0x01) set_bit(count, &smp_changes_mask); mask >>= 1; diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 40eee6cc4124..2e173d47b450 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -174,12 +174,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) case MTRRIOC_SET_PAGE_ENTRY: case MTRRIOC_DEL_PAGE_ENTRY: case MTRRIOC_KILL_PAGE_ENTRY: - if (copy_from_user(&sentry, arg, sizeof sentry)) + if (copy_from_user(&sentry, arg, sizeof(sentry))) return -EFAULT; break; case MTRRIOC_GET_ENTRY: case MTRRIOC_GET_PAGE_ENTRY: - if (copy_from_user(&gentry, arg, sizeof gentry)) + if (copy_from_user(&gentry, arg, sizeof(gentry))) return -EFAULT; break; #ifdef CONFIG_COMPAT @@ -332,7 +332,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned 
long __arg) switch (cmd) { case MTRRIOC_GET_ENTRY: case MTRRIOC_GET_PAGE_ENTRY: - if (copy_to_user(arg, &gentry, sizeof gentry)) + if (copy_to_user(arg, &gentry, sizeof(gentry))) err = -EFAULT; break; #ifdef CONFIG_COMPAT diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 5dc377dc9d7b..7663a8eb602b 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -385,7 +385,7 @@ static void __init copy_bootdata(char *real_mode_data) */ sme_map_bootdata(real_mode_data); - memcpy(&boot_params, real_mode_data, sizeof boot_params); + memcpy(&boot_params, real_mode_data, sizeof(boot_params)); sanitize_boot_params(&boot_params); cmd_line_ptr = get_cmd_line_ptr(); if (cmd_line_ptr) { diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index ef688804f80d..4588414e2561 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -115,14 +115,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) err = -EBADF; break; } - if (copy_from_user(®s, uregs, sizeof regs)) { + if (copy_from_user(®s, uregs, sizeof(regs))) { err = -EFAULT; break; } err = rdmsr_safe_regs_on_cpu(cpu, regs); if (err) break; - if (copy_to_user(uregs, ®s, sizeof regs)) + if (copy_to_user(uregs, ®s, sizeof(regs))) err = -EFAULT; break; @@ -131,14 +131,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) err = -EBADF; break; } - if (copy_from_user(®s, uregs, sizeof regs)) { + if (copy_from_user(®s, uregs, sizeof(regs))) { err = -EFAULT; break; } err = wrmsr_safe_regs_on_cpu(cpu, regs); if (err) break; - if (copy_to_user(uregs, ®s, sizeof regs)) + if (copy_to_user(uregs, ®s, sizeof(regs))) err = -EFAULT; break; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 34edf198708f..78e430f4e15c 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1509,7 +1509,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; - return linear_read_system(ctxt, addr, desc, sizeof *desc); + return linear_read_system(ctxt, addr, desc, sizeof(*desc)); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, @@ -1522,7 +1522,7 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, struct desc_struct desc; u16 sel; - memset (dt, 0, sizeof *dt); + memset(dt, 0, sizeof(*dt)); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; @@ -1586,7 +1586,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return linear_write_system(ctxt, addr, desc, sizeof *desc); + return linear_write_system(ctxt, addr, desc, sizeof(*desc)); } static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, @@ -1604,7 +1604,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 dummy; u32 base3 = 0; - memset(&seg_desc, 0, sizeof seg_desc); + memset(&seg_desc, 0, sizeof(seg_desc)); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for @@ -3075,17 +3075,17 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, int ret; u32 new_tss_base = get_desc_base(new_desc); - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); - ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; @@ -3094,7 +3094,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, ret = linear_write_system(ctxt, new_tss_base, &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link); + sizeof(tss_seg.prev_task_link)); if (ret != X86EMUL_CONTINUE) return ret; } @@ -3216,7 +3216,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; @@ -3228,7 +3228,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, if (ret != X86EMUL_CONTINUE) return ret; - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; @@ -3237,7 +3237,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, ret = linear_write_system(ctxt, new_tss_base, &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link); + sizeof(tss_seg.prev_task_link)); if (ret != X86EMUL_CONTINUE) return ret; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3cd227ff807f..89db20f8cb70 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2409,7 +2409,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) r = kvm_apic_state_fixup(vcpu, s, true); if (r) return r; - memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); + memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); recalculate_apic_map(vcpu->kvm); kvm_apic_set_version(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 66d66d77caee..5cd5647120f2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2924,7 +2924,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, unsigned size; r = -EFAULT; - if (copy_from_user(&msrs, user_msrs, sizeof msrs)) + if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) goto out; r = -E2BIG; @@ -3091,11 +3091,11 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned n; r = -EFAULT; - if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; - if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) goto out; r = -E2BIG; if (n < msr_list.nmsrs) @@ -3117,7 +3117,7 @@ long kvm_arch_dev_ioctl(struct file *filp, struct kvm_cpuid2 cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = 
kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, @@ -3126,7 +3126,7 @@ long kvm_arch_dev_ioctl(struct file *filp, goto out; r = -EFAULT; - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) goto out; r = 0; break; @@ -3894,7 +3894,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_interrupt irq; r = -EFAULT; - if (copy_from_user(&irq, argp, sizeof irq)) + if (copy_from_user(&irq, argp, sizeof(irq))) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; @@ -3912,7 +3912,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); break; @@ -3922,7 +3922,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid2 cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); @@ -3933,14 +3933,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid2 cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) goto out; r = 0; break; @@ -3961,13 +3961,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_tpr_access_ctl tac; r = -EFAULT; - if (copy_from_user(&tac, argp, sizeof tac)) + if (copy_from_user(&tac, argp, sizeof(tac))) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; - if (copy_to_user(argp, &tac, sizeof tac)) + if (copy_to_user(argp, &tac, sizeof(tac))) goto out; r = 0; break; @@ -3980,7 +3980,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (!lapic_in_kernel(vcpu)) goto out; r = -EFAULT; - if (copy_from_user(&va, argp, sizeof va)) + if (copy_from_user(&va, argp, sizeof(va))) goto out; idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); @@ -3991,7 +3991,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, u64 mcg_cap; r = -EFAULT; - if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) + if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; @@ -4000,7 +4000,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_x86_mce mce; r = -EFAULT; - if (copy_from_user(&mce, argp, sizeof mce)) + if (copy_from_user(&mce, argp, sizeof(mce))) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; @@ -4536,7 +4536,7 @@ long kvm_arch_vm_ioctl(struct file *filp, if (kvm->created_vcpus) goto set_identity_unlock; r = -EFAULT; - if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) + if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) goto set_identity_unlock; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); set_identity_unlock: @@ -4620,7 +4620,7 @@ set_identity_unlock: if (r) goto get_irqchip_out; r = -EFAULT; - if (copy_to_user(argp, chip, sizeof *chip)) + if (copy_to_user(argp, chip, sizeof(*chip))) goto get_irqchip_out; r = 0; get_irqchip_out: @@ -4666,7 +4666,7 @@ set_identity_unlock: } case KVM_SET_PIT: { r = -EFAULT; - if (copy_from_user(&u.ps, argp, sizeof u.ps)) + if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 
goto out; r = -ENXIO; if (!kvm->arch.vpit) @@ -8205,7 +8205,7 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); - memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); + memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, @@ -8509,7 +8509,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; - memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); + memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); vcpu_put(vcpu); return 0; @@ -8530,7 +8530,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; - memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); + memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); vcpu_put(vcpu); return 0; diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 0b08067c45f3..b629f6992d9f 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -130,7 +130,7 @@ static void regex_init(int use_real_mode) REG_EXTENDED|REG_NOSUB); if (err) { - regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf); + regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf)); die("%s", errbuf); } } @@ -405,7 +405,7 @@ static void read_shdrs(FILE *fp) } for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; - if (fread(&shdr, sizeof shdr, 1, fp) != 1) + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) die("Cannot read ELF section headers %d/%d: %s\n", i, ehdr.e_shnum, strerror(errno)); sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name); diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h index 413f3519d9a1..c907b20d4993 100644 --- a/arch/x86/um/asm/elf.h +++ b/arch/x86/um/asm/elf.h @@ -194,7 +194,7 @@ extern unsigned long um_vdso_addr; typedef unsigned long elf_greg_t; -#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) +#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; From 2022cceb4e30f1bb4c84d40ffa705aa8d8d68adb Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Sat, 27 Oct 2018 00:20:04 +0200 Subject: [PATCH 07/14] x86/traps: Use format string with panic() call Building with -Wformat-nonliteral gives: arch/x86/kernel/traps.c:334:2: warning: format not a string literal and no format arguments [-Wformat-nonliteral] panic(message); handle_stack_overflow() can only be called from two places (kernel/traps.c and via inline asm in mm/fault.c), in both cases with a string not containing format specifiers, so we might as well silence this warning using "%s" as a format string. 
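For illustration, the failure mode that the literal format string avoids (hypothetical message text, not from the kernel):

	const char *message = "stack guard page was hit at %p";

	panic(message);       /* the "%p" makes vsnprintf() fetch a
	                         nonexistent vararg: garbage or a fault */
	panic("%s", message); /* prints the text verbatim - always safe */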
Signed-off-by: Rasmus Villemoes Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20181026222004.14193-1-linux@rasmusvillemoes.dk Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 8f6dcd88202e..9b7c4ca8f0a7 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -306,7 +306,7 @@ __visible void __noreturn handle_stack_overflow(const char *message, die(message, regs, 0); /* Be absolutely certain we don't return. */ - panic(message); + panic("%s", message); } #endif From 8af1909580595a303b03d5999e410d407b7a6db7 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 29 Oct 2018 16:01:16 +0100 Subject: [PATCH 08/14] x86/paravirt: Remove GPL from pv_ops export Commit 5c83511bdb9832 ("x86/paravirt: Use a single ops structure") introduced a regression for out-of-tree modules using spinlocks, as pv_lock_ops was exported via EXPORT_SYMBOL(), while the new pv_ops structure now containing the pv lock operations is exported via EXPORT_SYMBOL_GPL(). Change that by using EXPORT_SYMBOL(pv_ops). Fixes: 5c83511bdb9832 ("x86/paravirt: Use a single ops structure") Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: virtualization@lists.linux-foundation.org Cc: akataria@vmware.com Cc: rusty@rustcorp.com.au Cc: boris.ostrovsky@oracle.com Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20181029150116.25372-1-jgross@suse.com --- arch/x86/kernel/paravirt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e4d4df37922a..45123b116c05 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -483,5 +483,5 @@ NOKPROBE_SYMBOL(native_set_debugreg); NOKPROBE_SYMBOL(native_load_idt); #endif -EXPORT_SYMBOL_GPL(pv_ops); +EXPORT_SYMBOL(pv_ops); EXPORT_SYMBOL_GPL(pv_info); From f77084d96355f5fba8e2c1fb3a51a393b1570de7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 17 Oct 2018 12:34:32 +0200 Subject: [PATCH 09/14] x86/mm/pat: Disable preemption around __flush_tlb_all() The WARN_ON_ONCE(__read_cr3() != build_cr3()) in switch_mm_irqs_off() triggers every once in a while during a snapshotted system upgrade. The warning triggers since commit decab0888e6e ("x86/mm: Remove preempt_disable/enable() from __native_flush_tlb()"). The callchain is: get_page_from_freelist() -> post_alloc_hook() -> __kernel_map_pages() with CONFIG_DEBUG_PAGEALLOC enabled. Disable preemption during CR3 reset / __flush_tlb_all() and add a comment explaining why preemption has to be disabled, so it won't be removed accidentally. Add another preemptible() check in __flush_tlb_all() to catch callers with preemption enabled when PGE is enabled, because the PGE path does not reach the warning in __native_flush_tlb(). Suggested by Andy Lutomirski.
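A condensed sketch of why the flush must not be preempted, simplified from the kernel code after decab0888e6e (most details elided):

	static inline void __native_flush_tlb(void)
	{
		/*
		 * The CR3 read and write are not atomic.  If the task is
		 * preempted in between and migrates to another CPU, the
		 * value written back encodes the old CPU's ASID, which is
		 * exactly the mismatch that the WARN_ON_ONCE() in
		 * switch_mm_irqs_off() then reports.
		 */
		native_write_cr3(__native_read_cr3());
	}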
Fixes: decab0888e6e ("x86/mm: Remove preempt_disable/enable() from __native_flush_tlb()") Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Dave Hansen Cc: Peter Zijlstra Cc: Borislav Petkov Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20181017103432.zgv46nlu3hc7k4rq@linutronix.de --- arch/x86/include/asm/tlbflush.h | 6 ++++++ arch/x86/mm/pageattr.c | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 323a313947e0..d760611cfc35 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -453,6 +453,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr) */ static inline void __flush_tlb_all(void) { + /* + * This is to catch users with enabled preemption and the PGE feature + * and don't trigger the warning in __native_flush_tlb(). + */ + VM_WARN_ON_ONCE(preemptible()); + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); } else { diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 62bb30b4bd2a..a1004dec98ea 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -2309,9 +2309,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) /* * We should perform an IPI and flush all tlbs, - * but that can deadlock->flush only current cpu: + * but that can deadlock->flush only current cpu. + * Preemption needs to be disabled around __flush_tlb_all() due to + * CR3 reload in __native_flush_tlb(). */ + preempt_disable(); __flush_tlb_all(); + preempt_enable(); arch_flush_lazy_mmu_mode(); } From 7847c7be0481558f17e3ef3b03f573677fd30d29 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 30 Oct 2018 07:33:01 +0100 Subject: [PATCH 10/14] x86/paravirt: Remove unused _paravirt_ident_32 There is no user of _paravirt_ident_32 left in the tree. Remove it together with the related paravirt_patch_ident_32(). paravirt_patch_ident_64() can be moved inside CONFIG_PARAVIRT_XXL=y. 
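For background on what the ident ops buy: they let native page-table value conversions go through pv_ops and still patch down to a single instruction. Roughly (a simplified sketch, using make_pte as an example op):

	/* Natively, an op such as pv_ops.mmu.make_pte is plain identity... */
	u64 notrace _paravirt_ident_64(u64 x)
	{
		return x;
	}

	/* ...so the patching machinery rewrites such call sites into the
	 * inline "mov %rdi, %rax" from the DEF_NATIVE(, mov64, ...)
	 * template.  The 32-bit twin had no callers left, hence its
	 * removal here. */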
Signed-off-by: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akataria@vmware.com Cc: boris.ostrovsky@oracle.com Cc: rusty@rustcorp.com.au Cc: virtualization@lists.linux-foundation.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20181030063301.15054-1-jgross@suse.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/paravirt_types.h | 2 -- arch/x86/kernel/paravirt.c | 26 +++++++------------------- arch/x86/kernel/paravirt_patch_32.c | 18 ++++++------------ arch/x86/kernel/paravirt_patch_64.c | 20 ++++++-------------- 4 files changed, 19 insertions(+), 47 deletions(-) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index fba54ca23b2a..26942ad63830 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -361,7 +361,6 @@ extern struct paravirt_patch_template pv_ops; __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) -unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); unsigned paravirt_patch_default(u8 type, void *insnbuf, unsigned long addr, unsigned len); @@ -651,7 +650,6 @@ void paravirt_leave_lazy_mmu(void); void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); -u32 _paravirt_ident_32(u32); u64 _paravirt_ident_64(u64); #define paravirt_nop ((void *)_paravirt_nop) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 45123b116c05..c0e0101133f3 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -56,17 +56,6 @@ asm (".pushsection .entry.text, \"ax\"\n" ".type _paravirt_nop, @function\n\t" ".popsection"); -/* identity function, which can be inlined */ -u32 notrace _paravirt_ident_32(u32 x) -{ - return x; -} - -u64 notrace _paravirt_ident_64(u64 x) -{ - return x; -} - void __init default_banner(void) { printk(KERN_INFO "Booting paravirtualized kernel on %s\n", @@ -102,6 +91,12 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target, } #ifdef CONFIG_PARAVIRT_XXL +/* identity function, which can be inlined */ +u64 notrace _paravirt_ident_64(u64 x) +{ + return x; +} + static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, unsigned long addr, unsigned len) { @@ -146,13 +141,11 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf, else if (opfunc == _paravirt_nop) ret = 0; +#ifdef CONFIG_PARAVIRT_XXL /* identity functions just return their single argument */ - else if (opfunc == _paravirt_ident_32) - ret = paravirt_patch_ident_32(insnbuf, len); else if (opfunc == _paravirt_ident_64) ret = paravirt_patch_ident_64(insnbuf, len); -#ifdef CONFIG_PARAVIRT_XXL else if (type == PARAVIRT_PATCH(cpu.iret) || type == PARAVIRT_PATCH(cpu.usergs_sysret64)) /* If operation requires a jmp, then jmp */ @@ -309,13 +302,8 @@ struct pv_info pv_info = { #endif }; -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) -/* 32-bit pagetable entries */ -#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) -#else /* 64-bit pagetable entries */ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) -#endif struct paravirt_patch_template pv_ops = { /* Init ops. 
*/ diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 6368c22fa1fa..de138d3912e4 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -10,24 +10,18 @@ DEF_NATIVE(cpu, iret, "iret"); DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); -#endif - -#if defined(CONFIG_PARAVIRT_SPINLOCKS) -DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); -DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); -#endif - -unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) -{ - /* arg in %eax, return in %eax */ - return 0; -} unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) { /* arg in %edx:%eax, return in %edx:%eax */ return 0; } +#endif + +#if defined(CONFIG_PARAVIRT_SPINLOCKS) +DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); +DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); +#endif extern bool pv_is_native_spin_unlock(void); extern bool pv_is_native_vcpu_is_preempted(void); diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 7ca9cb726f4d..9d9e04b31077 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -15,27 +15,19 @@ DEF_NATIVE(cpu, wbinvd, "wbinvd"); DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); DEF_NATIVE(cpu, swapgs, "swapgs"); -#endif - -DEF_NATIVE(, mov32, "mov %edi, %eax"); DEF_NATIVE(, mov64, "mov %rdi, %rax"); -#if defined(CONFIG_PARAVIRT_SPINLOCKS) -DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); -DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); -#endif - -unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) -{ - return paravirt_patch_insns(insnbuf, len, - start__mov32, end__mov32); -} - unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) { return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64); } +#endif + +#if defined(CONFIG_PARAVIRT_SPINLOCKS) +DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); +DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); +#endif extern bool pv_is_native_spin_unlock(void); extern bool pv_is_native_vcpu_is_preempted(void); From c6ee7a548e2c291398b4f32c1f741c66b9f98e1c Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 25 Oct 2018 13:26:45 -0700 Subject: [PATCH 11/14] x86/numa_emulation: Fix uniform-split numa emulation The numa_emulation() routine in the 'uniform' case walks through all the physical 'memblk' instances and divides them into N emulated nodes with split_nodes_size_interleave_uniform(). As each physical node is consumed it is removed from the physical memblk array in the numa_remove_memblk_from() helper. Since split_nodes_size_interleave_uniform() handles advancing the array as the 'memblk' is consumed it is expected that the base of the array is always specified as the argument. Otherwise, on multi-socket (> 2) configurations the uniform-split capability can generate an invalid numa configuration leading to boot failures with signatures like the following: rcu: INFO: rcu_sched detected stalls on CPUs/tasks: Sending NMI from CPU 0 to CPUs 2: NMI backtrace for cpu 2 CPU: 2 PID: 1332 Comm: pgdatinit0 Not tainted 4.19.0-rc8-next-20181019-baseline #59 RIP: 0010:__init_single_page.isra.74+0x81/0x90 [..] Call Trace: deferred_init_pages+0xaa/0xe3 deferred_init_memmap+0x18f/0x318 kthread+0xf8/0x130 ? deferred_free_pages.isra.105+0xc9/0xc9 ? 
kthread_stop+0x110/0x110 ret_from_fork+0x35/0x40 Fixes: 1f6a2c6d9f121 ("x86/numa_emulation: Introduce uniform split capability") Signed-off-by: Dave Jiang Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Tested-by: Alexander Duyck Reviewed-by: Dave Hansen Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/154049911459.2685845.9210186007479774286.stgit@dwillia2-desk3.amr.corp.intel.com --- arch/x86/mm/numa_emulation.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index b54d52a2d00a..d71d72cf6c66 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -400,9 +400,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); ret = -1; for_each_node_mask(i, physnode_mask) { + /* + * The reason we pass in blk[0] is due to + * numa_remove_memblk_from() called by + * emu_setup_memblk() will delete entry 0 + * and then move everything else up in the pi.blk + * array. Therefore we should always be looking + * at blk[0]. + */ ret = split_nodes_size_interleave_uniform(&ei, &pi, - pi.blk[i].start, pi.blk[i].end, 0, - n, &pi.blk[i], nid); + pi.blk[0].start, pi.blk[0].end, 0, + n, &pi.blk[0], nid); if (ret < 0) break; if (ret < n) { From bcb6fb5da77c2a228adf07cc9cb1a0c2aa2001c6 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Wed, 31 Oct 2018 21:57:30 -0500 Subject: [PATCH 12/14] objtool: Support GCC 9 cold subfunction naming scheme Starting with GCC 8, a lot of unlikely code was moved out of line to "cold" subfunctions in .text.unlikely. For example, the unlikely bits of: irq_do_set_affinity() are moved out to the following subfunction: irq_do_set_affinity.cold.49() Starting with GCC 9, the numbered suffix has been removed. So in the above example, the cold subfunction is instead: irq_do_set_affinity.cold() Tweak the objtool subfunction detection logic so that it detects both GCC 8 and GCC 9 naming schemes. 
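Both naming schemes share the ".cold" stem, which is why the bare strstr() match is enough; the parent function is then found by truncating the name at the suffix. Simplified from read_symbols():

	coldstr = strstr(sym->name, ".cold");
	if (coldstr) {
		coldstr[0] = '\0';  /* "fn.cold.49" and "fn.cold" -> "fn" */
		pfunc = find_symbol_by_name(elf, sym->name);
		coldstr[0] = '.';   /* restore the full symbol name */
	}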
Reported-by: Peter Zijlstra (Intel) Signed-off-by: Josh Poimboeuf Signed-off-by: Thomas Gleixner Tested-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/015e9544b1f188d36a7f02fa31e9e95629aa5f50.1541040800.git.jpoimboe@redhat.com --- tools/objtool/elf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 7ec85d567598..f8cef271dfc7 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -301,7 +301,7 @@ static int read_symbols(struct elf *elf) if (sym->type != STT_FUNC) continue; sym->pfunc = sym->cfunc = sym; - coldstr = strstr(sym->name, ".cold."); + coldstr = strstr(sym->name, ".cold"); if (!coldstr) continue; From a846446b1914d1e3d996d657754f43fde89bab51 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Fri, 12 Oct 2018 14:42:52 +0100 Subject: [PATCH 13/14] x86/compat: Adjust in_compat_syscall() to generic code under !COMPAT The result of in_compat_syscall() can be pictured as:

x86 platform:
    ---------------------------------------------------
    |  Arch\syscall  |  64-bit  |   ia32   |   x32   |
    |-------------------------------------------------|
    |     x86_64     |  false   |   true   |  true   |
    |-------------------------------------------------|
    |      i686      |          |   true   |         |
    ---------------------------------------------------

Other platforms:
    -------------------------------------------
    |  Arch\syscall  |  64-bit  |   compat    |
    |-----------------------------------------|
    |     64-bit     |  false   |    true     |
    |-----------------------------------------|
    |    32-bit(?)   |          |    false    |
    -------------------------------------------

As seen, the result of in_compat_syscall() on a generic 32-bit platform differs from i686. There is no reason for in_compat_syscall() == true on native i686. It is also easy to misread code if the result on a native 32-bit platform differs between arches. Because of that, non-arch-specific code has many places with: if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall()) in different variations. It looks like the only non-x86 code which uses in_compat_syscall() not under a CONFIG_COMPAT guard is in amd/amdkfd. But according to commit a18069c132cb ("amdkfd: Disable support for 32-bit user processes"), it actually should be disabled on native i686. Rename in_compat_syscall() to in_32bit_syscall() for x86-specific code and make in_compat_syscall() false under !CONFIG_COMPAT. A follow-on patch will clean up generic users which were forced to check IS_ENABLED(CONFIG_COMPAT) together with in_compat_syscall(). Signed-off-by: Dmitry Safonov Signed-off-by: Thomas Gleixner Reviewed-by: Andy Lutomirski Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Ard Biesheuvel Cc: "David S. Miller" Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: John Stultz Cc: "Kirill A.
Shutemov" Cc: Oleg Nesterov Cc: Steffen Klassert Cc: Stephen Boyd Cc: Steven Rostedt Cc: linux-efi@vger.kernel.org Cc: netdev@vger.kernel.org Link: https://lkml.kernel.org/r/20181012134253.23266-2-dima@arista.com --- arch/x86/include/asm/compat.h | 9 ++++++++- arch/x86/include/asm/ftrace.h | 4 +--- arch/x86/kernel/process_64.c | 4 ++-- arch/x86/kernel/sys_x86_64.c | 11 ++++++----- arch/x86/mm/hugetlbpage.c | 4 ++-- arch/x86/mm/mmap.c | 2 +- include/linux/compat.h | 4 ++-- 7 files changed, 22 insertions(+), 16 deletions(-) diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index fab4df16a3c4..22c4dfe65992 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -217,11 +217,18 @@ static inline bool in_x32_syscall(void) return false; } -static inline bool in_compat_syscall(void) +static inline bool in_32bit_syscall(void) { return in_ia32_syscall() || in_x32_syscall(); } + +#ifdef CONFIG_COMPAT +static inline bool in_compat_syscall(void) +{ + return in_32bit_syscall(); +} #define in_compat_syscall in_compat_syscall /* override the generic impl */ +#endif struct compat_siginfo; int __copy_siginfo_to_user32(struct compat_siginfo __user *to, diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index c18ed65287d5..cf350639e76d 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -76,9 +76,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) { - if (in_compat_syscall()) - return true; - return false; + return in_32bit_syscall(); } #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ #endif /* !COMPILE_OFFSETS */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 31b4755369f0..0e0b4288a4b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -701,10 +701,10 @@ static void __set_personality_x32(void) current->mm->context.ia32_compat = TIF_X32; current->personality &= ~READ_IMPLIES_EXEC; /* - * in_compat_syscall() uses the presence of the x32 syscall bit + * in_32bit_syscall() uses the presence of the x32 syscall bit * flag to determine compat status. The x86 mmap() code relies on * the syscall bitness so set x32 syscall bit right here to make - * in_compat_syscall() work during exec(). + * in_32bit_syscall() work during exec(). * * Pretend to come from a x32 execve. */ diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 6a78d4b36a79..f7476ce23b6e 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -105,7 +105,7 @@ out: static void find_start_end(unsigned long addr, unsigned long flags, unsigned long *begin, unsigned long *end) { - if (!in_compat_syscall() && (flags & MAP_32BIT)) { + if (!in_32bit_syscall() && (flags & MAP_32BIT)) { /* This is usually used needed to map code in small model, so it needs to be in the first 31bit. Limit it to that. 
---
 arch/x86/include/asm/compat.h |  9 ++++++++-
 arch/x86/include/asm/ftrace.h |  4 +---
 arch/x86/kernel/process_64.c  |  4 ++--
 arch/x86/kernel/sys_x86_64.c  | 11 ++++++-----
 arch/x86/mm/hugetlbpage.c     |  4 ++--
 arch/x86/mm/mmap.c            |  2 +-
 include/linux/compat.h        |  4 ++--
 7 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index fab4df16a3c4..22c4dfe65992 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -217,11 +217,18 @@ static inline bool in_x32_syscall(void)
 	return false;
 }
 
-static inline bool in_compat_syscall(void)
+static inline bool in_32bit_syscall(void)
 {
 	return in_ia32_syscall() || in_x32_syscall();
 }
+
+#ifdef CONFIG_COMPAT
+static inline bool in_compat_syscall(void)
+{
+	return in_32bit_syscall();
+}
 #define in_compat_syscall in_compat_syscall /* override the generic impl */
+#endif
 
 struct compat_siginfo;
 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index c18ed65287d5..cf350639e76d 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -76,9 +76,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
 #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 {
-	if (in_compat_syscall())
-		return true;
-	return false;
+	return in_32bit_syscall();
 }
 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
 #endif /* !COMPILE_OFFSETS */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 31b4755369f0..0e0b4288a4b2 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -701,10 +701,10 @@ static void __set_personality_x32(void)
 	current->mm->context.ia32_compat = TIF_X32;
 	current->personality &= ~READ_IMPLIES_EXEC;
 	/*
-	 * in_compat_syscall() uses the presence of the x32 syscall bit
+	 * in_32bit_syscall() uses the presence of the x32 syscall bit
 	 * flag to determine compat status. The x86 mmap() code relies on
 	 * the syscall bitness so set x32 syscall bit right here to make
-	 * in_compat_syscall() work during exec().
+	 * in_32bit_syscall() work during exec().
 	 *
 	 * Pretend to come from a x32 execve.
 	 */
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 6a78d4b36a79..f7476ce23b6e 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -105,7 +105,7 @@ out:
 static void find_start_end(unsigned long addr, unsigned long flags,
 		unsigned long *begin, unsigned long *end)
 {
-	if (!in_compat_syscall() && (flags & MAP_32BIT)) {
+	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
 		   it to that.  This means we need to move the
@@ -122,7 +122,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
 	}
 
 	*begin	= get_mmap_base(1);
-	if (in_compat_syscall())
+	if (in_32bit_syscall())
 		*end = task_size_32bit();
 	else
 		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
@@ -193,7 +193,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 
 	/* for MAP_32BIT mappings we force the legacy mmap base */
-	if (!in_compat_syscall() && (flags & MAP_32BIT))
+	if (!in_32bit_syscall() && (flags & MAP_32BIT))
 		goto bottomup;
 
 	/* requesting a specific address */
@@ -217,9 +217,10 @@ get_unmapped_area:
 	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
 	 * in the full address space.
 	 *
-	 * !in_compat_syscall() check to avoid high addresses for x32.
+	 * !in_32bit_syscall() check to avoid high addresses for x32
+	 * (and make it no op on native i386).
 	 */
-	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
 		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
 
 	info.align_mask = 0;
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 00b296617ca4..92e4c4b85bba 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -92,7 +92,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
 	 * in the full address space.
 	 */
-	info.high_limit = in_compat_syscall() ?
+	info.high_limit = in_32bit_syscall() ?
 		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
 
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
@@ -116,7 +116,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
 	 * in the full address space.
 	 */
-	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
 		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
 
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1e95d57760cf..db3165714521 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -166,7 +166,7 @@ unsigned long get_mmap_base(int is_legacy)
 	struct mm_struct *mm = current->mm;
 
 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
-	if (in_compat_syscall()) {
+	if (in_32bit_syscall()) {
 		return is_legacy ? mm->mmap_compat_legacy_base
 				 : mm->mmap_compat_base;
 	}
diff --git a/include/linux/compat.h b/include/linux/compat.h
index d30e4dbd4be2..f1ef24c68fcc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -1029,9 +1029,9 @@ int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
 #else /* !CONFIG_COMPAT */
 
 #define is_compat_task() (0)
-#ifndef in_compat_syscall
+/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
+#define in_compat_syscall in_compat_syscall
 static inline bool in_compat_syscall(void) { return false; }
-#endif
 
 #endif /* CONFIG_COMPAT */

From 98f76206b33504b934209d16196477dfa519a807 Mon Sep 17 00:00:00 2001
From: Dmitry Safonov
Date: Fri, 12 Oct 2018 14:42:53 +0100
Subject: [PATCH 14/14] compat: Cleanup in_compat_syscall() callers

Now that in_compat_syscall() is consistent on all architectures and no
longer reports true on native i686, the workarounds (ifdeffery and
helpers) can be removed.

Signed-off-by: Dmitry Safonov
Signed-off-by: Thomas Gleixner
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Ard Biesheuvel
Cc: Andy Lutomirski
Cc: "David S. Miller"
Cc: Herbert Xu
Cc: "H. Peter Anvin"
Cc: John Stultz
Cc: "Kirill A. Shutemov"
Cc: Oleg Nesterov
Cc: Steffen Klassert
Cc: Stephen Boyd
Cc: Steven Rostedt
Cc: linux-efi@vger.kernel.org
Cc: netdev@vger.kernel.org
Link: https://lkml.kernel.org/r/20181012134253.23266-3-dima@arista.com
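The shape of the cleanup is the same in every caller. A condensed sketch
(ours, not from the patch; handle_compat() is a hypothetical stand-in)
showing why the bare call is now sufficient:

/* cleanup_pattern.c - hypothetical before/after of a caller; build with
 *   gcc cleanup_pattern.c  or  gcc -DCONFIG_COMPAT cleanup_pattern.c
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_COMPAT
static inline bool in_compat_syscall(void) { return true; }  /* stand-in */
#else
static inline bool in_compat_syscall(void) { return false; }
#endif

static void handle_compat(void) { puts("compat path"); }

int main(void)
{
	/* Before: callers added their own guard, because native i686
	 * used to report true:
	 *
	 *     if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall())
	 *             handle_compat();
	 */

	/* After: a bare call is correct everywhere; under !CONFIG_COMPAT
	 * the inline is constant false and the branch folds away. */
	if (in_compat_syscall())
		handle_compat();
	else
		puts("native path");
	return 0;
}

The efivars, xfrm and timekeeping hunks below are instances of exactly
this transformation.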
Miller" Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: John Stultz Cc: "Kirill A. Shutemov" Cc: Oleg Nesterov Cc: Steffen Klassert Cc: Stephen Boyd Cc: Steven Rostedt Cc: linux-efi@vger.kernel.org Cc: netdev@vger.kernel.org Link: https://lkml.kernel.org/r/20181012134253.23266-3-dima@arista.com --- drivers/firmware/efi/efivars.c | 16 ++++------------ kernel/time/time.c | 2 +- net/xfrm/xfrm_state.c | 2 -- net/xfrm/xfrm_user.c | 2 -- 4 files changed, 5 insertions(+), 17 deletions(-) diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 3e626fd9bd4e..8061667a6765 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -229,14 +229,6 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, return 0; } -static inline bool is_compat(void) -{ - if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall()) - return true; - - return false; -} - static void copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src) { @@ -263,7 +255,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) u8 *data; int err; - if (is_compat()) { + if (in_compat_syscall()) { struct compat_efi_variable *compat; if (count != sizeof(*compat)) @@ -324,7 +316,7 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) &entry->var.DataSize, entry->var.Data)) return -EIO; - if (is_compat()) { + if (in_compat_syscall()) { compat = (struct compat_efi_variable *)buf; size = sizeof(*compat); @@ -418,7 +410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, struct compat_efi_variable *compat = (struct compat_efi_variable *)buf; struct efi_variable *new_var = (struct efi_variable *)buf; struct efivar_entry *new_entry; - bool need_compat = is_compat(); + bool need_compat = in_compat_syscall(); efi_char16_t *name; unsigned long size; u32 attributes; @@ -495,7 +487,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (is_compat()) { + if (in_compat_syscall()) { if (count != sizeof(*compat)) return -EINVAL; diff --git a/kernel/time/time.c b/kernel/time/time.c index e3a7f7fd3abc..ad204cf6d001 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -842,7 +842,7 @@ int get_timespec64(struct timespec64 *ts, ts->tv_sec = kts.tv_sec; /* Zero out the padding for 32 bit systems or in compat mode */ - if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())) + if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall()) kts.tv_nsec &= 0xFFFFFFFFUL; ts->tv_nsec = kts.tv_nsec; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index b669262682c9..dc4a9f1fb941 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2077,10 +2077,8 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen struct xfrm_mgr *km; struct xfrm_policy *pol = NULL; -#ifdef CONFIG_COMPAT if (in_compat_syscall()) return -EOPNOTSUPP; -#endif if (!optval && !optlen) { xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ca7a207b81a9..c9a84e22f5d5 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2621,10 +2621,8 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, const struct xfrm_link *link; int type, err; -#ifdef CONFIG_COMPAT if (in_compat_syscall()) return -EOPNOTSUPP; -#endif type = nlh->nlmsg_type; if (type > XFRM_MSG_MAX)