From a6fe043880820981f6e4918240f967ea79bb063e Mon Sep 17 00:00:00 2001 From: Kameron Carr Date: Fri, 23 Jun 2023 15:09:49 -0700 Subject: Drivers: hv: Change hv_free_hyperv_page() to take void * argument Currently hv_free_hyperv_page() takes an unsigned long argument, which is inconsistent with the void * return value from the corresponding hv_alloc_hyperv_page() function and variants. This creates unnecessary extra casting. Change the hv_free_hyperv_page() argument type to void *. Also remove redundant casts from invocations of hv_alloc_hyperv_page() and variants. Signed-off-by: Kameron Carr Reviewed-by: Nuno Das Neves Reviewed-by: Dexuan Cui Link: https://lore.kernel.org/r/1687558189-19734-1-git-send-email-kameroncarr@linux.microsoft.com Signed-off-by: Wei Liu --- drivers/hv/connection.c | 13 ++++++------- drivers/hv/hv_common.c | 10 +++++----- include/asm-generic/mshyperv.h | 2 +- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 5978e9dbc286..ebf15f31d97e 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -209,8 +209,7 @@ int vmbus_connect(void) * Setup the vmbus event connection for channel interrupt * abstraction stuff */ - vmbus_connection.int_page = - (void *)hv_alloc_hyperv_zeroed_page(); + vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page(); if (vmbus_connection.int_page == NULL) { ret = -ENOMEM; goto cleanup; @@ -225,8 +224,8 @@ int vmbus_connect(void) * Setup the monitor notification facility. The 1st page for * parent->child and the 2nd page for child->parent */ - vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_page(); - vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_page(); + vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page(); + vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page(); if ((vmbus_connection.monitor_pages[0] == NULL) || (vmbus_connection.monitor_pages[1] == NULL)) { ret = -ENOMEM; @@ -333,15 +332,15 @@ void vmbus_disconnect(void) destroy_workqueue(vmbus_connection.work_queue); if (vmbus_connection.int_page) { - hv_free_hyperv_page((unsigned long)vmbus_connection.int_page); + hv_free_hyperv_page(vmbus_connection.int_page); vmbus_connection.int_page = NULL; } set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1); set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1); - hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]); - hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]); + hv_free_hyperv_page(vmbus_connection.monitor_pages[0]); + hv_free_hyperv_page(vmbus_connection.monitor_pages[1]); vmbus_connection.monitor_pages[0] = NULL; vmbus_connection.monitor_pages[1] = NULL; } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 542a1d53b303..6a2258fef1fe 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -115,12 +115,12 @@ void *hv_alloc_hyperv_zeroed_page(void) } EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page); -void hv_free_hyperv_page(unsigned long addr) +void hv_free_hyperv_page(void *addr) { if (PAGE_SIZE == HV_HYP_PAGE_SIZE) - free_page(addr); + free_page((unsigned long)addr); else - kfree((void *)addr); + kfree(addr); } EXPORT_SYMBOL_GPL(hv_free_hyperv_page); @@ -253,7 +253,7 @@ static void hv_kmsg_dump_unregister(void) atomic_notifier_chain_unregister(&panic_notifier_list, &hyperv_panic_report_block); - hv_free_hyperv_page((unsigned long)hv_panic_page); + hv_free_hyperv_page(hv_panic_page); hv_panic_page = NULL; } @@ -270,7 
+270,7 @@ static void hv_kmsg_dump_register(void) ret = kmsg_dump_register(&hv_kmsg_dumper); if (ret) { pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret); - hv_free_hyperv_page((unsigned long)hv_panic_page); + hv_free_hyperv_page(hv_panic_page); hv_panic_page = NULL; } } diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 402a8c1c202d..a8f4b653ef4e 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -190,7 +190,7 @@ int hv_common_cpu_die(unsigned int cpu); void *hv_alloc_hyperv_page(void); void *hv_alloc_hyperv_zeroed_page(void); -void hv_free_hyperv_page(unsigned long addr); +void hv_free_hyperv_page(void *addr); /** * hv_cpu_number_to_vp_number() - Map CPU to VP. -- cgit From 55e544e1a922d272b62ec576a3de92329f838ce9 Mon Sep 17 00:00:00 2001 From: Nischala Yelchuri Date: Tue, 20 Jun 2023 11:40:38 -0700 Subject: x86/hyperv: Improve code for referencing hyperv_pcpu_input_arg Several places in code for Hyper-V reference the per-CPU variable hyperv_pcpu_input_arg. Older code uses a multi-line sequence to reference the variable, and usually includes a cast. Newer code does a much simpler direct assignment. The latter is preferable as the complexity of the older code is unnecessary. Update older code to use the simpler direct assignment. Signed-off-by: Nischala Yelchuri Link: https://lore.kernel.org/r/1687286438-9421-1-git-send-email-niyelchu@linux.microsoft.com Signed-off-by: Wei Liu --- arch/x86/hyperv/hv_apic.c | 4 +--- arch/x86/hyperv/ivm.c | 7 +++---- arch/x86/hyperv/mmu.c | 12 ++---------- arch/x86/hyperv/nested.c | 11 ++--------- drivers/hv/hv_balloon.c | 2 +- 5 files changed, 9 insertions(+), 27 deletions(-) diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 1fbda2f94184..b21335e6a210 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -107,7 +107,6 @@ static bool cpu_is_self(int cpu) static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector, bool exclude_self) { - struct hv_send_ipi_ex **arg; struct hv_send_ipi_ex *ipi_arg; unsigned long flags; int nr_bank = 0; @@ -117,9 +116,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector, return false; local_irq_save(flags); - arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); + ipi_arg = *this_cpu_ptr(hyperv_pcpu_input_arg); - ipi_arg = *arg; if (unlikely(!ipi_arg)) goto ipi_mask_ex_done; diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index cc92388b7a99..9b63151bc6cd 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(hv_ghcb_msr_read); static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], enum hv_mem_host_visibility visibility) { - struct hv_gpa_range_for_visibility **input_pcpu, *input; + struct hv_gpa_range_for_visibility *input; u16 pages_processed; u64 hv_status; unsigned long flags; @@ -262,9 +262,8 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], } local_irq_save(flags); - input_pcpu = (struct hv_gpa_range_for_visibility **) - this_cpu_ptr(hyperv_pcpu_input_arg); - input = *input_pcpu; + input = *this_cpu_ptr(hyperv_pcpu_input_arg); + if (unlikely(!input)) { local_irq_restore(flags); return -EINVAL; diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 8460bd35e10c..1cc113200ff5 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -61,7 +61,6 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus, const struct flush_tlb_info *info) { int cpu, vcpu, gva_n, 
max_gvas; - struct hv_tlb_flush **flush_pcpu; struct hv_tlb_flush *flush; u64 status; unsigned long flags; @@ -74,10 +73,7 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus, local_irq_save(flags); - flush_pcpu = (struct hv_tlb_flush **) - this_cpu_ptr(hyperv_pcpu_input_arg); - - flush = *flush_pcpu; + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); if (unlikely(!flush)) { local_irq_restore(flags); @@ -178,17 +174,13 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus, const struct flush_tlb_info *info) { int nr_bank = 0, max_gvas, gva_n; - struct hv_tlb_flush_ex **flush_pcpu; struct hv_tlb_flush_ex *flush; u64 status; if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) return HV_STATUS_INVALID_PARAMETER; - flush_pcpu = (struct hv_tlb_flush_ex **) - this_cpu_ptr(hyperv_pcpu_input_arg); - - flush = *flush_pcpu; + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); if (info->mm) { /* diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c index 5d70968c8538..9dc259fa322e 100644 --- a/arch/x86/hyperv/nested.c +++ b/arch/x86/hyperv/nested.c @@ -19,7 +19,6 @@ int hyperv_flush_guest_mapping(u64 as) { - struct hv_guest_mapping_flush **flush_pcpu; struct hv_guest_mapping_flush *flush; u64 status; unsigned long flags; @@ -30,10 +29,7 @@ int hyperv_flush_guest_mapping(u64 as) local_irq_save(flags); - flush_pcpu = (struct hv_guest_mapping_flush **) - this_cpu_ptr(hyperv_pcpu_input_arg); - - flush = *flush_pcpu; + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); if (unlikely(!flush)) { local_irq_restore(flags); @@ -90,7 +86,6 @@ EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list); int hyperv_flush_guest_mapping_range(u64 as, hyperv_fill_flush_list_func fill_flush_list_func, void *data) { - struct hv_guest_mapping_flush_list **flush_pcpu; struct hv_guest_mapping_flush_list *flush; u64 status; unsigned long flags; @@ -102,10 +97,8 @@ int hyperv_flush_guest_mapping_range(u64 as, local_irq_save(flags); - flush_pcpu = (struct hv_guest_mapping_flush_list **) - this_cpu_ptr(hyperv_pcpu_input_arg); + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); - flush = *flush_pcpu; if (unlikely(!flush)) { local_irq_restore(flags); goto fault; diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index dffcc894f117..0d7a3ba66396 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1628,7 +1628,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order)); local_irq_save(flags); - hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg); + hint = *this_cpu_ptr(hyperv_pcpu_input_arg); if (!hint) { local_irq_restore(flags); return -ENOSPC; -- cgit From 6e2acbfe59b83043bc7ae1bb39fac4fc9dcd5a18 Mon Sep 17 00:00:00 2001 From: Dmitry Rokosov Date: Wed, 5 Jul 2023 00:54:04 +0300 Subject: clk: meson: change usleep_range() to udelay() for atomic context The function meson_clk_pll_enable() can be invoked under the enable_lock spinlock from the clk core logic, which risks a kernel panic during the usleep_range() call: BUG: scheduling while atomic: kworker/u4:2/36/0x00000002 Modules linked in: g_ffs usb_f_fs libcomposite CPU: 1 PID: 36 Comm: kworker/u4:2 Not tainted 6.4.0-rc5 #273 Workqueue: events_unbound async_run_entry_fn Call trace: dump_backtrace+0x9c/0x128 show_stack+0x20/0x38 dump_stack_lvl+0x48/0x60 dump_stack+0x18/0x28 __schedule_bug+0x58/0x78 __schedule+0x828/0xa88 schedule+0x64/0xd8 
schedule_hrtimeout_range_clock+0xd0/0x208 schedule_hrtimeout_range+0x1c/0x30 usleep_range_state+0x6c/0xa8 meson_clk_pll_enable+0x1f4/0x310 clk_core_enable+0x78/0x200 clk_core_enable+0x58/0x200 clk_core_enable+0x58/0x200 clk_core_enable+0x58/0x200 clk_enable+0x34/0x60 So it is required to use the udelay() function instead of usleep_range() for the atomic context safety. Fixes: b6ec400aa153 ("clk: meson: introduce new pll power-on sequence for A1 SoC family") Reported-by: Jan Dakinevich Signed-off-by: Dmitry Rokosov Link: https://lore.kernel.org/r/20230704215404.11533-1-ddrokosov@sberdevices.ru Signed-off-by: Jerome Brunet --- drivers/clk/meson/clk-pll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 8fef90bf962f..6fa7639a3050 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -367,9 +367,9 @@ static int meson_clk_pll_enable(struct clk_hw *hw) * 3. enable the lock detect module */ if (MESON_PARM_APPLICABLE(&pll->current_en)) { - usleep_range(10, 20); + udelay(10); meson_parm_write(clk->map, &pll->current_en, 1); - usleep_range(40, 50); + udelay(40); } if (MESON_PARM_APPLICABLE(&pll->l_detect)) { -- cgit From b2ec116aad38aa9c8b67fad4314e50823adf6949 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 11 Jul 2023 12:38:20 +0200 Subject: workqueue: Fix cpu_intensive_thresh_us name in help text There exists no parameter called "cpu_intensive_threshold_us". The actual parameter name is "cpu_intensive_thresh_us". Fixes: 6363845005202148 ("workqueue: Report work funcs that trigger automatic CPU_INTENSIVE mechanism") Signed-off-by: Geert Uytterhoeven Reviewed-by: Randy Dunlap Signed-off-by: Tejun Heo --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index fbc89baf7de6..d6798513a8c2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1200,7 +1200,7 @@ config WQ_CPU_INTENSIVE_REPORT help Say Y here to enable reporting of concurrency-managed per-cpu work items that hog CPUs for longer than - workqueue.cpu_intensive_threshold_us. Workqueue automatically + workqueue.cpu_intensive_thresh_us. Workqueue automatically detects and excludes them from concurrency management to prevent them from stalling other per-cpu work items. Occassional triggering may not necessarily indicate a problem. Repeated -- cgit From 8544cda94dae6be3f1359539079c68bb731428b1 Mon Sep 17 00:00:00 2001 From: Olivier Maignial Date: Fri, 23 Jun 2023 17:33:36 +0200 Subject: mtd: spinand: toshiba: Fix ecc_get_status Reading ECC status is failing. tx58cxgxsxraix_ecc_get_status() is using on-stack buffer for SPINAND_GET_FEATURE_OP() output. It is not suitable for DMA needs of spi-mem. Fix this by using the spi-mem operations dedicated buffer spinand->scratchbuf. 
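As an illustration of the pattern (a sketch mirroring the fix in the diff below; error handling trimmed):

	/* An on-stack buffer such as a local 'u8 mbf' may live on a
	 * vmalloc'ed stack (CONFIG_VMAP_STACK) and cannot be safely
	 * DMA-mapped, so hand spi-mem the DMA-capable scratch buffer: */
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);

	if (spi_mem_exec_op(spinand->spimem, &op))
		return nanddev_get_ecc_conf(nand)->strength;

	/* the bitflip count sits in the high nibble of the returned byte */
	mbf = *(spinand->scratchbuf) >> 4;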
See spinand->scratchbuf: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/mtd/spinand.h?h=v6.3#n418 spi_mem_check_op(): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/spi/spi-mem.c?h=v6.3#n199 Fixes: 10949af1681d ("mtd: spinand: Add initial support for Toshiba TC58CVG2S0H") Cc: stable@vger.kernel.org Signed-off-by: Olivier Maignial Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/DB4P250MB1032553D05FBE36DEE0D311EFE23A@DB4P250MB1032.EURP250.PROD.OUTLOOK.COM --- drivers/mtd/nand/spi/toshiba.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 7380b1ebaccd..a80427c13121 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, if (spi_mem_exec_op(spinand->spimem, &op)) return nanddev_get_ecc_conf(nand)->strength; - mbf >>= 4; + mbf = *(spinand->scratchbuf) >> 4; if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf)) return nanddev_get_ecc_conf(nand)->strength; -- cgit From f5a05060670a4d8d6523afc7963eb559c2e3615f Mon Sep 17 00:00:00 2001 From: Olivier Maignial Date: Fri, 23 Jun 2023 17:33:37 +0200 Subject: mtd: spinand: winbond: Fix ecc_get_status Reading ECC status is failing. w25n02kv_ecc_get_status() is using on-stack buffer for SPINAND_GET_FEATURE_OP() output. It is not suitable for DMA needs of spi-mem. Fix this by using the spi-mem operations dedicated buffer spinand->scratchbuf. 
See spinand->scratchbuf: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/mtd/spinand.h?h=v6.3#n418 spi_mem_check_op(): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/spi/spi-mem.c?h=v6.3#n199 Fixes: 6154c7a58348 ("mtd: spinand: winbond: add Winbond W25N02KV flash support") Cc: stable@vger.kernel.org Signed-off-by: Olivier Maignial Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/DB4P250MB1032EDB9E36B764A33769039FE23A@DB4P250MB1032.EURP250.PROD.OUTLOOK.COM --- drivers/mtd/nand/spi/winbond.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index 3ad58cd284d8..f507e3759301 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -108,7 +108,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -126,7 +126,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand, if (spi_mem_exec_op(spinand->spimem, &op)) return nanddev_get_ecc_conf(nand)->strength; - mbf >>= 4; + mbf = *(spinand->scratchbuf) >> 4; if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf)) return nanddev_get_ecc_conf(nand)->strength; -- cgit From d8403b9eeee66d5dd81ecb9445800b108c267ce3 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Sun, 25 Jun 2023 00:10:21 +0530 Subject: mtd: rawnand: omap_elm: Fix incorrect type in assignment Once the ECC word endianness is converted to BE32, we force cast it to u32 so we can use elm_write_reg() which in turn uses writel(). 
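A minimal sketch of the pattern applied throughout the diff below (one fragment shown):

	/* cpu_to_be32() returns a bitwise __be32 type; elm_write_reg()
	 * (and writel() underneath) take a plain u32, so a __force cast
	 * tells sparse the reinterpretation is intentional: */
	val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
	elm_write_reg(info, offset, val);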
Fixes below sparse warnings: drivers/mtd/nand/raw/omap_elm.c:180:37: sparse: expected unsigned int [usertype] val drivers/mtd/nand/raw/omap_elm.c:180:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:185:37: sparse: expected unsigned int [usertype] val drivers/mtd/nand/raw/omap_elm.c:185:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:190:37: sparse: expected unsigned int [usertype] val drivers/mtd/nand/raw/omap_elm.c:190:37: sparse: got restricted __be32 [usertype] >> drivers/mtd/nand/raw/omap_elm.c:200:40: sparse: sparse: restricted __be32 degrades to integer drivers/mtd/nand/raw/omap_elm.c:206:39: sparse: sparse: restricted __be32 degrades to integer drivers/mtd/nand/raw/omap_elm.c:210:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:210:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:213:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:213:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:216:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:216:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:219:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:219:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:222:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:222:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:225:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:225:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:228:39: sparse: sparse: restricted __be32 degrades to integer Fixes: bf22433575ef ("mtd: devices: elm: Add support for ELM error correction") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202306212211.WDXokuWh-lkp@intel.com/ Signed-off-by: Roger Quadros Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230624184021.7740-1-rogerq@kernel.org --- drivers/mtd/nand/raw/omap_elm.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 6e1eac6644a6..4a97d4a76454 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info, switch (info->bch_type) { case BCH8_ECC: /* syndrome fragment 0 = ecc[9-12B] */ - val = cpu_to_be32(*(u32 *) &ecc[9]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[5-8B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[5]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]); elm_write_reg(info, offset, val); /* syndrome fragment 2 = ecc[1-4B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[1]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]); elm_write_reg(info, offset, val); /* syndrome fragment 3 = ecc[0B] */ @@ -197,35 +197,35 @@ static void elm_load_syndrome(struct elm_info *info, break; case BCH4_ECC: /* syndrome fragment 0 = ecc[20-52b] bits */ - val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) | + val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) | ((ecc[2] & 0xf) << 28); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[0-20b] bits */ offset += 4; - val = 
cpu_to_be32(*(u32 *) &ecc[0]) >> 12; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12; elm_write_reg(info, offset, val); break; case BCH16_ECC: - val = cpu_to_be32(*(u32 *) &ecc[22]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[18]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[14]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[10]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[6]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[2]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16; elm_write_reg(info, offset, val); break; default: -- cgit From 7e6b04f9238eab0f684fafd158c1f32ea65b9eaa Mon Sep 17 00:00:00 2001 From: Arseniy Krasnov Date: Wed, 5 Jul 2023 09:52:10 +0300 Subject: mtd: rawnand: meson: fix OOB available bytes for ECC It is incorrect to calculate number of OOB bytes for ECC engine using some "already known" ECC step size (1024 bytes here). Number of such bytes for ECC engine must be whole OOB except 2 bytes for bad block marker, while proper ECC step size and strength will be selected by ECC logic. Fixes: 8fae856c5350 ("mtd: rawnand: meson: add support for Amlogic NAND flash controller") Cc: Signed-off-by: Arseniy Krasnov Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230705065211.293500-1-AVKrasnov@sberdevices.ru --- drivers/mtd/nand/raw/meson_nand.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index d3faf8086631..b10011dec1e6 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1278,7 +1278,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand) struct meson_nfc *nfc = nand_get_controller_data(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct mtd_info *mtd = nand_to_mtd(nand); - int nsectors = mtd->writesize / 1024; int raw_writesize; int ret; @@ -1304,7 +1303,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) nand->options |= NAND_NO_SUBPAGE_WRITE; ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps, - mtd->oobsize - 2 * nsectors); + mtd->oobsize - 2); if (ret) { dev_err(nfc->dev, "failed to ECC init\n"); return -EINVAL; -- cgit From d44263222134b5635932974c6177a5cba65a07e8 Mon Sep 17 00:00:00 2001 From: Sergei Antonov Date: Tue, 27 Jun 2023 15:05:49 +0300 Subject: mmc: moxart: read scr register without changing byte order Conversion from big-endian to native is done in a common function mmc_app_send_scr(). Converting in moxart_transfer_pio() is extra. 
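For reference, the core-side conversion that makes the driver-side swap redundant (paraphrased from mmc_app_send_scr() in drivers/mmc/core/sd_ops.c; not part of this patch):

	/* host drivers hand back the raw big-endian SCR words; the core
	 * converts them to native endianness exactly once: */
	scr[0] = be32_to_cpu(scr[0]);
	scr[1] = be32_to_cpu(scr[1]);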
Double conversion on a LE system returns an incorrect SCR value, leads to errors: mmc0: unrecognised SCR structure version 8 Fixes: 1b66e94e6b99 ("mmc: moxart: Add MOXA ART SD/MMC driver") Signed-off-by: Sergei Antonov Cc: Jonas Jensen Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230627120549.2400325-1-saproj@gmail.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/moxart-mmc.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 2d002c81dcf3..d0d6ffcf78d4 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct moxart_host *host) return; } for (len = 0; len < remain && len < host->fifo_width;) { - /* SCR data must be read in big endian. */ - if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) - *sgp = ioread32be(host->base + - REG_DATA_WINDOW); - else - *sgp = ioread32(host->base + - REG_DATA_WINDOW); + *sgp = ioread32(host->base + REG_DATA_WINDOW); sgp++; len += 4; } -- cgit From 5def5c1c15bf22934ee227af85c1716762f3829f Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Fri, 30 Jun 2023 09:45:33 +0900 Subject: mmc: sdhci-f-sdh30: Replace with sdhci_pltfm Even if sdhci_pltfm_pmops is specified for PM, this driver doesn't apply sdhci_pltfm, so the structure is not correctly referenced in PM functions. This applies sdhci_pltfm to this driver to fix this issue. - Call sdhci_pltfm_init() instead of sdhci_alloc_host() and other functions that covered by sdhci_pltfm. - Move ops and quirks to sdhci_pltfm_data - Replace sdhci_priv() with own private function sdhci_f_sdh30_priv(). Fixes: 87a507459f49 ("mmc: sdhci: host: add new f_sdh30") Signed-off-by: Kunihiko Hayashi Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230630004533.26644-1-hayashi.kunihiko@socionext.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci_f_sdh30.c | 60 ++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index a202a69a4b08..b01ffb4d0973 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -29,9 +29,16 @@ struct f_sdhost_priv { bool enable_cmd_dat_delay; }; +static void *sdhci_f_sdhost_priv(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return sdhci_pltfm_priv(pltfm_host); +} + static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctrl = 0; usleep_range(2500, 3000); @@ -64,7 +71,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctl; if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) @@ -95,30 +102,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; +static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = { + .ops = &sdhci_f_sdh30_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_INVERTED_WRITE_PROTECT, + .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE + | SDHCI_QUIRK2_TUNING_WORK_AROUND, +}; + static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = 
&pdev->dev; - int irq, ctrl = 0, ret = 0; + int ctrl = 0, ret = 0; struct f_sdhost_priv *priv; + struct sdhci_pltfm_host *pltfm_host; u32 reg = 0; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); + host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data, + sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) return PTR_ERR(host); - priv = sdhci_priv(host); + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); priv->dev = dev; - host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | - SDHCI_QUIRK_INVERTED_WRITE_PROTECT; - host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | - SDHCI_QUIRK2_TUNING_WORK_AROUND; - priv->enable_cmd_dat_delay = device_property_read_bool(dev, "fujitsu,cmd-dat-delay-select"); @@ -126,18 +135,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) if (ret) goto err; - platform_set_drvdata(pdev, host); - - host->hw_name = "f_sdh30"; - host->ops = &sdhci_f_sdh30_ops; - host->irq = irq; - - host->ioaddr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(host->ioaddr)) { - ret = PTR_ERR(host->ioaddr); - goto err; - } - if (dev_of_node(dev)) { sdhci_get_of_property(pdev); @@ -204,24 +201,21 @@ err_rst: err_clk: clk_disable_unprepare(priv->clk_iface); err: - sdhci_free_host(host); + sdhci_pltfm_free(pdev); + return ret; } static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); - struct f_sdhost_priv *priv = sdhci_priv(host); - - sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) == - 0xffffffff); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); reset_control_assert(priv->rst); clk_disable_unprepare(priv->clk); clk_disable_unprepare(priv->clk_iface); - sdhci_free_host(host); - platform_set_drvdata(pdev, NULL); + sdhci_pltfm_unregister(pdev); return 0; } -- cgit From d0ca3b92b7a6f42841ea9da8492aaf649db79780 Mon Sep 17 00:00:00 2001 From: Johan Jonker Date: Fri, 14 Jul 2023 17:21:01 +0200 Subject: mtd: rawnand: rockchip: fix oobfree offset and description Rockchip boot blocks are written per 4 x 512 byte sectors per page. Each page with boot blocks must have a page address (PA) pointer in OOB to the next page. The currently advertised free OOB area starts at offset 6, like if 4 PA bytes were located right after the BBM. This is wrong as the PA bytes are located right before the ECC bytes. Fix the layout by allowing access to all bytes between the BBM and the PA bytes instead of reserving 4 bytes right after the BBM. This change breaks existing jffs2 users. 
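Schematically (a sketch based on the description above; the BBM is 2 bytes and the PA pointer is 4 bytes):

	advertised before the fix:  | BBM | reserved (4B) | free OOB ......... | ECC |
	advertised after the fix:   | BBM | free OOB ......... | PA0-PA3 (4B)   | ECC |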
Fixes: 058e0e847d54 ("mtd: rawnand: rockchip: NFC driver for RK3308, RK2928 and others") Signed-off-by: Johan Jonker Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/d202f12d-188c-20e8-f2c2-9cc874ad4d22@gmail.com --- drivers/mtd/nand/raw/rockchip-nand-controller.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 2312e27362cb..37fc07ba57aa 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf, * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 * * The rk_nfc_ooblayout_free() function already has reserved - * these 4 bytes with: + * these 4 bytes together with 2 bytes for BBM + * by reducing it's length: * - * oob_region->offset = NFC_SYS_DATA_SIZE + 2; + * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; */ if (!i) memcpy(rk_nfc_oob_ptr(chip, i), @@ -933,12 +934,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section, if (section) return -ERANGE; - /* - * The beginning of the OOB area stores the reserved data for the NFC, - * the size of the reserved data is NFC_SYS_DATA_SIZE bytes. - */ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; - oob_region->offset = NFC_SYS_DATA_SIZE + 2; + oob_region->offset = 2; return 0; } -- cgit From ea690ad78dd611e3906df5b948a516000b05c1cb Mon Sep 17 00:00:00 2001 From: Johan Jonker Date: Fri, 14 Jul 2023 17:21:21 +0200 Subject: mtd: rawnand: rockchip: Align hwecc vs. raw page helper layouts Currently, read/write_page_hwecc() and read/write_page_raw() are not aligned: there is a mismatch in the OOB bytes which are not read/written at the same offset in both cases (raw vs. hwecc). This is a real problem when relying on the presence of the Page Addresses (PA) when using the NAND chip as a boot device, as the BootROM expects additional data in the OOB area at specific locations. Rockchip boot blocks are written per 4 x 512 byte sectors per page. Each page with boot blocks must have a page address (PA) pointer in OOB to the next page. Pages are written in a pattern depending on the NAND chip ID. Generate boot block page address and pattern for hwecc in user space and copy PA data to/from the already reserved last 4 bytes before ECC in the chip->oob_poi data layout. Align the different helpers. This change breaks existing jffs2 users. 
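Both helpers now derive the per-sector OOB pointer the same way (the loop shared by the read and write paths in the diff below, with comments added):

	for (i = 0; i < ecc->steps; i++) {
		if (!i)	/* sector 0 carries the PA bytes: last 4 bytes of oob_poi */
			oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
		else	/* remaining sectors: BBM and user OOB, in order */
			oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
		...
	}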
Fixes: 058e0e847d54 ("mtd: rawnand: rockchip: NFC driver for RK3308, RK2928 and others") Signed-off-by: Johan Jonker Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/5e782c08-862b-51ae-47ff-3299940928ca@gmail.com --- drivers/mtd/nand/raw/rockchip-nand-controller.c | 34 +++++++++++++++---------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 37fc07ba57aa..5a04680342c3 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -598,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, int pages_per_blk = mtd->erasesize / mtd->writesize; int ret = 0, i, boot_rom_mode = 0; dma_addr_t dma_data, dma_oob; - u32 reg; + u32 tmp; u8 *oob; nand_prog_page_begin_op(chip, page, 0, NULL, 0); @@ -625,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, * * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... * + * The code here just swaps the first 4 bytes with the last + * 4 bytes without losing any data. + * + * The chip->oob_poi data layout: + * + * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 + * * Configure the ECC algorithm supported by the boot ROM. */ if ((page < (pages_per_blk * rknand->boot_blks)) && @@ -635,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, } for (i = 0; i < ecc->steps; i++) { - if (!i) { - reg = 0xFFFFFFFF; - } else { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; - reg = oob[0] | oob[1] << 8 | oob[2] << 16 | - oob[3] << 24; - } - if (!i && boot_rom_mode) - reg = (page & (pages_per_blk - 1)) * 4; + tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24; if (nfc->cfg->type == NFC_V9) - nfc->oob_buf[i] = reg; + nfc->oob_buf[i] = tmp; else - nfc->oob_buf[i * (oob_step / 4)] = reg; + nfc->oob_buf[i * (oob_step / 4)] = tmp; } dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, @@ -812,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, goto timeout_err; } - for (i = 1; i < ecc->steps; i++) { - oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + for (i = 0; i < ecc->steps; i++) { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + if (nfc->cfg->type == NFC_V9) tmp = nfc->oob_buf[i]; else tmp = nfc->oob_buf[i * (oob_step / 4)]; + *oob++ = (u8)tmp; *oob++ = (u8)(tmp >> 8); *oob++ = (u8)(tmp >> 16); -- cgit From 0e52740ffd10c6c316837c6c128f460f1aaba1ea Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Sat, 8 Jul 2023 10:21:35 +0200 Subject: x86/bugs: Increase the x86 bugs vector size to two u32s There was never a doubt in my mind that they would not fit into a single u32 eventually. 
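For context, bug bits are indexed past the feature words, and NBUGINTS sizes the tail of the per-CPU capability array (paraphrased from the x86 cpufeature headers; not part of this diff):

	#define X86_BUG(x)	(NCAPINTS*32 + (x))
	/* in struct cpuinfo_x86: */
	__u32 x86_capability[NCAPINTS + NBUGINTS];

With bug number 30 (X86_BUG_GDS, added later in this series) the single bug word is nearly full, hence the bump to two.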
Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/cpufeatures.h | 2 +- tools/arch/x86/include/asm/cpufeatures.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index cb8ca46213be..1f6d904c6481 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -14,7 +14,7 @@ * Defines x86 CPU feature bits */ #define NCAPINTS 21 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index cb8ca46213be..1f6d904c6481 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -14,7 +14,7 @@ * Defines x86 CPU feature bits */ #define NCAPINTS 21 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used -- cgit From 84f68679032147dcdac9bb4d8eb8f4638e995dc6 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 18 Jul 2023 18:45:37 +0000 Subject: KVM: arm64: Allow pKVM on v1.0 compatible FF-A implementations pKVM initialization fails on systems with v1.1+ FF-A implementations, as the hyp does a strict match on the returned version from FFA_VERSION. This is a stronger assertion than required by the specification, which requires minor revisions be backwards compatible with earlier revisions of the same major version. Relax the check in hyp_ffa_init() to only test the returned major version. Even though v1.1 broke ABI, the expectation is that firmware incapable of using the v1.0 ABI return NOT_SUPPORTED instead of a valid version. Acked-by: Marc Zyngier Acked-by: Will Deacon Link: https://lore.kernel.org/r/20230718184537.3220867-1-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/ffa.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 58dcd92bf346..ab4f5d160c58 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -705,7 +705,20 @@ int hyp_ffa_init(void *pages) if (res.a0 == FFA_RET_NOT_SUPPORTED) return 0; - if (res.a0 != FFA_VERSION_1_0) + /* + * Firmware returns the maximum supported version of the FF-A + * implementation. Check that the returned version is + * backwards-compatible with the hyp according to the rules in DEN0077A + * v1.1 REL0 13.2.1. + * + * Of course, things are never simple when dealing with firmware. v1.1 + * broke ABI with v1.0 on several structures, which is itself + * incompatible with the aforementioned versioning scheme. The + * expectation is that v1.x implementations that do not support the v1.0 + * ABI return NOT_SUPPORTED rather than a version number, according to + * DEN0077A v1.1 REL0 18.6.4. + */ + if (FFA_MAJOR_VERSION(res.a0) != 1) return -EOPNOTSUPP; arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res); -- cgit From 1eb8d61ac5c9c7ec56bb96d433532807509b9288 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 19 Jul 2023 15:42:50 +0800 Subject: clk: mediatek: mt8183: Add back SSPM related clocks This reverts commit 860690a93ef23b567f781c1b631623e27190f101. On the MT8183, the SSPM related clocks were removed claiming a lack of usage. 
This however causes some issues when the driver was converted to the new simple-probe mechanism. This mechanism allocates enough space for all the clocks defined in the clock driver, not the highest index in the DT binding. This leads to out-of-bound writes if their are holes in the DT binding or the driver (due to deprecated or unimplemented clocks). These errors can go unnoticed and cause memory corruption, leading to crashes in unrelated areas, or nothing at all. KASAN will detect them. Add the SSPM related clocks back to the MT8183 clock driver to fully implement the DT binding. The SSPM clocks are for the power management co-processor, and should never be turned off. They are marked as such. Fixes: 3f37ba7cc385 ("clk: mediatek: mt8183: Convert all remaining clocks to common probe") Signed-off-by: Chen-Yu Tsai Link: https://lore.kernel.org/r/20230719074251.1219089-1-wenst@chromium.org Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Stephen Boyd --- drivers/clk/mediatek/clk-mt8183.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 1ba421b38ec5..e31f94387d87 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -328,6 +328,14 @@ static const char * const atb_parents[] = { "syspll_d5" }; +static const char * const sspm_parents[] = { + "clk26m", + "univpll_d2_d4", + "syspll_d2_d2", + "univpll_d2_d2", + "syspll_d3" +}; + static const char * const dpi0_parents[] = { "clk26m", "tvdpll_d2", @@ -507,6 +515,9 @@ static const struct mtk_mux top_muxes[] = { /* CLK_CFG_6 */ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel", atb_parents, 0xa0, 0xa4, 0xa8, 0, 2, 7, 0x004, 24), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SSPM, "sspm_sel", + sspm_parents, 0xa0, 0xa4, 0xa8, 8, 3, 15, 0x004, 25, + CLK_IS_CRITICAL | CLK_SET_RATE_PARENT), MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel", dpi0_parents, 0xa0, 0xa4, 0xa8, 16, 4, 23, 0x004, 26), MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel", @@ -673,10 +684,18 @@ static const struct mtk_gate_regs infra3_cg_regs = { GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, \ &mtk_clk_gate_ops_setclr) +#define GATE_INFRA2_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs, \ + _shift, &mtk_clk_gate_ops_setclr, _flag) + #define GATE_INFRA3(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift, \ &mtk_clk_gate_ops_setclr) +#define GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra3_cg_regs, \ + _shift, &mtk_clk_gate_ops_setclr, _flag) + static const struct mtk_gate infra_clks[] = { /* INFRA0 */ GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "axi_sel", 0), @@ -748,7 +767,11 @@ static const struct mtk_gate infra_clks[] = { GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", "fufs_sel", 12), GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck", "fufs_sel", 13), GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", "axi_sel", 14), + /* infra_sspm is main clock in co-processor, should not be closed in Linux. */ + GATE_INFRA2_FLAGS(CLK_INFRA_SSPM, "infra_sspm", "sspm_sel", 15, CLK_IS_CRITICAL), GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", "axi_sel", 16), + /* infra_sspm_bus_hclk is main clock in co-processor, should not be closed in Linux. 
*/ + GATE_INFRA2_FLAGS(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk", "axi_sel", 17, CLK_IS_CRITICAL), GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", "i2c_sel", 18), GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", "i2c_sel", 19), GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", "i2c_sel", 20), @@ -766,6 +789,10 @@ static const struct mtk_gate infra_clks[] = { GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", "msdc50_0_sel", 0), GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", "msdc50_0_sel", 1), GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", "msdc50_0_sel", 2), + /* infra_sspm_26m_self is main clock in co-processor, should not be closed in Linux. */ + GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", "f_f26m_ck", 3, CLK_IS_CRITICAL), + /* infra_sspm_32k_self is main clock in co-processor, should not be closed in Linux. */ + GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "f_f26m_ck", 4, CLK_IS_CRITICAL), GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5), GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6), GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7), -- cgit From a29b2fccf5f2689a9637be85ff1f51c834c6fb33 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 11 Jul 2023 17:08:12 +0200 Subject: clk: imx93: Propagate correct error in imx93_clocks_probe() smatch reports: drivers/clk/imx/clk-imx93.c:294 imx93_clocks_probe() error: uninitialized symbol 'base'. Indeed, in case of an error, the wrong (yet uninitialized) variable is converted to an error code and returned. Fix this by propagating the error code in the correct variable. Fixes: e02ba11b45764705 ("clk: imx93: fix memory leak and missing unwind goto in imx93_clocks_probe") Reported-by: Dan Carpenter Closes: https://lore.kernel.org/all/9c2acd81-3ad8-485d-819e-9e4201277831@kadam.mountain Reported-by: kernel test robot Closes: https://lore.kernel.org/all/202306161533.4YDmL22b-lkp@intel.com/ Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230711150812.3562221-1-geert+renesas@glider.be Reviewed-by: Peng Fan Signed-off-by: Stephen Boyd --- drivers/clk/imx/clk-imx93.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c index b6c7c2725906..44f435103c65 100644 --- a/drivers/clk/imx/clk-imx93.c +++ b/drivers/clk/imx/clk-imx93.c @@ -291,7 +291,7 @@ static int imx93_clocks_probe(struct platform_device *pdev) anatop_base = devm_of_iomap(dev, np, 0, NULL); of_node_put(np); if (WARN_ON(IS_ERR(anatop_base))) { - ret = PTR_ERR(base); + ret = PTR_ERR(anatop_base); goto unregister_hws; } -- cgit From e7dd44f4f3166db45248414f5df8f615392de47a Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Fri, 7 Jul 2023 21:58:51 +0800 Subject: clk: fixed-mmio: make COMMON_CLK_FIXED_MMIO depend on HAS_IOMEM On s390 systems (aka mainframes), it has classic channel devices for networking and permanent storage that are currently even more common than PCI devices. Hence it could have a fully functional s390 kernel with CONFIG_PCI=n, then the relevant iomem mapping functions [including ioremap(), devm_ioremap(), etc.] are not available. 
Here let COMMON_CLK_FIXED_MMIO depend on HAS_IOMEM so that it won't be built to cause below compiling error if PCI is unset: ------ ld: drivers/clk/clk-fixed-mmio.o: in function `fixed_mmio_clk_setup': clk-fixed-mmio.c:(.text+0x5e): undefined reference to `of_iomap' ld: clk-fixed-mmio.c:(.text+0xba): undefined reference to `iounmap' ------ Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202306211329.ticOJCSv-lkp@intel.com/ Signed-off-by: Baoquan He Cc: Michael Turquette Cc: Stephen Boyd Cc: linux-clk@vger.kernel.org Link: https://lore.kernel.org/r/20230707135852.24292-8-bhe@redhat.com Signed-off-by: Stephen Boyd --- drivers/clk/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 93f38a8178ba..6b3b424addab 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -444,6 +444,7 @@ config COMMON_CLK_BD718XX config COMMON_CLK_FIXED_MMIO bool "Clock driver for Memory Mapped Fixed values" depends on COMMON_CLK && OF + depends on HAS_IOMEM help Support for Memory Mapped IO Fixed clocks -- cgit From 8974eb588283b7d44a7c91fa09fcbaf380339f3a Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Wed, 12 Jul 2023 19:43:11 -0700 Subject: x86/speculation: Add Gather Data Sampling mitigation Gather Data Sampling (GDS) is a hardware vulnerability which allows unprivileged speculative access to data which was previously stored in vector registers. Intel processors that support AVX2 and AVX512 have gather instructions that fetch non-contiguous data elements from memory. On vulnerable hardware, when a gather instruction is transiently executed and encounters a fault, stale data from architectural or internal vector registers may get transiently stored to the destination vector register allowing an attacker to infer the stale data using typical side channel techniques like cache timing attacks. This mitigation is different from many earlier ones for two reasons. First, it is enabled by default and a bit must be set to *DISABLE* it. This is the opposite of normal mitigation polarity. This means GDS can be mitigated simply by updating microcode and leaving the new control bit alone. Second, GDS has a "lock" bit. This lock bit is there because the mitigation affects the hardware security features KeyLocker and SGX. It needs to be enabled and *STAY* enabled for these features to be mitigated against GDS. The mitigation is enabled in the microcode by default. Disable it by setting gather_data_sampling=off or by disabling all mitigations with mitigations=off. 
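For example, an illustrative kernel command line disabling only this mitigation (the root= argument is a placeholder and bootloader syntax varies):

	linux /boot/vmlinuz root=/dev/sda1 ro gather_data_sampling=off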
The mitigation status can be checked by reading: /sys/devices/system/cpu/vulnerabilities/gather_data_sampling Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf --- Documentation/ABI/testing/sysfs-devices-system-cpu | 13 ++- .../admin-guide/hw-vuln/gather_data_sampling.rst | 99 ++++++++++++++++ Documentation/admin-guide/hw-vuln/index.rst | 1 + Documentation/admin-guide/kernel-parameters.txt | 41 ++++--- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/msr-index.h | 11 ++ arch/x86/kernel/cpu/bugs.c | 129 +++++++++++++++++++++ arch/x86/kernel/cpu/common.c | 34 ++++-- arch/x86/kernel/cpu/cpu.h | 1 + drivers/base/cpu.c | 8 ++ 10 files changed, 310 insertions(+), 28 deletions(-) create mode 100644 Documentation/admin-guide/hw-vuln/gather_data_sampling.rst diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index ecd585ca2d50..77942eedf4f6 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -513,17 +513,18 @@ Description: information about CPUs heterogeneity. cpu_capacity: capacity of cpuX. What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/gather_data_sampling + /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/meltdown + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + /sys/devices/system/cpu/vulnerabilities/retbleed + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 - /sys/devices/system/cpu/vulnerabilities/spec_store_bypass - /sys/devices/system/cpu/vulnerabilities/l1tf - /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort - /sys/devices/system/cpu/vulnerabilities/itlb_multihit - /sys/devices/system/cpu/vulnerabilities/mmio_stale_data - /sys/devices/system/cpu/vulnerabilities/retbleed Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst new file mode 100644 index 000000000000..74dab6af7fe1 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -0,0 +1,99 @@ +.. SPDX-License-Identifier: GPL-2.0 + +GDS - Gather Data Sampling +========================== + +Gather Data Sampling is a hardware vulnerability which allows unprivileged +speculative access to data which was previously stored in vector registers. + +Problem +------- +When a gather instruction performs loads from memory, different data elements +are merged into the destination vector register. However, when a gather +instruction that is transiently executed encounters a fault, stale data from +architectural or internal vector registers may get transiently forwarded to the +destination vector register instead. This will allow a malicious attacker to +infer stale data using typical side channel techniques like cache timing +attacks. GDS is a purely sampling-based attack. + +The attacker uses gather instructions to infer the stale vector register data. +The victim does not need to do anything special other than use the vector +registers. 
The victim does not need to use gather instructions to be +vulnerable. + +Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks +are possible. + +Attack scenarios +---------------- +Without mitigation, GDS can infer stale data across virtually all +permission boundaries: + + Non-enclaves can infer SGX enclave data + Userspace can infer kernel data + Guests can infer data from hosts + Guest can infer guest from other guests + Users can infer data from other users + +Because of this, it is important to ensure that the mitigation stays enabled in +lower-privilege contexts like guests and when running outside SGX enclaves. + +The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure +that guests are not allowed to disable the GDS mitigation. If a host erred and +allowed this, a guest could theoretically disable GDS mitigation, mount an +attack, and re-enable it. + +Mitigation mechanism +-------------------- +This issue is mitigated in microcode. The microcode defines the following new +bits: + + ================================ === ============================ + IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability + and mitigation support. + IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable. + IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation + 0 by default. + IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes + to GDS_MITG_DIS are ignored + Can't be cleared once set. + ================================ === ============================ + +GDS can also be mitigated on systems that don't have updated microcode by +disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel +command-line. + +Mitigation control on the kernel command line +--------------------------------------------- +The mitigation can be disabled by setting "gather_data_sampling=off" or +"mitigations=off" on the kernel command line. Not specifying either will +default to the mitigation being enabled. + +GDS System Information +------------------------ +The kernel provides vulnerability status information through sysfs. For +GDS this can be accessed by the following sysfs file: + +/sys/devices/system/cpu/vulnerabilities/gather_data_sampling + +The possible values contained in this file are: + + ============================== ============================================= + Not affected Processor not vulnerable. + Vulnerable Processor vulnerable and mitigation disabled. + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation. + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in + effect and cannot be disabled. + Unknown: Dependent on + hypervisor status Running on a virtual guest processor that is + affected but with no way to know if host + processor is mitigated or vulnerable. + ============================== ============================================= + +GDS Default mitigation +---------------------- +The updated microcode will enable the mitigation by default. The kernel's +default action is to leave the mitigation enabled. diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index e0614760a99e..436fac0bd9c3 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -19,3 +19,4 @@ are configurable at compile, boot or run time. 
l1d_flush.rst processor_mmio_stale_data.rst cross-thread-rsb.rst + gather_data_sampling.rst diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index a1457995fd41..c21d42140d6b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1623,6 +1623,20 @@ Format: off | on default: on + gather_data_sampling= + [X86,INTEL] Control the Gather Data Sampling (GDS) + mitigation. + + Gather Data Sampling is a hardware vulnerability which + allows unprivileged speculative access to data which was + previously stored in vector registers. + + This issue is mitigated by default in updated microcode. + The mitigation may have a performance impact but can be + disabled. + + off: Disable GDS mitigation. + gcov_persist= [GCOV] When non-zero (default), profiling data for kernel modules is saved and remains accessible via debugfs, even when the module is unloaded/reloaded. @@ -3273,24 +3287,25 @@ Disable all optional CPU mitigations. This improves system performance, but it may also expose users to several CPU vulnerabilities. - Equivalent to: nopti [X86,PPC] - if nokaslr then kpti=0 [ARM64] - nospectre_v1 [X86,PPC] - nobp=0 [S390] - nospectre_v2 [X86,PPC,S390,ARM64] - spectre_v2_user=off [X86] - spec_store_bypass_disable=off [X86,PPC] - ssbd=force-off [ARM64] - nospectre_bhb [ARM64] + Equivalent to: if nokaslr then kpti=0 [ARM64] + gather_data_sampling=off [X86] + kvm.nx_huge_pages=off [X86] l1tf=off [X86] mds=off [X86] - tsx_async_abort=off [X86] - kvm.nx_huge_pages=off [X86] - srbds=off [X86,INTEL] + mmio_stale_data=off [X86] no_entry_flush [PPC] no_uaccess_flush [PPC] - mmio_stale_data=off [X86] + nobp=0 [S390] + nopti [X86,PPC] + nospectre_bhb [ARM64] + nospectre_v1 [X86,PPC] + nospectre_v2 [X86,PPC,S390,ARM64] retbleed=off [X86] + spec_store_bypass_disable=off [X86,PPC] + spectre_v2_user=off [X86] + srbds=off [X86,INTEL] + ssbd=force-off [ARM64] + tsx_async_abort=off [X86] Exceptions: This does not have any effect on diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index cb8ca46213be..8d6b34726033 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -483,5 +483,6 @@ #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ +#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 3aedae61af4f..348f3da8b0f6 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -155,6 +155,15 @@ * Not susceptible to Post-Barrier * Return Stack Buffer Predictions. */ +#define ARCH_CAP_GDS_CTRL BIT(25) /* + * CPU is vulnerable to Gather + * Data Sampling (GDS) and + * has controls for mitigation. + */ +#define ARCH_CAP_GDS_NO BIT(26) /* + * CPU is not vulnerable to Gather + * Data Sampling (GDS). 
+ */ #define ARCH_CAP_XAPIC_DISABLE BIT(21) /* * IA32_XAPIC_DISABLE_STATUS MSR @@ -178,6 +187,8 @@ #define RNGDS_MITG_DIS BIT(0) /* SRBDS support */ #define RTM_ALLOW BIT(1) /* TSX development mode */ #define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ +#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */ +#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 9e2a91830f72..7824b101ddfe 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -47,6 +47,7 @@ static void __init taa_select_mitigation(void); static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); +static void __init gds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -160,6 +161,7 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + gds_select_mitigation(); } /* @@ -645,6 +647,120 @@ static int __init l1d_flush_parse_cmdline(char *str) } early_param("l1d_flush", l1d_flush_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "GDS: " fmt + +enum gds_mitigations { + GDS_MITIGATION_OFF, + GDS_MITIGATION_UCODE_NEEDED, + GDS_MITIGATION_FULL, + GDS_MITIGATION_FULL_LOCKED, + GDS_MITIGATION_HYPERVISOR, +}; + +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; + +static const char * const gds_strings[] = { + [GDS_MITIGATION_OFF] = "Vulnerable", + [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [GDS_MITIGATION_FULL] = "Mitigation: Microcode", + [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", + [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +void update_gds_msr(void) +{ + u64 mcu_ctrl_after; + u64 mcu_ctrl; + + switch (gds_mitigation) { + case GDS_MITIGATION_OFF: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl |= GDS_MITG_DIS; + break; + case GDS_MITIGATION_FULL_LOCKED: + /* + * The LOCKED state comes from the boot CPU. APs might not have + * the same state. Make sure the mitigation is enabled on all + * CPUs. + */ + case GDS_MITIGATION_FULL: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl &= ~GDS_MITG_DIS; + break; + case GDS_MITIGATION_UCODE_NEEDED: + case GDS_MITIGATION_HYPERVISOR: + return; + }; + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + /* + * Check to make sure that the WRMSR value was not ignored. Writes to + * GDS_MITG_DIS will be ignored if this processor is locked but the boot + * processor was not. + */ + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); + WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); +} + +static void __init gds_select_mitigation(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + gds_mitigation = GDS_MITIGATION_HYPERVISOR; + goto out; + } + + if (cpu_mitigations_off()) + gds_mitigation = GDS_MITIGATION_OFF; + /* Will verify below that mitigation _can_ be disabled */ + + /* No microcode */ + if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { + gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + goto out; + } + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + if (mcu_ctrl & GDS_MITG_LOCKED) { + if (gds_mitigation == GDS_MITIGATION_OFF) + pr_warn("Mitigation locked. 
Disable failed.\n"); + + /* + * The mitigation is selected from the boot CPU. All other CPUs + * _should_ have the same state. If the boot CPU isn't locked + * but others are then update_gds_msr() will WARN() of the state + * mismatch. If the boot CPU is locked update_gds_msr() will + * ensure the other CPUs have the mitigation enabled. + */ + gds_mitigation = GDS_MITIGATION_FULL_LOCKED; + } + + update_gds_msr(); +out: + pr_info("%s\n", gds_strings[gds_mitigation]); +} + +static int __init gds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return 0; + + if (!strcmp(str, "off")) + gds_mitigation = GDS_MITIGATION_OFF; + + return 0; +} +early_param("gather_data_sampling", gds_parse_cmdline); + #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt @@ -2382,6 +2498,11 @@ static ssize_t retbleed_show_state(char *buf) return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } +static ssize_t gds_show_state(char *buf) +{ + return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -2431,6 +2552,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_RETBLEED: return retbleed_show_state(buf); + case X86_BUG_GDS: + return gds_show_state(buf); + default: break; } @@ -2495,4 +2619,9 @@ ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, cha { return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); } + +ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_GDS); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 52683fddafaf..53224fe3ca6f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1250,6 +1250,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define RETBLEED BIT(3) /* CPU is affected by SMT (cross-thread) return predictions */ #define SMT_RSB BIT(4) +/* CPU is affected by GDS */ +#define GDS BIT(5) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1262,19 +1264,21 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), - VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), - VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | 
MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), - VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), @@ -1406,6 +1410,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); + /* + * Check if CPU is vulnerable to GDS. If running in a virtual machine on + * an affected processor, the VMM may have disabled the use of GATHER by + * disabling AVX2. The only way to do this in HW is to clear XCR0[2], + * which means that AVX will be disabled. + */ + if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && + boot_cpu_has(X86_FEATURE_AVX)) + setup_force_cpu_bug(X86_BUG_GDS); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; @@ -1962,6 +1976,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); update_srbds_msr(); + if (boot_cpu_has_bug(X86_BUG_GDS)) + update_gds_msr(); tsx_ap_init(); } diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 1c44630d4789..1dcd7d4e38ef 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -83,6 +83,7 @@ void cpu_select_mitigations(void); extern void x86_spec_ctrl_setup_ap(void); extern void update_srbds_msr(void); +extern void update_gds_msr(void); extern enum spectre_v2_mitigation spectre_v2_enabled; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index c1815b9dae68..0469c09e8a8c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -577,6 +577,12 @@ ssize_t __weak cpu_show_retbleed(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -588,6 +594,7 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -601,6 +608,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, &dev_attr_retbleed.attr, + 
&dev_attr_gather_data_sampling.attr, NULL }; -- cgit From c718ca0e99401d80d2480c08e1b02cf5f7cd7033 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Wed, 19 Jul 2023 17:54:00 +0000 Subject: KVM: arm64: Fix hardware enable/disable flows for pKVM When running in protected mode, the hyp stub is disabled after pKVM is initialized, meaning the host cannot enable/disable the hyp at runtime. As such, kvm_arm_hardware_enabled is always 1 after initialization, and kvm_arch_hardware_enable() never enables the vgic maintenance irq or timer irqs. Unconditionally enable/disable the vgic + timer irqs in the respective calls, instead relying on the percpu bookkeeping in the generic code to keep track of which cpus have the interrupts unmasked. Fixes: 466d27e48d7c ("KVM: arm64: Simplify the CPUHP logic") Reported-by: Oliver Upton Suggested-by: Oliver Upton Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20230719175400.647154-1-rananta@google.com Acked-by: Marc Zyngier Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 72dc53a75d1c..d8540563a623 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1874,8 +1874,6 @@ static void _kvm_arch_hardware_enable(void *discard) int kvm_arch_hardware_enable(void) { - int was_enabled; - /* * Most calls to this function are made with migration * disabled, but not with preemption disabled. The former is @@ -1884,13 +1882,10 @@ int kvm_arch_hardware_enable(void) */ preempt_disable(); - was_enabled = __this_cpu_read(kvm_arm_hardware_enabled); _kvm_arch_hardware_enable(NULL); - if (!was_enabled) { - kvm_vgic_cpu_up(); - kvm_timer_cpu_up(); - } + kvm_vgic_cpu_up(); + kvm_timer_cpu_up(); preempt_enable(); @@ -1907,10 +1902,8 @@ static void _kvm_arch_hardware_disable(void *discard) void kvm_arch_hardware_disable(void) { - if (__this_cpu_read(kvm_arm_hardware_enabled)) { - kvm_timer_cpu_down(); - kvm_vgic_cpu_down(); - } + kvm_timer_cpu_down(); + kvm_vgic_cpu_down(); if (!is_protected_kvm_enabled()) _kvm_arch_hardware_disable(NULL); -- cgit From 733c758e509b86a5d38b9af927817258b88ededd Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 19 Jul 2023 23:18:55 +0000 Subject: KVM: arm64: Rephrase percpu enable/disable tracking in terms of hyp kvm_arm_hardware_enabled is rather misleading, since it doesn't track the state of all hardware resources needed for running a VM. What it actually tracks is whether or not the hyp cpu context has been initialized. Since we're now at the point where vgic + timer irq management has been separated from kvm_arm_hardware_enabled, rephrase it (and the associated helpers) to make it clear what state is being tracked. 
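The resulting split is easy to model outside the kernel: hyp initialization stays guarded by a per-cpu flag, while the vgic/timer calls are made unconditionally and rely on those subsystems' own bookkeeping. A standalone C sketch of that shape (all names here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    static bool hyp_initialized;  /* stands in for the per-cpu flag */
    static int irq_up_calls;      /* stands in for the subsystems' own bookkeeping */

    static void cpu_hyp_init(void)
    {
            if (!hyp_initialized) {   /* idempotent: guarded by the flag */
                    /* one-time hyp context setup would go here */
                    hyp_initialized = true;
            }
    }

    static void hardware_enable(void)
    {
            cpu_hyp_init();
            irq_up_calls++;           /* vgic/timer up: called unconditionally */
    }

    int main(void)
    {
            hardware_enable();
            hardware_enable();        /* re-initializes nothing, still counts the up call */
            printf("initialized=%d up_calls=%d\n", hyp_initialized, irq_up_calls);
            return 0;
    }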
Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20230719231855.262973-1-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index d8540563a623..d1cb298a58a0 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -55,7 +55,7 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); static bool vgic_present, kvm_arm_initialised; -static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); +static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use); bool is_kvm_arm_initialised(void) @@ -1864,11 +1864,19 @@ static void cpu_hyp_reinit(void) cpu_hyp_init_features(); } -static void _kvm_arch_hardware_enable(void *discard) +static void cpu_hyp_init(void *discard) { - if (!__this_cpu_read(kvm_arm_hardware_enabled)) { + if (!__this_cpu_read(kvm_hyp_initialized)) { cpu_hyp_reinit(); - __this_cpu_write(kvm_arm_hardware_enabled, 1); + __this_cpu_write(kvm_hyp_initialized, 1); + } +} + +static void cpu_hyp_uninit(void *discard) +{ + if (__this_cpu_read(kvm_hyp_initialized)) { + cpu_hyp_reset(); + __this_cpu_write(kvm_hyp_initialized, 0); } } @@ -1882,7 +1890,7 @@ int kvm_arch_hardware_enable(void) */ preempt_disable(); - _kvm_arch_hardware_enable(NULL); + cpu_hyp_init(NULL); kvm_vgic_cpu_up(); kvm_timer_cpu_up(); @@ -1892,21 +1900,13 @@ int kvm_arch_hardware_enable(void) return 0; } -static void _kvm_arch_hardware_disable(void *discard) -{ - if (__this_cpu_read(kvm_arm_hardware_enabled)) { - cpu_hyp_reset(); - __this_cpu_write(kvm_arm_hardware_enabled, 0); - } -} - void kvm_arch_hardware_disable(void) { kvm_timer_cpu_down(); kvm_vgic_cpu_down(); if (!is_protected_kvm_enabled()) - _kvm_arch_hardware_disable(NULL); + cpu_hyp_uninit(NULL); } #ifdef CONFIG_CPU_PM @@ -1915,16 +1915,16 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, void *v) { /* - * kvm_arm_hardware_enabled is left with its old value over + * kvm_hyp_initialized is left with its old value over * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should * re-enable hyp. */ switch (cmd) { case CPU_PM_ENTER: - if (__this_cpu_read(kvm_arm_hardware_enabled)) + if (__this_cpu_read(kvm_hyp_initialized)) /* - * don't update kvm_arm_hardware_enabled here - * so that the hardware will be re-enabled + * don't update kvm_hyp_initialized here + * so that the hyp will be re-enabled * when we resume. See below. */ cpu_hyp_reset(); @@ -1932,8 +1932,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, return NOTIFY_OK; case CPU_PM_ENTER_FAILED: case CPU_PM_EXIT: - if (__this_cpu_read(kvm_arm_hardware_enabled)) - /* The hardware was enabled before suspend. */ + if (__this_cpu_read(kvm_hyp_initialized)) + /* The hyp was enabled before suspend. */ cpu_hyp_reinit(); return NOTIFY_OK; @@ -2014,7 +2014,7 @@ static int __init init_subsystems(void) /* * Enable hardware so that subsystem initialisation can access EL2. 
*/ - on_each_cpu(_kvm_arch_hardware_enable, NULL, 1); + on_each_cpu(cpu_hyp_init, NULL, 1); /* * Register CPU lower-power notifier @@ -2052,7 +2052,7 @@ out: hyp_cpu_pm_exit(); if (err || !is_protected_kvm_enabled()) - on_each_cpu(_kvm_arch_hardware_disable, NULL, 1); + on_each_cpu(cpu_hyp_uninit, NULL, 1); return err; } @@ -2090,7 +2090,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits) * The stub hypercalls are now disabled, so set our local flag to * prevent a later re-init attempt in kvm_arch_hardware_enable(). */ - __this_cpu_write(kvm_arm_hardware_enabled, 1); + __this_cpu_write(kvm_hyp_initialized, 1); preempt_enable(); return ret; -- cgit From 553a5c03e90a6087e88f8ff878335ef0621536fb Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Wed, 12 Jul 2023 19:43:12 -0700 Subject: x86/speculation: Add force option to GDS mitigation The Gather Data Sampling (GDS) vulnerability allows malicious software to infer stale data previously stored in vector registers. This may include sensitive data such as cryptographic keys. GDS is mitigated in microcode, and systems with up-to-date microcode are protected by default. However, any affected system that is running with older microcode will still be vulnerable to GDS attacks. Since the gather instructions used by the attacker are part of the AVX2 and AVX512 extensions, disabling these extensions prevents gather instructions from being executed, thereby mitigating GDS. Disabling AVX2 alone would be sufficient, but there is no way to do so with that granularity: clearing XCR0[2] disables all of AVX, with no option to disable just AVX2. Add a kernel parameter gather_data_sampling=force that will enable the microcode mitigation if available, otherwise it will disable AVX on affected systems. This option will be ignored if mitigations=off is also given on the command line. This is a *big* hammer. It is known to break buggy userspace that uses incomplete, buggy AVX enumeration. Unfortunately, such userspace does exist in the wild: https://www.mail-archive.com/bug-coreutils@gnu.org/msg33046.html [ dhansen: add some more ominous warnings about disabling AVX ] Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf --- .../admin-guide/hw-vuln/gather_data_sampling.rst | 18 ++++++++++++++---- Documentation/admin-guide/kernel-parameters.txt | 8 +++++++- arch/x86/kernel/cpu/bugs.c | 20 +++++++++++++++++++- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst index 74dab6af7fe1..40b7a6260010 100644 --- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -60,14 +60,21 @@ bits: ================================ === ============================ GDS can also be mitigated on systems that don't have updated microcode by -disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel -command-line. +disabling AVX. This can be done by setting gather_data_sampling="force" or +"clearcpuid=avx" on the kernel command-line. + +If used, these options will disable AVX use by turning off XSAVE YMM support. +However, the processor will still enumerate AVX support. Userspace that +does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM +support will break.
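To make the enumeration requirement concrete, here is a hedged userspace sketch of a check that keeps working when the kernel disables YMM state (x86, GCC/Clang; xgetbv is issued via inline assembly):

    #include <stdbool.h>
    #include <stdio.h>
    #include <cpuid.h>

    static unsigned long long xgetbv0(void)
    {
            unsigned int lo, hi;

            __asm__ volatile ("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
            return ((unsigned long long)hi << 32) | lo;
    }

    static bool avx_usable(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return false;
            /* CPUID says AVX exists and the OS uses XSAVE... */
            if (!(ecx & bit_AVX) || !(ecx & bit_OSXSAVE))
                    return false;
            /* ...and XCR0[2:1] (YMM and SSE state) are both OS-enabled. */
            return (xgetbv0() & 0x6) == 0x6;
    }

    int main(void)
    {
            printf("AVX usable: %s\n", avx_usable() ? "yes" : "no");
            return 0;
    }

Userspace that only checks the CPUID AVX bit skips the last step and is exactly the kind of code these options break.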
Mitigation control on the kernel command line --------------------------------------------- The mitigation can be disabled by setting "gather_data_sampling=off" or -"mitigations=off" on the kernel command line. Not specifying either will -default to the mitigation being enabled. +"mitigations=off" on the kernel command line. Not specifying either will default +to the mitigation being enabled. Specifying "gather_data_sampling=force" will +use the microcode mitigation when available or disable AVX on affected systems +where the microcode hasn't been updated to include the mitigation. GDS System Information ------------------------ @@ -83,6 +90,9 @@ The possible values contained in this file are: Vulnerable Processor vulnerable and mitigation disabled. Vulnerable: No microcode Processor vulnerable and microcode is missing mitigation. + Mitigation: AVX disabled, + no microcode Processor is vulnerable and microcode is missing + mitigation. AVX disabled as mitigation. Mitigation: Microcode Processor is vulnerable and mitigation is in effect. Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index c21d42140d6b..816b966bed0f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1633,7 +1633,13 @@ This issue is mitigated by default in updated microcode. The mitigation may have a performance impact but can be - disabled. + disabled. On systems without the microcode mitigation + disabling AVX serves as a mitigation. + + force: Disable AVX to mitigate systems without + microcode mitigation. No effect if the microcode + mitigation is present. Known to cause crashes in + userspace with buggy AVX enumeration. off: Disable GDS mitigation. diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 7824b101ddfe..155e8d1c325e 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -653,6 +653,7 @@ early_param("l1d_flush", l1d_flush_parse_cmdline); enum gds_mitigations { GDS_MITIGATION_OFF, GDS_MITIGATION_UCODE_NEEDED, + GDS_MITIGATION_FORCE, GDS_MITIGATION_FULL, GDS_MITIGATION_FULL_LOCKED, GDS_MITIGATION_HYPERVISOR, @@ -663,6 +664,7 @@ static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", [GDS_MITIGATION_FULL] = "Mitigation: Microcode", [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", @@ -688,6 +690,7 @@ void update_gds_msr(void) rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl &= ~GDS_MITG_DIS; break; + case GDS_MITIGATION_FORCE: case GDS_MITIGATION_UCODE_NEEDED: case GDS_MITIGATION_HYPERVISOR: return; @@ -722,10 +725,23 @@ static void __init gds_select_mitigation(void) /* No microcode */ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { - gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + if (gds_mitigation == GDS_MITIGATION_FORCE) { + /* + * This only needs to be done on the boot CPU so do it + * here rather than in update_gds_msr() + */ + setup_clear_cpu_cap(X86_FEATURE_AVX); + pr_warn("Microcode update needed! 
Disabling AVX as mitigation.\n"); + } else { + gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + } goto out; } + /* Microcode has mitigation, use it */ + if (gds_mitigation == GDS_MITIGATION_FORCE) + gds_mitigation = GDS_MITIGATION_FULL; + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); if (mcu_ctrl & GDS_MITG_LOCKED) { if (gds_mitigation == GDS_MITIGATION_OFF) @@ -756,6 +772,8 @@ static int __init gds_parse_cmdline(char *str) if (!strcmp(str, "off")) gds_mitigation = GDS_MITIGATION_OFF; + else if (!strcmp(str, "force")) + gds_mitigation = GDS_MITIGATION_FORCE; return 0; } -- cgit From 53cf5797f114ba2bd86d23a862302119848eff19 Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Wed, 12 Jul 2023 19:43:13 -0700 Subject: x86/speculation: Add Kconfig option for GDS Gather Data Sampling (GDS) is mitigated in microcode. However, on systems that haven't received the updated microcode, disabling AVX can act as a mitigation. Add a Kconfig option that uses the microcode mitigation if available and disables AVX otherwise. Setting this option has no effect on systems not affected by GDS. This is the equivalent of setting gather_data_sampling=force. Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf --- arch/x86/Kconfig | 19 +++++++++++++++++++ arch/x86/kernel/cpu/bugs.c | 4 ++++ 2 files changed, 23 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7422db409770..36d3f61c4ed9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2603,6 +2603,25 @@ config SLS against straight line speculation. The kernel image might be slightly larger. +config GDS_FORCE_MITIGATION + bool "Force GDS Mitigation" + depends on CPU_SUP_INTEL + default n + help + Gather Data Sampling (GDS) is a hardware vulnerability which allows + unprivileged speculative access to data which was previously stored in + vector registers. + + This option is equivalent to setting gather_data_sampling=force on the + command line. The microcode mitigation is used if present, otherwise + AVX is disabled as a mitigation. On affected systems that are missing + the microcode any userspace code that unconditionally uses AVX will + break with this option set. + + Setting this option on systems not vulnerable to GDS has no effect. + + If in doubt, say N. + endif config ARCH_HAS_ADD_PAGES diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 155e8d1c325e..41821a4551a2 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -659,7 +659,11 @@ enum gds_mitigations { GDS_MITIGATION_HYPERVISOR, }; +#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION) +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; +#else static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; +#endif static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", -- cgit From 81ac7e5d741742d650b4ed6186c4826c1a0631a7 Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Wed, 12 Jul 2023 19:43:14 -0700 Subject: KVM: Add GDS_NO support to KVM Gather Data Sampling (GDS) is a transient execution attack using gather instructions from the AVX2 and AVX512 extensions. This attack allows malicious code to infer data that was previously stored in vector registers. Systems that are not vulnerable to GDS will set the GDS_NO bit of the IA32_ARCH_CAPABILITIES MSR. This is useful for VM guests that may think they are on vulnerable systems that are, in fact, not affected. 
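As a small illustration of what the bit buys a guest, a standalone sketch of the guest-side decision (the bit value matches the ARCH_CAP_GDS_NO definition added earlier in this series; the helper name is invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARCH_CAP_GDS_NO (UINT64_C(1) << 26)

    /* GDS_NO set means: hardware unaffected, or the host is mitigated and
     * chose to hide the bug from the guest. Either way, no guest action. */
    static bool guest_needs_gds_mitigation(uint64_t arch_capabilities)
    {
            return !(arch_capabilities & ARCH_CAP_GDS_NO);
    }

    int main(void)
    {
            printf("%d\n", guest_needs_gds_mitigation(ARCH_CAP_GDS_NO)); /* 0 */
            printf("%d\n", guest_needs_gds_mitigation(0));               /* 1 */
            return 0;
    }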
Guests that are running on affected hosts where the mitigation is enabled are protected as if they were running on an unaffected system. On all hosts that are not affected or that are mitigated, set the GDS_NO bit. Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf --- arch/x86/kernel/cpu/bugs.c | 7 +++++++ arch/x86/kvm/x86.c | 7 ++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 41821a4551a2..7985c658d129 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -674,6 +674,13 @@ static const char * const gds_strings[] = { [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", }; +bool gds_ucode_mitigated(void) +{ + return (gds_mitigation == GDS_MITIGATION_FULL || + gds_mitigation == GDS_MITIGATION_FULL_LOCKED); +} +EXPORT_SYMBOL_GPL(gds_ucode_mitigated); + void update_gds_msr(void) { u64 mcu_ctrl_after; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a6b9bea62fb8..ad09c8540189 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -314,6 +314,8 @@ u64 __read_mostly host_xcr0; static struct kmem_cache *x86_emulator_cache; +extern bool gds_ucode_mitigated(void); + /* * When called, it means the previous get/set msr reached an invalid msr. * Return true if we want to ignore/silent this failed msr access. @@ -1607,7 +1609,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr) ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ - ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO) + ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO) static u64 kvm_get_arch_capabilities(void) { @@ -1664,6 +1666,9 @@ static u64 kvm_get_arch_capabilities(void) */ } + if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated()) + data |= ARCH_CAP_GDS_NO; + return data; } -- cgit From d5ace2a776442d80674eff9ed42e737f7dd95056 Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Fri, 21 Jul 2023 21:51:16 -0700 Subject: x86/hyperv: Disable IBT when hypercall page lacks ENDBR instruction On hardware that supports Indirect Branch Tracking (IBT), Hyper-V VMs with ConfigVersion 9.3 or later support IBT in the guest. However, current versions of Hyper-V have a bug in that there's not an ENDBR64 instruction at the beginning of the hypercall page. Since hypercalls are made with an indirect call to the hypercall page, all hypercall attempts fail with an exception and Linux panics. A Hyper-V fix is in progress to add ENDBR64. But guard against the Linux panic by clearing X86_FEATURE_IBT if the hypercall page doesn't start with ENDBR. The VM will boot and run without IBT. If future Linux 32-bit kernels were to support IBT, additional hypercall page hackery would be needed to make IBT work for such kernels in a Hyper-V VM. 
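The check boils down to comparing the first four bytes of the hypercall page with the ENDBR64 encoding. A standalone sketch of the same test (0xfa1e0ff3 is the little-endian dword for the byte sequence f3 0f 1e fa; the kernel's gen_endbr() computes the same constant indirectly so the literal does not itself become a valid ENDBR in the image):

    #include <stdbool.h>
    #include <stdint.h>

    static bool starts_with_endbr64(const void *code)
    {
            uint32_t insn;

            __builtin_memcpy(&insn, code, sizeof(insn)); /* avoid alignment UB */
            return insn == UINT32_C(0xfa1e0ff3);         /* f3 0f 1e fa */
    }

    int main(void)
    {
            static const unsigned char endbr[] = { 0xf3, 0x0f, 0x1e, 0xfa };
            static const unsigned char nops[]  = { 0x90, 0x90, 0x90, 0x90 };

            return starts_with_endbr64(endbr) && !starts_with_endbr64(nops) ? 0 : 1;
    }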
Cc: stable@vger.kernel.org Signed-off-by: Michael Kelley Link: https://lore.kernel.org/r/1690001476-98594-1-git-send-email-mikelley@microsoft.com Signed-off-by: Wei Liu --- arch/x86/hyperv/hv_init.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 6c04b52f139b..953e280c07c3 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -471,6 +472,26 @@ void __init hyperv_init(void) wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); } + /* + * Some versions of Hyper-V that provide IBT in guest VMs have a bug + * in that there's no ENDBR64 instruction at the entry to the + * hypercall page. Because hypercalls are invoked via an indirect call + * to the hypercall page, all hypercall attempts fail when IBT is + * enabled, and Linux panics. For such buggy versions, disable IBT. + * + * Fixed versions of Hyper-V always provide ENDBR64 on the hypercall + * page, so if future Linux kernel versions enable IBT for 32-bit + * builds, additional hypercall page hackery will be required here + * to provide an ENDBR32. + */ +#ifdef CONFIG_X86_KERNEL_IBT + if (cpu_feature_enabled(X86_FEATURE_IBT) && + *(u32 *)hv_hypercall_pg != gen_endbr()) { + setup_clear_cpu_cap(X86_FEATURE_IBT); + pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n"); + } +#endif + /* * hyperv_init() is called before LAPIC is initialized: see * apic_intr_mode_init() -> x86_platform.apic_post_init() and -- cgit From 060f2b979c4e0e894c381c76a4dcad24376feddd Mon Sep 17 00:00:00 2001 From: ZhiHu Date: Sun, 23 Jul 2023 23:12:47 +0000 Subject: x86/hyperv: fix a warning in mshyperv.h The following checkpatch warning is removed: WARNING: Use #include instead of Signed-off-by: ZhiHu Reviewed-by: Michael Kelley Signed-off-by: Wei Liu --- arch/x86/include/asm/mshyperv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 49bb4f2bd300..cd17e706f9ca 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include #include -- cgit From ed0cf84e9cc42e6310961c87709621f1825c2bb8 Mon Sep 17 00:00:00 2001 From: Ani Sinha Date: Wed, 5 Jul 2023 19:14:07 +0530 Subject: vmbus_testing: fix wrong python syntax for integer value comparison It is incorrect in python to compare integer values using the "is" keyword. The "is" keyword in python is used to compare references to two objects, not their values. Newer version of python3 (version 3.8) throws a warning when such incorrect comparison is made. For value comparison, "==" should be used. Fix this in the code and suppress the following warning: /usr/sbin/vmbus_testing:167: SyntaxWarning: "is" with a literal. Did you mean "=="? 
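The underlying confusion is identity versus value: CPython happens to cache small integers, so "x is 1" can appear to work until it doesn't. The same class of bug expressed in C terms, comparing addresses where contents are meant, as a purely illustrative analogy:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char a[] = "1";
            char b[] = "1";

            /* Identity, like Python's "is": distinct objects, compares false. */
            printf("same object: %d\n", (void *)a == (void *)b);

            /* Value, like Python's "==": contents match, compares true. */
            printf("same value:  %d\n", strcmp(a, b) == 0);
            return 0;
    }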
Signed-off-by: Ani Sinha Link: https://lore.kernel.org/r/20230705134408.6302-1-anisinha@redhat.com Signed-off-by: Wei Liu --- tools/hv/vmbus_testing | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/hv/vmbus_testing b/tools/hv/vmbus_testing index e7212903dd1d..4467979d8f69 100755 --- a/tools/hv/vmbus_testing +++ b/tools/hv/vmbus_testing @@ -164,7 +164,7 @@ def recursive_file_lookup(path, file_map): def get_all_devices_test_status(file_map): for device in file_map: - if (get_test_state(locate_state(device, file_map)) is 1): + if (get_test_state(locate_state(device, file_map)) == 1): print("Testing = ON for: {}" .format(device.split("/")[5])) else: @@ -203,7 +203,7 @@ def write_test_files(path, value): def set_test_state(state_path, state_value, quiet): write_test_files(state_path, state_value) - if (get_test_state(state_path) is 1): + if (get_test_state(state_path) == 1): if (not quiet): print("Testing = ON for device: {}" .format(state_path.split("/")[5])) -- cgit From aa6fde93f3a49e42c0fe0490d7f3711bac0d162e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 17 Jul 2023 12:50:02 -1000 Subject: workqueue: Scale up wq_cpu_intensive_thresh_us if BogoMIPS is below 4000 wq_cpu_intensive_thresh_us is used to detect CPU-hogging per-cpu work items. Once detected, they're excluded from concurrency management to prevent them from blocking other per-cpu work items. If CONFIG_WQ_CPU_INTENSIVE_REPORT is enabled, repeat offenders are also reported so that the code can be updated. The default threshold is 10ms which is long enough to do a fair bit of work on modern CPUs while short enough to usually not be noticeable. This unfortunately leads to a lot of, arguably spurious, detections on very slow CPUs. Using the same threshold across CPUs whose performance levels may be multiple orders of magnitude apart doesn't make a whole lot of sense. This patch scales up wq_cpu_intensive_thresh_us up to 1 second when BogoMIPS is below 4000. This is obviously very inaccurate but it doesn't have to be accurate to be useful. The mechanism is still useful when the threshold is fully scaled up and the benefits of reports are usually shared with everyone regardless of who's reporting, so as long as there are a sufficient number of fast machines reporting, we don't lose much. Some (or is it all?) ARM CPUs systematically report significantly lower BogoMIPS. While this doesn't break anything, given how widespread ARM CPUs are, it's at least a missed opportunity and it probably would be a good idea to teach workqueue about it. Signed-off-by: Tejun Heo Reported-and-Tested-by: Geert Uytterhoeven --- kernel/workqueue.c | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 02a8f402eeb5..800b4208dba9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -52,6 +52,7 @@ #include #include #include +#include #include "workqueue_internal.h" @@ -338,8 +339,10 @@ static cpumask_var_t *wq_numa_possible_cpumask; * Per-cpu work items which run for longer than the following threshold are * automatically considered CPU intensive and excluded from concurrency * management to prevent them from noticeably delaying other per-cpu work items. + ULONG_MAX indicates that the user hasn't overridden it with a boot parameter. + The actual value is initialized in wq_cpu_intensive_thresh_init().
*/ -static unsigned long wq_cpu_intensive_thresh_us = 10000; +static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX; module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644); static bool wq_disable_numa; @@ -6513,6 +6516,42 @@ void __init workqueue_init_early(void) !system_freezable_power_efficient_wq); } +static void __init wq_cpu_intensive_thresh_init(void) +{ + unsigned long thresh; + unsigned long bogo; + + /* if the user set it to a specific value, keep it */ + if (wq_cpu_intensive_thresh_us != ULONG_MAX) + return; + + /* + * The default of 10ms is derived from the fact that most modern (as of + * 2023) processors can do a lot in 10ms and that it's just below what + * most consider human-perceivable. However, the kernel also runs on a + * lot slower CPUs including microcontrollers where the threshold is way + * too low. + * + * Let's scale up the threshold upto 1 second if BogoMips is below 4000. + * This is by no means accurate but it doesn't have to be. The mechanism + * is still useful even when the threshold is fully scaled up. Also, as + * the reports would usually be applicable to everyone, some machines + * operating on longer thresholds won't significantly diminish their + * usefulness. + */ + thresh = 10 * USEC_PER_MSEC; + + /* see init/calibrate.c for lpj -> BogoMIPS calculation */ + bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); + if (bogo < 4000) + thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); + + pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", + loops_per_jiffy, bogo, thresh); + + wq_cpu_intensive_thresh_us = thresh; +} + /** * workqueue_init - bring workqueue subsystem fully online * @@ -6528,6 +6567,8 @@ void __init workqueue_init(void) struct worker_pool *pool; int cpu, bkt; + wq_cpu_intensive_thresh_init(); + /* * It'd be simpler to initialize NUMA in workqueue_init_early() but * CPU to node mapping may not be available that early on some -- cgit From ce92232614a5fb16992de8eb85bba7cb90772a1f Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 Jul 2023 13:38:23 +0100 Subject: KVM: arm64: Factor out code for checking (h)VHE mode into a macro The code for checking whether the kernel is in (h)VHE mode is repeated, and will be needed again in future patches. Factor it out in a macro. No functional change intended. No change in emitted assembly code intended. Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/kvmarm/20230724123829.2929609-3-tabba@google.com/ Reviewed-by: Marc Zyngier Signed-off-by: Oliver Upton --- arch/arm64/include/asm/el2_setup.h | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 8e5ffb58f83e..16d3bafa715d 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -31,6 +31,13 @@ .Lskip_hcrx_\@: .endm +/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */ +.macro __check_hvhe fail, tmp + mrs \tmp, hcr_el2 + and \tmp, \tmp, #HCR_E2H + cbz \tmp, \fail +.endm + /* * Allow Non-secure EL1 and EL0 to access physical timer and counter. 
* This is not necessary for VHE, since the host kernel runs in EL2, @@ -43,9 +50,7 @@ */ .macro __init_el2_timers mov x0, #3 // Enable EL1 physical timers - mrs x1, hcr_el2 - and x1, x1, #HCR_E2H - cbz x1, .LnVHE_\@ + __check_hvhe .LnVHE_\@, x1 lsl x0, x0, #10 .LnVHE_\@: msr cnthctl_el2, x0 @@ -139,9 +144,7 @@ /* Coprocessor traps */ .macro __init_el2_cptr - mrs x1, hcr_el2 - and x1, x1, #HCR_E2H - cbz x1, .LnVHE_\@ + __check_hvhe .LnVHE_\@, x1 mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN) b .Lset_cptr_\@ .LnVHE_\@: @@ -269,9 +272,7 @@ .Linit_sve_\@: /* SVE register access */ mrs x0, cptr_el2 // Disable SVE traps - mrs x1, hcr_el2 - and x1, x1, #HCR_E2H - cbz x1, .Lcptr_nvhe_\@ + __check_hvhe .Lcptr_nvhe_\@, x1 // VHE case orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) -- cgit From 45a3681a10ff1732fcd7a177fbf1f9ceeaffd7c9 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 Jul 2023 13:38:24 +0100 Subject: KVM: arm64: Use the appropriate feature trap register for SVE at EL2 setup Use the architectural feature trap/control register that corresponds to the current KVM mode, i.e., CPTR_EL2 or CPACR_EL1, when setting up SVE feature traps. Signed-off-by: Fuad Tabba Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724123829.2929609-4-tabba@google.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/el2_setup.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 16d3bafa715d..41c5b02f38c5 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -146,11 +146,12 @@ .macro __init_el2_cptr __check_hvhe .LnVHE_\@, x1 mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN) - b .Lset_cptr_\@ + msr cpacr_el1, x0 + b .Lskip_set_cptr_\@ .LnVHE_\@: mov x0, #0x33ff -.Lset_cptr_\@: msr cptr_el2, x0 // Disable copro. traps to EL2 +.Lskip_set_cptr_\@: .endm /* Disable any fine grained traps */ @@ -271,17 +272,19 @@ check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2 .Linit_sve_\@: /* SVE register access */ - mrs x0, cptr_el2 // Disable SVE traps __check_hvhe .Lcptr_nvhe_\@, x1 - // VHE case + // (h)VHE case + mrs x0, cpacr_el1 // Disable SVE traps orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) - b .Lset_cptr_\@ + msr cpacr_el1, x0 + b .Lskip_set_cptr_\@ .Lcptr_nvhe_\@: // nVHE case + mrs x0, cptr_el2 // Disable SVE traps bic x0, x0, #CPTR_EL2_TZ -.Lset_cptr_\@: msr cptr_el2, x0 +.Lskip_set_cptr_\@: isb mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector msr_s SYS_ZCR_EL2, x1 // length for EL1. -- cgit From 380624d4358b0150804d279c20632555e453bc1f Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 Jul 2023 13:38:25 +0100 Subject: KVM: arm64: Disable SME traps for (h)VHE at setup Ensure that SME traps are disabled for (h)VHE when setting up EL2, as they are for nVHE. 
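At the register level, "as they are for nVHE" means two different layouts: in the CPACR_EL1 format used by (h)VHE the SMEN bits are set to disable traps, while in the CPTR_EL2 format the TSM bit is cleared. A standalone model of that logic (bit positions follow the Arm ARM but are reproduced here only for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CPACR_EL1_SMEN_EL1EN (UINT64_C(1) << 24)
    #define CPACR_EL1_SMEN_EL0EN (UINT64_C(1) << 25)
    #define CPTR_EL2_TSM         (UINT64_C(1) << 12)

    static uint64_t disable_sme_traps(bool hvhe, uint64_t reg)
    {
            if (hvhe)   /* CPACR_EL1 layout: setting SMEN disables traps */
                    return reg | CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN;
            /* CPTR_EL2 layout: clearing TSM disables traps */
            return reg & ~CPTR_EL2_TSM;
    }

    int main(void)
    {
            printf("hvhe: %#llx\n", (unsigned long long)disable_sme_traps(true, 0));
            printf("nvhe: %#llx\n", (unsigned long long)disable_sme_traps(false, CPTR_EL2_TSM));
            return 0;
    }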
Signed-off-by: Fuad Tabba Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724123829.2929609-5-tabba@google.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/el2_setup.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 41c5b02f38c5..b7afaa026842 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -293,9 +293,19 @@ check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2 .Linit_sme_\@: /* SME register access and priority mapping */ + __check_hvhe .Lcptr_nvhe_sme_\@, x1 + + // (h)VHE case + mrs x0, cpacr_el1 // Disable SME traps + orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN) + msr cpacr_el1, x0 + b .Lskip_set_cptr_sme_\@ + +.Lcptr_nvhe_sme_\@: // nVHE case mrs x0, cptr_el2 // Disable SME traps bic x0, x0, #CPTR_EL2_TSM msr cptr_el2, x0 +.Lskip_set_cptr_sme_\@: isb mrs x1, sctlr_el2 -- cgit From 90ae31c65d5afdd0864017c9354247ddb601917f Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 Jul 2023 13:38:26 +0100 Subject: KVM: arm64: Helper to write to appropriate feature trap register based on mode Factor out the code that decides whether to write to the feature trap registers, CPTR_EL2 or CPACR_EL1, based on the KVM mode, i.e., (h)VHE or nVHE. This function will be used in the subsequent patch. No functional change intended. Signed-off-by: Fuad Tabba Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724123829.2929609-6-tabba@google.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_emulate.h | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index efc0b45d79c3..f5941f6dce49 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -571,6 +571,14 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature) return test_bit(feature, vcpu->arch.features); } +static __always_inline void kvm_write_cptr_el2(u64 val) +{ + if (has_vhe() || has_hvhe()) + write_sysreg(val, cpacr_el1); + else + write_sysreg(val, cptr_el2); +} + static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) { u64 val; @@ -597,9 +605,6 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu) { u64 val = kvm_get_reset_cptr_el2(vcpu); - if (has_vhe() || has_hvhe()) - write_sysreg(val, cpacr_el1); - else - write_sysreg(val, cptr_el2); + kvm_write_cptr_el2(val); } #endif /* __ARM64_KVM_EMULATE_H__ */ -- cgit From a9626099a51f697939d35983b92a9384a4c4a676 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 Jul 2023 13:38:27 +0100 Subject: KVM: arm64: Use the appropriate feature trap register when activating traps Instead of writing directly to cptr_el2, use the helper that selects which feature trap register to write to based on the KVM mode. 
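How the helper is consumed can be modeled in standalone C with the system registers as plain variables. Purely illustrative: with HCR_EL2.E2H set, the CPACR_EL1 encoding is what controls EL2's traps, which is why the (h)VHE leg writes that register:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cpacr_el1, cptr_el2;   /* stand-ins for the real sysregs */

    static void kvm_write_cptr_el2_model(bool vhe_or_hvhe, uint64_t val)
    {
            if (vhe_or_hvhe)
                    cpacr_el1 = val;  /* E2H redirects the EL1 encoding to EL2 */
            else
                    cptr_el2 = val;
    }

    int main(void)
    {
            kvm_write_cptr_el2_model(false, 0x33ff); /* the nVHE default from __init_el2_cptr */
            printf("cptr_el2=%#llx\n", (unsigned long long)cptr_el2);
            return 0;
    }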
Fixes: 75c76ab5a641 ("KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration") Signed-off-by: Fuad Tabba Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724123829.2929609-7-tabba@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 0a6271052def..e89a23153e85 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -63,7 +63,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) __activate_traps_fpsimd32(vcpu); } - write_sysreg(val, cptr_el2); + kvm_write_cptr_el2(val); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { -- cgit From 7af0d5e50006614cbf313373df708df79d9f4657 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 Jul 2023 13:38:28 +0100 Subject: KVM: arm64: Fix resetting SVE trap values on reset for hVHE Ensure that SVE traps are disabled for hVHE, if the FPSIMD state isn't owned by the guest, when getting the reset value for the architectural feature control register. Fixes: 75c76ab5a641 ("KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration") Signed-off-by: Fuad Tabba Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724123829.2929609-8-tabba@google.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_emulate.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index f5941f6dce49..adfb7d0ac55b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -588,6 +588,10 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) CPACR_EL1_ZEN_EL1EN); } else if (has_hvhe()) { val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN); + + if (!vcpu_has_sve(vcpu) || + (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)) + val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN; } else { val = CPTR_NVHE_EL2_RES1; -- cgit From 375110ab51dec5dcd077b8fa95075b2c67499119 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 Jul 2023 13:38:29 +0100 Subject: KVM: arm64: Fix resetting SME trap values on reset for (h)VHE Ensure that SME traps are disabled for (h)VHE when getting the reset value for the architectural feature control register. 
Fixes: 75c76ab5a641 ("KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration") Signed-off-by: Fuad Tabba Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724123829.2929609-9-tabba@google.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_emulate.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index adfb7d0ac55b..3d6725ff0bf6 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -586,12 +586,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) if (has_vhe()) { val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN); + if (cpus_have_final_cap(ARM64_SME)) + val |= CPACR_EL1_SMEN_EL1EN; } else if (has_hvhe()) { val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN); if (!vcpu_has_sve(vcpu) || (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)) val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN; + if (cpus_have_final_cap(ARM64_SME)) + val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN; } else { val = CPTR_NVHE_EL2_RES1; -- cgit From 01b94b0f3922039f7d3e0d1eeb33b8891746b65f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 24 Jul 2023 14:18:42 +0200 Subject: KVM: arm64: fix __kvm_host_psci_cpu_entry() prototype The kvm_host_psci_cpu_entry() function was renamed in order to add a wrapper around it, but the prototype did not change, so now the missing-prototype warning came back in W=1 builds: arch/arm64/kvm/hyp/nvhe/psci-relay.c:203:28: error: no previous prototype for function '__kvm_host_psci_cpu_entry' [-Werror,-Wmissing-prototypes] asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on) Fixes: dcf89d1111995 ("KVM: arm64: Add missing BTI instructions") Signed-off-by: Arnd Bergmann Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724121850.1386668-1-arnd@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_asm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 7d170aaa2db4..24e28bb2d95b 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -278,7 +278,7 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void); asmlinkage void kvm_unexpected_el2_exception(void); struct kvm_cpu_context; void handle_trap(struct kvm_cpu_context *host_ctxt); -asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on); +asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on); void __noreturn __pkvm_init_finalise(void); void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); void kvm_patch_vector_branch(struct alt_instr *alt, -- cgit From a8f014ec6a214c94ed6a9ff5ba8904a5cadd42a6 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 20 Jul 2023 08:15:06 -0700 Subject: vboxsf: Use flexible arrays for trailing string member The declaration of struct shfl_string used trailing fake flexible arrays for the string member. This was tripping FORTIFY_SOURCE since commit df8fc4e934c1 ("kbuild: Enable -fstrict-flex-arrays=3"). Replace the utf8 and utf16 members with actual flexible arrays, drop the unused ucs2 member, and retain a 2 byte padding to keep the structure size the same.
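The layout constraint being preserved is worth spelling out: the structure must stay 6 bytes while the union gains true flexible members. Standard C forbids a bare flexible array member inside a union (this is what the kernel's DECLARE_FLEX_ARRAY() helper works around), so the standalone sketch below substitutes GNU C zero-length arrays and is illustrative rather than a drop-in:

    #include <stdint.h>

    struct shfl_string_sketch {
            uint16_t size;
            uint16_t length;
            union {
                    uint8_t  legacy_padding[2]; /* keeps sizeof() at 6 */
                    uint8_t  utf8[0];           /* GNU C zero-length stand-ins */
                    uint16_t utf16[0];
            } string;
    };

    _Static_assert(sizeof(struct shfl_string_sketch) == 6,
                   "padding must preserve the wire-format size");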
Reported-by: Larry Finger Closes: https://lore.kernel.org/lkml/ab3a70e9-60ed-0f13-e3d4-8866eaccc8c1@lwfinger.net/ Tested-by: Larry Finger Reviewed-by: Hans de Goede Link: https://lore.kernel.org/r/20230720151458.never.673-kees@kernel.org Signed-off-by: Kees Cook --- fs/vboxsf/shfl_hostintf.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/vboxsf/shfl_hostintf.h b/fs/vboxsf/shfl_hostintf.h index aca829062c12..069a019c9247 100644 --- a/fs/vboxsf/shfl_hostintf.h +++ b/fs/vboxsf/shfl_hostintf.h @@ -68,9 +68,9 @@ struct shfl_string { /** UTF-8 or UTF-16 string. Nul terminated. */ union { - u8 utf8[2]; - u16 utf16[1]; - u16 ucs2[1]; /* misnomer, use utf16. */ + u8 legacy_padding[2]; + DECLARE_FLEX_ARRAY(u8, utf8); + DECLARE_FLEX_ARRAY(u16, utf16); } string; }; VMMDEV_ASSERT_SIZE(shfl_string, 6); -- cgit From 2dedcf414bb01b8d966eb445db1d181d92304fb2 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Mon, 24 Jul 2023 10:42:29 +0800 Subject: drm/ttm: check null pointer before accessing when swapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a check to avoid null pointer dereference as below: [ 90.002283] general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI [ 90.002292] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] [ 90.002346] ? exc_general_protection+0x159/0x240 [ 90.002352] ? asm_exc_general_protection+0x26/0x30 [ 90.002357] ? ttm_bo_evict_swapout_allowable+0x322/0x5e0 [ttm] [ 90.002365] ? ttm_bo_evict_swapout_allowable+0x42e/0x5e0 [ttm] [ 90.002373] ttm_bo_swapout+0x134/0x7f0 [ttm] [ 90.002383] ? __pfx_ttm_bo_swapout+0x10/0x10 [ttm] [ 90.002391] ? lock_acquire+0x44d/0x4f0 [ 90.002398] ? ttm_device_swapout+0xa5/0x260 [ttm] [ 90.002412] ? lock_acquired+0x355/0xa00 [ 90.002416] ? do_raw_spin_trylock+0xb6/0x190 [ 90.002421] ? __pfx_lock_acquired+0x10/0x10 [ 90.002426] ? ttm_global_swapout+0x25/0x210 [ttm] [ 90.002442] ttm_device_swapout+0x198/0x260 [ttm] [ 90.002456] ? __pfx_ttm_device_swapout+0x10/0x10 [ttm] [ 90.002472] ttm_global_swapout+0x75/0x210 [ttm] [ 90.002486] ttm_tt_populate+0x187/0x3f0 [ttm] [ 90.002501] ttm_bo_handle_move_mem+0x437/0x590 [ttm] [ 90.002517] ttm_bo_validate+0x275/0x430 [ttm] [ 90.002530] ? __pfx_ttm_bo_validate+0x10/0x10 [ttm] [ 90.002544] ? kasan_save_stack+0x33/0x60 [ 90.002550] ? kasan_set_track+0x25/0x30 [ 90.002554] ? __kasan_kmalloc+0x8f/0xa0 [ 90.002558] ? amdgpu_gtt_mgr_new+0x81/0x420 [amdgpu] [ 90.003023] ? ttm_resource_alloc+0xf6/0x220 [ttm] [ 90.003038] amdgpu_bo_pin_restricted+0x2dd/0x8b0 [amdgpu] [ 90.003210] ? __x64_sys_ioctl+0x131/0x1a0 [ 90.003210] ? 
do_syscall_64+0x60/0x90 Fixes: a2848d08742c ("drm/ttm: never consider pinned BOs for eviction&swap") Tested-by: Mikhail Gavrilov Signed-off-by: Guchun Chen Reviewed-by: Alex Deucher Reviewed-by: Christian König Cc: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20230724024229.1118444-1-guchun.chen@amd.com Signed-off-by: Christian König --- drivers/gpu/drm/ttm/ttm_bo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7139a522b2f3..54e3083076b7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -519,7 +519,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, if (bo->pin_count) { *locked = false; - *busy = false; + if (busy) + *busy = false; return false; } -- cgit From fb3bd914b3ec28f5fb697ac55c4846ac2d542855 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 28 Jun 2023 11:02:39 +0200 Subject: x86/srso: Add a Speculative RAS Overflow mitigation Add a mitigation for the speculative return address stack overflow vulnerability found on AMD processors. The mitigation works by ensuring all RET instructions speculate to a controlled location, similar to how speculation is controlled in the retpoline sequence. To accomplish this, the __x86_return_thunk forces the CPU to mispredict every function return using a 'safe return' sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the untraining function srso_untrain_ret_alias() and the safe return function srso_safe_ret_alias() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns. In older Zen1 and Zen2, this is accomplished using a reinterpretation technique similar to Retbleed one: srso_untrain_ret() and srso_safe_ret(). Signed-off-by: Borislav Petkov (AMD) --- Documentation/admin-guide/hw-vuln/index.rst | 1 + Documentation/admin-guide/hw-vuln/srso.rst | 133 ++++++++++++++++++++++++ Documentation/admin-guide/kernel-parameters.txt | 11 ++ arch/x86/Kconfig | 7 ++ arch/x86/include/asm/cpufeatures.h | 5 + arch/x86/include/asm/nospec-branch.h | 15 ++- arch/x86/include/asm/processor.h | 2 + arch/x86/kernel/alternative.c | 4 +- arch/x86/kernel/cpu/amd.c | 14 +++ arch/x86/kernel/cpu/bugs.c | 106 +++++++++++++++++++ arch/x86/kernel/cpu/common.c | 8 +- arch/x86/kernel/vmlinux.lds.S | 29 +++++- arch/x86/lib/retpoline.S | 82 ++++++++++++++- drivers/base/cpu.c | 8 ++ include/linux/cpu.h | 2 + tools/objtool/arch/x86/decode.c | 5 +- 16 files changed, 422 insertions(+), 10 deletions(-) create mode 100644 Documentation/admin-guide/hw-vuln/srso.rst diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index e0614760a99e..ff4d3fa2a75c 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -19,3 +19,4 @@ are configurable at compile, boot or run time. l1d_flush.rst processor_mmio_stale_data.rst cross-thread-rsb.rst + srso diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst new file mode 100644 index 000000000000..32eb5e6db272 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -0,0 +1,133 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +Speculative Return Stack Overflow (SRSO) +======================================== + +This is a mitigation for the speculative return stack overflow (SRSO) +vulnerability found on AMD processors. The mechanism is by now the well +known scenario of poisoning CPU functional units - the Branch Target +Buffer (BTB) and Return Address Predictor (RAP) in this case - and then +tricking the elevated privilege domain (the kernel) into leaking +sensitive data. + +AMD CPUs predict RET instructions using a Return Address Predictor (aka +Return Address Stack/Return Stack Buffer). In some cases, a non-architectural +CALL instruction (i.e., an instruction predicted to be a CALL but is +not actually a CALL) can create an entry in the RAP which may be used +to predict the target of a subsequent RET instruction. + +The specific circumstances that lead to this vary by microarchitecture +but the concern is that an attacker can mis-train the CPU BTB to predict +non-architectural CALL instructions in kernel space and use this to +control the speculative target of a subsequent kernel RET, potentially +leading to information disclosure via a speculative side-channel. + +The issue is tracked under CVE-2023-20569. + +Affected processors +------------------- + +AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older +processors have not been investigated. + +System information and options +------------------------------ + +First of all, it is required that the latest microcode be loaded for +mitigations to be effective. + +The sysfs file showing SRSO mitigation status is: + + /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow + +The possible values in this file are: + + - 'Not affected' The processor is not vulnerable + + - 'Vulnerable: no microcode' The processor is vulnerable, no + microcode extending IBPB functionality + to address the vulnerability has been + applied. + + - 'Mitigation: microcode' Extended IBPB functionality microcode + patch has been applied. It does not + address User->Kernel and Guest->Host + transitions protection but it does + address User->User and VM->VM attack + vectors. + + (spec_rstack_overflow=microcode) + + - 'Mitigation: safe RET' Software-only mitigation. It complements + the extended IBPB microcode patch + functionality by addressing User->Kernel + and Guest->Host transitions protection. + + Selected by default or by + spec_rstack_overflow=safe-ret + + - 'Mitigation: IBPB' Similar protection to "safe RET" above + but employs an IBPB barrier on privilege + domain crossings (User->Kernel, + Guest->Host). + + (spec_rstack_overflow=ibpb) + + - 'Mitigation: IBPB on VMEXIT' Mitigation addressing the cloud provider + scenario - the Guest->Host transitions + only. + + (spec_rstack_overflow=ibpb-vmexit) + +In order to exploit the vulnerability, an attacker needs to: + + - gain local access on the machine + + - break kASLR + + - find gadgets in the running kernel in order to use them in the exploit + + - potentially create and pin an additional workload on the sibling + thread, depending on the microarchitecture (not necessary on fam 0x19) + + - run the exploit + +Considering the performance implications of each mitigation type, the +default one is 'Mitigation: safe RET' which should take care of most +attack vectors, including the local User->Kernel one. + +As always, the user is advised to keep her/his system up-to-date by +applying software updates regularly. 
+ +The default setting will be reevaluated when needed and especially when +new attack vectors appear. + +As one can surmise, 'Mitigation: safe RET' does come at the cost of some +performance depending on the workload. If one trusts her/his userspace +and does not want to suffer the performance impact, one can always +disable the mitigation with spec_rstack_overflow=off. + +Similarly, 'Mitigation: IBPB' is another full mitigation type employing +an indirect branch prediction barrier after having applied the required +microcode patch for one's system. This mitigation also comes at +a performance cost. + +Mitigation: safe RET +-------------------- + +The mitigation works by ensuring all RET instructions speculate to +a controlled location, similar to how speculation is controlled in the +retpoline sequence. To accomplish this, the __x86_return_thunk forces +the CPU to mispredict every function return using a 'safe return' +sequence. + +To ensure the safety of this mitigation, the kernel must ensure that the +safe return sequence is itself free from attacker interference. In Zen3 +and Zen4, this is accomplished by creating a BTB alias between the +untraining function srso_untrain_ret_alias() and the safe return +function srso_safe_ret_alias() which results in evicting a potentially +poisoned BTB entry and using that safe one for all function returns. + +In older Zen1 and Zen2, this is accomplished using a reinterpretation +technique similar to the Retbleed one: srso_untrain_ret() and +srso_safe_ret(). diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index a1457995fd41..f5ec3dade58e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5875,6 +5875,17 @@ Not specifying this option is equivalent to spectre_v2_user=auto. + spec_rstack_overflow= + [X86] Control RAS overflow mitigation on AMD Zen CPUs + + off - Disable mitigation + microcode - Enable microcode mitigation only + safe-ret - Enable sw-only safe RET mitigation (default) + ibpb - Enable mitigation by issuing IBPB on + kernel entry + ibpb-vmexit - Issue IBPB only on VMEXIT + (cloud-specific mitigation) + spec_store_bypass_disable= [HW] Control Speculative Store Bypass (SSB) Disable mitigation (Speculative Store Bypass vulnerability) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7422db409770..d29f1e28a936 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2593,6 +2593,13 @@ config CPU_IBRS_ENTRY This mitigates both spectre_v2 and retbleed at great cost to performance. +config CPU_SRSO + bool "Mitigate speculative RAS overflow on AMD" + depends on CPU_SUP_AMD && X86_64 && RETHUNK + default y + help + Enable the SRSO mitigation needed on AMD Zen1-4 machines. 
+ config SLS bool "Mitigate Straight-Line-Speculation" depends on CC_HAS_SLS && X86_64 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 1f6d904c6481..bc1b4d68e616 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -309,6 +309,9 @@ #define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */ #define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */ +#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ +#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -484,4 +487,6 @@ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ +/* BUG word 2 */ +#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 1a65cf4acb2b..43fe1c747085 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -211,7 +211,8 @@ * eventually turn into it's own annotation. */ .macro VALIDATE_UNRET_END -#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY) +#if defined(CONFIG_NOINSTR_VALIDATION) && \ + (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) ANNOTATE_RETPOLINE_SAFE nop #endif @@ -296,6 +297,11 @@ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif + +#ifdef CONFIG_CPU_SRSO + ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ + "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +#endif .endm .macro UNTRAIN_RET_FROM_CALL @@ -307,6 +313,11 @@ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH #endif + +#ifdef CONFIG_CPU_SRSO + ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ + "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +#endif .endm @@ -332,6 +343,8 @@ extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; extern void __x86_return_thunk(void); extern void zen_untrain_ret(void); +extern void srso_untrain_ret(void); +extern void srso_untrain_ret_alias(void); extern void entry_ibpb(void); #ifdef CONFIG_CALL_THUNKS diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d46300e94f85..7c67db7c9f53 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -682,9 +682,11 @@ extern u16 get_llc_id(unsigned int cpu); #ifdef CONFIG_CPU_SUP_AMD extern u32 amd_get_nodes_per_socket(void); extern u32 amd_get_highest_perf(void); +extern bool cpu_has_ibpb_brtype_microcode(void); #else static inline u32 amd_get_nodes_per_socket(void) { return 0; } static inline u32 amd_get_highest_perf(void) { return 0; } +static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } #endif extern unsigned long arch_align_stack(unsigned long sp); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 2dcf3a06af09..920a8ca7a8f8 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -707,7 +707,9 @@ static int patch_return(void *addr, 
struct insn *insn, u8 *bytes) int i = 0; /* Patch the custom return thunks... */ - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { + if (cpu_feature_enabled(X86_FEATURE_RETHUNK) || + cpu_feature_enabled(X86_FEATURE_SRSO) || + cpu_feature_enabled(X86_FEATURE_SRSO_ALIAS)) { i = JMP32_INSN_SIZE; __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); } else { diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 571abf808ea3..169cb255c483 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1235,3 +1235,17 @@ u32 amd_get_highest_perf(void) return 255; } EXPORT_SYMBOL_GPL(amd_get_highest_perf); + +bool cpu_has_ibpb_brtype_microcode(void) +{ + u8 fam = boot_cpu_data.x86; + + if (fam == 0x17) { + /* Zen1/2 IBPB flushes branch type predictions too. */ + return boot_cpu_has(X86_FEATURE_AMD_IBPB); + } else if (fam == 0x19) { + return false; + } + + return false; +} diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 9e2a91830f72..31cef61da03a 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -47,6 +47,7 @@ static void __init taa_select_mitigation(void); static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); +static void __init srso_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -160,6 +161,7 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + srso_select_mitigation(); } /* @@ -2185,6 +2187,95 @@ static int __init l1tf_cmdline(char *str) } early_param("l1tf", l1tf_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt + +enum srso_mitigation { + SRSO_MITIGATION_NONE, + SRSO_MITIGATION_MICROCODE, + SRSO_MITIGATION_SAFE_RET, +}; + +enum srso_mitigation_cmd { + SRSO_CMD_OFF, + SRSO_CMD_MICROCODE, + SRSO_CMD_SAFE_RET, +}; + +static const char * const srso_strings[] = { + [SRSO_MITIGATION_NONE] = "Vulnerable", + [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", + [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", +}; + +static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; +static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; + +static int __init srso_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + srso_cmd = SRSO_CMD_OFF; + else if (!strcmp(str, "microcode")) + srso_cmd = SRSO_CMD_MICROCODE; + else if (!strcmp(str, "safe-ret")) + srso_cmd = SRSO_CMD_SAFE_RET; + else + pr_err("Ignoring unknown SRSO option (%s).", str); + + return 0; +} +early_param("spec_rstack_overflow", srso_parse_cmdline); + +#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." 
+ +static void __init srso_select_mitigation(void) +{ + bool has_microcode; + + if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) + return; + + has_microcode = cpu_has_ibpb_brtype_microcode(); + if (!has_microcode) { + pr_warn("IBPB-extending microcode not applied!\n"); + pr_warn(SRSO_NOTICE); + } + + switch (srso_cmd) { + case SRSO_CMD_OFF: + return; + + case SRSO_CMD_MICROCODE: + if (has_microcode) { + srso_mitigation = SRSO_MITIGATION_MICROCODE; + pr_warn(SRSO_NOTICE); + } + break; + + case SRSO_CMD_SAFE_RET: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + if (boot_cpu_data.x86 == 0x19) + setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); + else + setup_force_cpu_cap(X86_FEATURE_SRSO); + srso_mitigation = SRSO_MITIGATION_SAFE_RET; + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + return; + } + break; + + default: + break; + + } + + pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); +} + #undef pr_fmt #define pr_fmt(fmt) fmt @@ -2382,6 +2473,13 @@ static ssize_t retbleed_show_state(char *buf) return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } +static ssize_t srso_show_state(char *buf) +{ + return sysfs_emit(buf, "%s%s\n", + srso_strings[srso_mitigation], + (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -2431,6 +2529,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_RETBLEED: return retbleed_show_state(buf); + case X86_BUG_SRSO: + return srso_show_state(buf); + default: break; } @@ -2495,4 +2596,9 @@ ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, cha { return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); } + +ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 52683fddafaf..d4d823eae0fc 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1250,6 +1250,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define RETBLEED BIT(3) /* CPU is affected by SMT (cross-thread) return predictions */ #define SMT_RSB BIT(4) +/* CPU is affected by SRSO */ +#define SRSO BIT(5) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1281,8 +1283,9 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_AMD(0x15, RETBLEED), VULNBL_AMD(0x16, RETBLEED), - VULNBL_AMD(0x17, RETBLEED | SMT_RSB), + VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB), + VULNBL_AMD(0x19, SRSO), {} }; @@ -1406,6 +1409,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); + if (cpu_matches(cpu_vuln_blacklist, SRSO)) + setup_force_cpu_bug(X86_BUG_SRSO); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 03c885d3640f..e76813230192 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -134,13 +134,27 @@ SECTIONS SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.*) + *(.text.__x86.indirect_thunk) + *(.text.__x86.return_thunk) 
__indirect_thunk_end = .; #endif STATIC_CALL_TEXT ALIGN_ENTRY_TEXT_BEGIN +#ifdef CONFIG_CPU_SRSO + *(.text.__x86.rethunk_untrain) +#endif + ENTRY_TEXT + +#ifdef CONFIG_CPU_SRSO + /* + * See the comment above srso_untrain_ret_alias()'s + * definition. + */ + . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text.__x86.rethunk_safe) +#endif ALIGN_ENTRY_TEXT_END *(.gnu.warning) @@ -509,7 +523,18 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((__x86_return_thunk & 0x3f) == 0, "__x86_return_thunk not cacheline-aligned"); +. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); +#endif + +#ifdef CONFIG_CPU_SRSO +/* + * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR + * of the two function addresses: + */ +. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) - + (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), + "SRSO function pair won't alias"); #endif #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 3fd066d42ec0..845cfb0d748f 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -11,6 +11,7 @@ #include #include #include +#include .section .text.__x86.indirect_thunk @@ -131,6 +132,45 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) */ #ifdef CONFIG_RETHUNK +/* + * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * special addresses: + * + * - srso_untrain_ret_alias() is 2M aligned + * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * and 20 in its virtual address are set (while those bits in the + * srso_untrain_ret_alias() function are cleared). + * + * This guarantees that those two addresses will alias in the branch + * target buffer of Zen3/4 generations, leading to any potential + * poisoned entries at that BTB slot to get evicted. + * + * As a result, srso_safe_ret_alias() becomes a safe return. + */ +#ifdef CONFIG_CPU_SRSO + .section .text.__x86.rethunk_untrain + +SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + ASM_NOP2 + lfence + jmp __x86_return_thunk +SYM_FUNC_END(srso_untrain_ret_alias) +__EXPORT_THUNK(srso_untrain_ret_alias) + + .section .text.__x86.rethunk_safe +#endif + +/* Needs a definition for the __x86_return_thunk alternative below. */ +SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +#ifdef CONFIG_CPU_SRSO + add $8, %_ASM_SP + UNWIND_HINT_FUNC +#endif + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_safe_ret_alias) + .section .text.__x86.return_thunk /* @@ -143,7 +183,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__x86_return_thunk - zen_untrain_ret), 0xcc + .skip 64 - (__ret - zen_untrain_ret), 0xcc SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* @@ -175,10 +215,10 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * evicted, __x86_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL) +SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__x86_return_thunk) +SYM_CODE_END(__ret) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -189,11 +229,45 @@ SYM_CODE_END(__x86_return_thunk) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. 
*/ - jmp __x86_return_thunk + jmp __ret int3 SYM_FUNC_END(zen_untrain_ret) __EXPORT_THUNK(zen_untrain_ret) +/* + * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * above. On kernel entry, srso_untrain_ret() is executed which is a + * + * movabs $0xccccccc308c48348,%rax + * + * and when the return thunk executes the inner label srso_safe_ret() + * later, it is a stack manipulation and a RET which is mispredicted and + * thus a "safe" one to use. + */ + .align 64 + .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc +SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_NOENDBR + .byte 0x48, 0xb8 + +SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) + add $8, %_ASM_SP + ret + int3 + int3 + int3 + lfence + call srso_safe_ret + int3 +SYM_CODE_END(srso_safe_ret) +SYM_FUNC_END(srso_untrain_ret) +__EXPORT_THUNK(srso_untrain_ret) + +SYM_FUNC_START(__x86_return_thunk) + ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ + "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS + int3 +SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) #endif /* CONFIG_RETHUNK */ diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index c1815b9dae68..f111586d1cce 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -577,6 +577,12 @@ ssize_t __weak cpu_show_retbleed(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -588,6 +594,7 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -601,6 +608,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, &dev_attr_retbleed.attr, + &dev_attr_spec_rstack_overflow.attr, NULL }; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6e6e57ec69e8..23ac87be1ff1 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -70,6 +70,8 @@ extern ssize_t cpu_show_mmio_stale_data(struct device *dev, char *buf); extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 2e1caabecb18..2d51fa8da9e8 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -824,5 +824,8 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk"); + return !strcmp(sym->name, "__x86_return_thunk") || + !strcmp(sym->name, "srso_untrain_ret") || + !strcmp(sym->name, "srso_safe_ret") || + !strcmp(sym->name, "__ret"); } -- cgit From 79113e4060aba744787a81edb9014f2865193854 Mon Sep 17 00:00:00 2001 From: 
"Borislav Petkov (AMD)" Date: Tue, 18 Jul 2023 11:13:40 +0200 Subject: x86/srso: Add IBPB_BRTYPE support Add support for the synthetic CPUID flag which "if this bit is 1, it indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor." This flag is there so that this capability in guests can be detected easily (otherwise one would have to track microcode revisions which is impossible for guests). It is also needed only for Zen3 and -4. The other two (Zen1 and -2) always flush branch type predictions by default. Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/kernel/cpu/bugs.c | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index bc1b4d68e616..8aebe95d2fad 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -445,6 +445,8 @@ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ +#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ + /* * BUG word(s) */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 31cef61da03a..ff61ef61277a 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2238,10 +2238,20 @@ static void __init srso_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) return; - has_microcode = cpu_has_ibpb_brtype_microcode(); + /* + * The first check is for the kernel running as a guest in order + * for guests to verify whether IBPB is a viable mitigation. + */ + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode(); if (!has_microcode) { pr_warn("IBPB-extending microcode not applied!\n"); pr_warn(SRSO_NOTICE); + } else { + /* + * Enable the synthetic (even if in a real CPUID leaf) + * flag for guests. + */ + setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); } switch (srso_cmd) { -- cgit From 1b5277c0ea0b247393a9c426769fde18cff5e2f6 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Thu, 29 Jun 2023 17:43:40 +0200 Subject: x86/srso: Add SRSO_NO support Add support for the CPUID flag which denotes that the CPU is not affected by SRSO. 
Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/include/asm/msr-index.h | 1 + arch/x86/include/asm/nospec-branch.h | 6 +++--- arch/x86/kernel/cpu/amd.c | 12 ++++++------ arch/x86/kernel/cpu/bugs.c | 24 ++++++++++++++++++++---- arch/x86/kernel/cpu/common.c | 6 ++++-- arch/x86/kvm/cpuid.c | 3 +++ 7 files changed, 39 insertions(+), 15 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 8aebe95d2fad..93070aabbb2f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -445,7 +445,9 @@ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ +#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ +#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ /* * BUG word(s) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 3aedae61af4f..c81483a3c13d 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -57,6 +57,7 @@ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ +#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */ #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 43fe1c747085..8346c33760c1 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -492,11 +492,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) : "memory"); } +extern u64 x86_pred_cmd; + static inline void indirect_branch_prediction_barrier(void) { - u64 val = PRED_CMD_IBPB; - - alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); + alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB); } /* The Intel SPEC CTRL MSR base value cache */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 169cb255c483..834f310b2f1a 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1240,12 +1240,12 @@ bool cpu_has_ibpb_brtype_microcode(void) { u8 fam = boot_cpu_data.x86; - if (fam == 0x17) { - /* Zen1/2 IBPB flushes branch type predictions too. */ + /* Zen1/2 IBPB flushes branch type predictions too. */ + if (fam == 0x17) return boot_cpu_has(X86_FEATURE_AMD_IBPB); - } else if (fam == 0x19) { + /* Poke the MSR bit on Zen3/4 to check its presence. 
*/ + else if (fam == 0x19) + return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB); + else return false; - } - - return false; } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ff61ef61277a..439ecad62317 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -57,6 +57,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); DEFINE_PER_CPU(u64, x86_spec_ctrl_current); EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); +u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; +EXPORT_SYMBOL_GPL(x86_pred_cmd); + static DEFINE_MUTEX(spec_ctrl_mutex); /* Update SPEC_CTRL MSR and its cached copy unconditionally */ @@ -2236,7 +2239,7 @@ static void __init srso_select_mitigation(void) bool has_microcode; if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) - return; + goto pred_cmd; /* * The first check is for the kernel running as a guest in order @@ -2249,9 +2252,18 @@ static void __init srso_select_mitigation(void) } else { /* * Enable the synthetic (even if in a real CPUID leaf) - * flag for guests. + * flags for guests. */ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); + setup_force_cpu_cap(X86_FEATURE_SBPB); + + /* + * Zen1/2 with SMT off aren't vulnerable after the right + * IBPB microcode has been applied. + */ + if ((boot_cpu_data.x86 < 0x19) && + (cpu_smt_control == CPU_SMT_DISABLED)) + setup_force_cpu_cap(X86_FEATURE_SRSO_NO); } switch (srso_cmd) { @@ -2274,16 +2286,20 @@ static void __init srso_select_mitigation(void) srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); - return; + goto pred_cmd; } break; default: break; - } pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); + +pred_cmd: + if (boot_cpu_has(X86_FEATURE_SRSO_NO) || + srso_cmd == SRSO_CMD_OFF) + x86_pred_cmd = PRED_CMD_SBPB; } #undef pr_fmt diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d4d823eae0fc..5576cdac3b4a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1409,8 +1409,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); - if (cpu_matches(cpu_vuln_blacklist, SRSO)) - setup_force_cpu_bug(X86_BUG_SRSO); + if (!cpu_has(c, X86_FEATURE_SRSO_NO)) { + if (cpu_matches(cpu_vuln_blacklist, SRSO)) + setup_force_cpu_bug(X86_BUG_SRSO); + } if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 7f4d13383cf2..d3432687c9e6 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -729,6 +729,9 @@ void kvm_set_cpu_caps(void) F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ ); + if (cpu_feature_enabled(X86_FEATURE_SRSO_NO)) + kvm_cpu_cap_set(X86_FEATURE_SRSO_NO); + kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX, F(PERFMON_V2) ); -- cgit From 233d6f68b98d480a7c42ebe78c38f79d44741ca9 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Thu, 6 Jul 2023 15:04:35 +0200 Subject: x86/srso: Add IBPB Add the option to mitigate using IBPB on a kernel entry. Pull in the Retbleed alternative so that the IBPB call from there can be used. Also, if Retbleed mitigation is done using IBPB, the same mitigation can and must be used here. 
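As a sketch of that Retbleed coupling, the check conceptually looks like the fragment below; this is illustrative only, not the literal hunk (the real check lands in srso_select_mitigation() in the diff that follows, where 'has_microcode' is the variable computed earlier in that function):

	/* Sketch: if Retbleed already selected IBPB, reuse it for SRSO. */
	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && has_microcode) {
		srso_mitigation = SRSO_MITIGATION_IBPB;
		goto pred_cmd;	/* skip the command-line switch entirely */
	}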
Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/nospec-branch.h | 2 +- arch/x86/kernel/cpu/bugs.c | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 8346c33760c1..3faf044569a5 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -290,7 +290,7 @@ */ .macro UNTRAIN_RET #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ - defined(CONFIG_CALL_DEPTH_TRACKING) + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 439ecad62317..f3cc432ed818 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2197,18 +2197,21 @@ enum srso_mitigation { SRSO_MITIGATION_NONE, SRSO_MITIGATION_MICROCODE, SRSO_MITIGATION_SAFE_RET, + SRSO_MITIGATION_IBPB, }; enum srso_mitigation_cmd { SRSO_CMD_OFF, SRSO_CMD_MICROCODE, SRSO_CMD_SAFE_RET, + SRSO_CMD_IBPB, }; static const char * const srso_strings[] = { [SRSO_MITIGATION_NONE] = "Vulnerable", [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", + [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", }; static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; @@ -2225,6 +2228,8 @@ static int __init srso_parse_cmdline(char *str) srso_cmd = SRSO_CMD_MICROCODE; else if (!strcmp(str, "safe-ret")) srso_cmd = SRSO_CMD_SAFE_RET; + else if (!strcmp(str, "ibpb")) + srso_cmd = SRSO_CMD_IBPB; else pr_err("Ignoring unknown SRSO option (%s).", str); @@ -2266,6 +2271,14 @@ static void __init srso_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_SRSO_NO); } + if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { + if (has_microcode) { + pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n"); + srso_mitigation = SRSO_MITIGATION_IBPB; + goto pred_cmd; + } + } + switch (srso_cmd) { case SRSO_CMD_OFF: return; @@ -2290,6 +2303,16 @@ static void __init srso_select_mitigation(void) } break; + case SRSO_CMD_IBPB: + if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { + if (has_microcode) { + setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + srso_mitigation = SRSO_MITIGATION_IBPB; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); + goto pred_cmd; + } default: break; } -- cgit From d893832d0e1ef41c72cdae444268c1d64a2be8ad Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 7 Jul 2023 13:53:41 +0200 Subject: x86/srso: Add IBPB on VMEXIT Add the option to flush IBPB only on VMEXIT in order to protect from malicious guests but one otherwise trusts the software that runs on the hypervisor. 
Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/bugs.c | 19 +++++++++++++++++++ arch/x86/kvm/svm/svm.c | 4 +++- arch/x86/kvm/svm/vmenter.S | 3 +++ 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 93070aabbb2f..7600a8a1589f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -311,6 +311,7 @@ #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ +#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index f3cc432ed818..d4109eb5eb2e 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2198,6 +2198,7 @@ enum srso_mitigation { SRSO_MITIGATION_MICROCODE, SRSO_MITIGATION_SAFE_RET, SRSO_MITIGATION_IBPB, + SRSO_MITIGATION_IBPB_ON_VMEXIT, }; enum srso_mitigation_cmd { @@ -2205,6 +2206,7 @@ enum srso_mitigation_cmd { SRSO_CMD_MICROCODE, SRSO_CMD_SAFE_RET, SRSO_CMD_IBPB, + SRSO_CMD_IBPB_ON_VMEXIT, }; static const char * const srso_strings[] = { @@ -2212,6 +2214,7 @@ static const char * const srso_strings[] = { [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", + [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" }; static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; @@ -2230,6 +2233,8 @@ static int __init srso_parse_cmdline(char *str) srso_cmd = SRSO_CMD_SAFE_RET; else if (!strcmp(str, "ibpb")) srso_cmd = SRSO_CMD_IBPB; + else if (!strcmp(str, "ibpb-vmexit")) + srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; else pr_err("Ignoring unknown SRSO option (%s).", str); @@ -2313,6 +2318,20 @@ static void __init srso_select_mitigation(void) pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); goto pred_cmd; } + break; + + case SRSO_CMD_IBPB_ON_VMEXIT: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); + srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + goto pred_cmd; + } + break; + default: break; } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d381ad424554..0a51fd56f960 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1498,7 +1498,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (sd->current_vmcb != svm->vmcb) { sd->current_vmcb = svm->vmcb; - indirect_branch_prediction_barrier(); + + if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT)) + indirect_branch_prediction_barrier(); } if (kvm_vcpu_apicv_active(vcpu)) avic_vcpu_load(vcpu, cpu); diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 8e8295e774f0..265452fc9ebe 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -224,6 +224,9 @@ SYM_FUNC_START(__svm_vcpu_run) */ UNTRAIN_RET + /* SRSO */ + ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + /* * Clear all general purpose registers except RSP and RAX to prevent * speculative use of the guest's values, even those that are reloaded 
-- cgit From d3053b4a6b76f29fd1bf0b438b19a2d6ece9657d Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Tue, 20 Jun 2023 05:53:59 +0300 Subject: MAINTAINERS: Add myself as reviewer for HYPERBUS Add myself as Designated Reviewer for Hyperbus support. I'm assessing the framework and I'd like to get involved in reviewing further patches. Signed-off-by: Tudor Ambarus Acked-by: Vignesh Raghavendra Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230620025359.33839-1-tudor.ambarus@linaro.org --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 3be1bdfe8ecc..c5e338ca3b52 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9649,6 +9649,7 @@ F: tools/hv/ HYPERBUS SUPPORT M: Vignesh Raghavendra +R: Tudor Ambarus L: linux-mtd@lists.infradead.org S: Supported Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ -- cgit From 71c8f9cf2623d0db79665f876b95afcdd8214aec Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 19 Jul 2023 21:00:25 +0200 Subject: mtd: spi-nor: avoid holes in struct spi_mem_op gcc gets confused when -ftrivial-auto-var-init=pattern is used on sparse bit fields such as 'struct spi_mem_op', which caused the previous false positive warning about an uninitialized variable: drivers/mtd/spi-nor/spansion.c: error: 'op' is used uninitialized [-Werror=uninitialized] In fact, the variable is fully initialized and gcc does not see it being used, so the warning is entirely bogus. The problem appears to be a misoptimization in the initialization of single bit fields when the rest of the bytes are not initialized. A previous workaround added another initialization, which ended up shutting up the warning in spansion.c, though it apparently still happens in other files as reported by Peter Foley in the gcc bugzilla. The workaround of adding a fake initialization seems particularly bad because it would set values that can never be correct but prevent the compiler from warning about actually missing initializations. Revert the broken workaround and instead pad the structure to only have bitfields that add up to full bytes, which should avoid this behavior in all drivers. I also filed a new bug against gcc with what I found, so this can hopefully be addressed in future gcc releases. At the moment, only gcc-12 and gcc-13 are affected. 
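As a minimal sketch of the padding idea (the struct and field names here are made up for illustration, and u8 is the kernel typedef from <linux/types.h>; the real change to struct spi_mem_op follows below): a lone one-bit bitfield leaves seven uninitialized padding bits in its storage byte, which gcc-12/13 mishandle under -ftrivial-auto-var-init=pattern, so the padding is named explicitly to make the bitfields add up to a full byte:

	/* Hypothetical example, not taken from the patch. */
	struct example_flags {
		u8 nbytes;
		u8 dtr : 1;	/* previously followed by 7 implicit pad bits */
		u8 __pad : 7;	/* explicit padding covering the rest of the byte */
	};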
Cc: Peter Foley Cc: Pedro Falcato Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110743 Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108402 Link: https://godbolt.org/z/efMMsG1Kx Fixes: 420c4495b5e56 ("mtd: spi-nor: spansion: make sure local struct does not contain garbage") Signed-off-by: Arnd Bergmann Acked-by: Mark Brown Acked-by: Tudor Ambarus Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230719190045.4007391-1-arnd@kernel.org --- drivers/mtd/spi-nor/spansion.c | 4 ++-- include/linux/spi/spi-mem.h | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 36876aa849ed..15f9a80c10b9 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -361,7 +361,7 @@ static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor, */ static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor) { - struct spi_mem_op op = {}; + struct spi_mem_op op; u8 addr_mode; int ret; @@ -492,7 +492,7 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, const struct sfdp_bfpt *bfpt) { - struct spi_mem_op op = {}; + struct spi_mem_op op; int ret; ret = cypress_nor_set_addr_mode_nbytes(nor); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 8e984d75f5b6..6b0a7dc48a4b 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -101,6 +101,7 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; u16 opcode; } cmd; @@ -108,6 +109,7 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; u64 val; } addr; @@ -115,12 +117,14 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; } dummy; struct { u8 buswidth; u8 dtr : 1; u8 ecc : 1; + u8 __pad : 6; enum spi_mem_data_dir dir; unsigned int nbytes; union { -- cgit From c6abce60338aa2080973cd95be0aedad528bb41f Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 19 Jul 2023 23:55:01 +0200 Subject: mtd: rawnand: fsl_upm: Fix an off-by-one test in fun_exec_op() 'op->cs' is copied in 'fun->mchip_number' which is used to access the 'mchip_offsets' and the 'rnb_gpio' arrays. These arrays have NAND_MAX_CHIPS elements, so the index must be below this limit. Fix the sanity check to also reject an index equal to NAND_MAX_CHIPS, which would otherwise lead to out-of-bounds accesses. Fixes: 54309d657767 ("mtd: rawnand: fsl_upm: Implement exec_op()") Signed-off-by: Christophe JAILLET Reviewed-by: Dan Carpenter Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/cd01cba1c7eda58bdabaae174c78c067325803d2.1689803636.git.christophe.jaillet@wanadoo.fr --- drivers/mtd/nand/raw/fsl_upm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 086426139173..7366e85c09fd 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op, unsigned int i; int ret; - if (op->cs > NAND_MAX_CHIPS) + if (op->cs >= NAND_MAX_CHIPS) return -EINVAL; if (check_only) -- cgit From cdddb626dc053a2bbe8be4150e9b67395130a683 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Sat, 1 Jul 2023 11:17:22 -0600 Subject: media: venus: Use struct_size_t() helper in pkt_session_unset_buffers() Prefer struct_size_t() over struct_size() when no pointer instance of the structure type is present. Signed-off-by: "Gustavo A. R. 
Silva" Reviewed-by: Vikash Garodia Link: https://lore.kernel.org/r/ZKBfoqSl61jfpO2r@work Signed-off-by: Kees Cook --- drivers/media/platform/qcom/venus/hfi_cmds.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c index 7f0802a5518c..3418d2dd9371 100644 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c @@ -251,8 +251,8 @@ int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt, pkt->extradata_size = 0; pkt->shdr.hdr.size = - struct_size((struct hfi_session_set_buffers_pkt *)0, - buffer_info, bd->num_buffers); + struct_size_t(struct hfi_session_set_buffers_pkt, + buffer_info, bd->num_buffers); } pkt->response_req = bd->response_required; -- cgit From 6722b25712054c0f903b839b8f5088438dd04df3 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 24 Jul 2023 23:43:20 +0530 Subject: powerpc/mm/altmap: Fix altmap boundary check altmap->free includes the entire free space from which altmap blocks can be allocated. So when checking whether the kernel is doing altmap block free, compute the boundary correctly, otherwise memory hotunplug can fail. Fixes: 9ef34630a461 ("powerpc/mm: Fallback to RAM if the altmap is unusable") Signed-off-by: "Aneesh Kumar K.V" Reviewed-by: David Hildenbrand Signed-off-by: Michael Ellerman Link: https://msgid.link/20230724181320.471386-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/init_64.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index fe1b83020e0d..0ec5b45b1e86 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -314,8 +314,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, start = ALIGN_DOWN(start, page_size); if (altmap) { alt_start = altmap->base_pfn; - alt_end = altmap->base_pfn + altmap->reserve + - altmap->free + altmap->alloc + altmap->align; + alt_end = altmap->base_pfn + altmap->reserve + altmap->free; } pr_debug("vmemmap_free %lx...%lx\n", start, end); -- cgit From 41a506ef71eb38d94fe133f565c87c3e06ccc072 Mon Sep 17 00:00:00 2001 From: Naveen N Rao Date: Wed, 21 Jun 2023 10:43:49 +0530 Subject: powerpc/ftrace: Create a dummy stackframe to fix stack unwind With ppc64 -mprofile-kernel and ppc32 -pg, profiling instructions to call into ftrace are emitted right at function entry. The instruction sequence used is minimal to reduce overhead. Crucially, a stackframe is not created for the function being traced. This breaks stack unwinding since the function being traced does not have a stackframe for itself. 
As such, it never shows up in the backtrace: /sys/kernel/debug/tracing # echo 1 > /proc/sys/kernel/stack_tracer_enabled /sys/kernel/debug/tracing # cat stack_trace Depth Size Location (17 entries) ----- ---- -------- 0) 4144 32 ftrace_call+0x4/0x44 1) 4112 432 get_page_from_freelist+0x26c/0x1ad0 2) 3680 496 __alloc_pages+0x290/0x1280 3) 3184 336 __folio_alloc+0x34/0x90 4) 2848 176 vma_alloc_folio+0xd8/0x540 5) 2672 272 __handle_mm_fault+0x700/0x1cc0 6) 2400 208 handle_mm_fault+0xf0/0x3f0 7) 2192 80 ___do_page_fault+0x3e4/0xbe0 8) 2112 160 do_page_fault+0x30/0xc0 9) 1952 256 data_access_common_virt+0x210/0x220 10) 1696 400 0xc00000000f16b100 11) 1296 384 load_elf_binary+0x804/0x1b80 12) 912 208 bprm_execve+0x2d8/0x7e0 13) 704 64 do_execveat_common+0x1d0/0x2f0 14) 640 160 sys_execve+0x54/0x70 15) 480 64 system_call_exception+0x138/0x350 16) 416 416 system_call_common+0x160/0x2c4 Fix this by having ftrace create a dummy stackframe for the function being traced. With this, backtraces now capture the function being traced: /sys/kernel/debug/tracing # cat stack_trace Depth Size Location (17 entries) ----- ---- -------- 0) 3888 32 _raw_spin_trylock+0x8/0x70 1) 3856 576 get_page_from_freelist+0x26c/0x1ad0 2) 3280 64 __alloc_pages+0x290/0x1280 3) 3216 336 __folio_alloc+0x34/0x90 4) 2880 176 vma_alloc_folio+0xd8/0x540 5) 2704 416 __handle_mm_fault+0x700/0x1cc0 6) 2288 96 handle_mm_fault+0xf0/0x3f0 7) 2192 48 ___do_page_fault+0x3e4/0xbe0 8) 2144 192 do_page_fault+0x30/0xc0 9) 1952 608 data_access_common_virt+0x210/0x220 10) 1344 16 0xc0000000334bbb50 11) 1328 416 load_elf_binary+0x804/0x1b80 12) 912 64 bprm_execve+0x2d8/0x7e0 13) 848 176 do_execveat_common+0x1d0/0x2f0 14) 672 192 sys_execve+0x54/0x70 15) 480 64 system_call_exception+0x138/0x350 16) 416 416 system_call_common+0x160/0x2c4 This results in two additional stores in the ftrace entry code, but produces reliable backtraces. Fixes: 153086644fd1 ("powerpc/ftrace: Add support for -mprofile-kernel ftrace ABI") Cc: stable@vger.kernel.org Signed-off-by: Naveen N Rao Signed-off-by: Michael Ellerman Link: https://msgid.link/20230621051349.759567-1-naveen@kernel.org --- arch/powerpc/kernel/trace/ftrace_mprofile.S | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S index ffb1db386849..1f7d86de1538 100644 --- a/arch/powerpc/kernel/trace/ftrace_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S @@ -33,6 +33,9 @@ * and then arrange for the ftrace function to be called. */ .macro ftrace_regs_entry allregs + /* Create a minimal stack frame for representing B */ + PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1) + /* Create our stack frame + pt_regs */ PPC_STLU r1,-SWITCH_FRAME_SIZE(r1) @@ -42,7 +45,7 @@ #ifdef CONFIG_PPC64 /* Save the original return address in A's stack frame */ - std r0, LRSAVE+SWITCH_FRAME_SIZE(r1) + std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1) /* Ok to continue? 
*/ lbz r3, PACA_FTRACE_ENABLED(r13) cmpdi r3, 0 @@ -77,6 +80,8 @@ mflr r7 /* Save it as pt_regs->nip */ PPC_STL r7, _NIP(r1) + /* Also save it in B's stackframe header for proper unwind */ + PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1) /* Save the read LR in pt_regs->link */ PPC_STL r0, _LINK(r1) @@ -142,7 +147,7 @@ #endif /* Pop our stack frame */ - addi r1, r1, SWITCH_FRAME_SIZE + addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE #ifdef CONFIG_LIVEPATCH_64 /* Based on the cmpd above, if the NIP was altered handle livepatch */ -- cgit From ee31742bf17636da1304af77b2cb1c29b5dda642 Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Mon, 15 May 2023 09:21:37 +0200 Subject: drm/imx/ipuv3: Fix front porch adjustment upon hactive aligning When hactive is not aligned to 8 pixels, it is aligned accordingly and the hfront porch needs to be reduced by the same amount. Unfortunately the front porch was set to the difference itself rather than being reduced by it. There are some Samsung TVs which can't cope with a front porch of only a few pixels instead of 70. Fixes: 94dfec48fca7 ("drm/imx: Add 8 pixel alignment fix") Signed-off-by: Alexander Stein Reviewed-by: Philipp Zabel Link: https://lore.kernel.org/r/20230515072137.116211-1-alexander.stein@ew.tq-group.com [p.zabel@pengutronix.de: Fixed subject] Signed-off-by: Philipp Zabel Link: https://patchwork.freedesktop.org/patch/msgid/20230515072137.116211-1-alexander.stein@ew.tq-group.com --- drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index 5f26090b0c98..89585b31b985 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n", sig_cfg.mode.hactive, new_hactive); - sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive; + sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive; sig_cfg.mode.hactive = new_hactive; } -- cgit From 74158a8cad79d2f5dcf71508993664c5cfcbfa3c Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 28 Jul 2023 00:08:24 +0000 Subject: KVM: arm64: Skip instruction after emulating write to TCR_EL1 Whelp, this is embarrassing. Since commit 082fdfd13841 ("KVM: arm64: Prevent guests from enabling HA/HD on Ampere1") KVM traps writes to TCR_EL1 on AmpereOne to work around an erratum in the unadvertised HAFDBS implementation, preventing the guest from enabling the feature. Unfortunately, I failed virtualization 101 when working on that change, and forgot to advance PC after instruction emulation. Do the right thing and skip the MSR instruction after emulating the write. 
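For illustration, the general trap-and-emulate shape the fix restores looks like the sketch below; the wrapper name and simplified signature are hypothetical, but the calls are the ones used by the real handler in the one-line diff that follows:

	/* Sketch only: emulate the trapped write, then advance the guest PC. */
	static bool emulate_tcr_el1_write(struct kvm_vcpu *vcpu, u64 val)
	{
		val &= ~(TCR_HD | TCR_HA);	/* emulate: strip HA/HD */
		write_sysreg_el1(val, SYS_TCR);
		__kvm_skip_instr(vcpu);		/* the forgotten step: skip the MSR */
		return true;
	}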
Fixes: 082fdfd13841 ("KVM: arm64: Prevent guests from enabling HA/HD on Ampere1") Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20230728000824.3848025-1-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/hyp/switch.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 4bddb8541bec..34f222af6165 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -457,6 +457,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu) */ val &= ~(TCR_HD | TCR_HA); write_sysreg_el1(val, SYS_TCR); + __kvm_skip_instr(vcpu); return true; } -- cgit From 238ec850b95a02dcdff3edc86781aa913549282f Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 28 Jul 2023 17:28:43 -0500 Subject: x86/srso: Fix return thunks in generated code Set X86_FEATURE_RETHUNK when enabling the SRSO mitigation so that generated code (e.g., ftrace, static call, eBPF) generates "jmp __x86_return_thunk" instead of RET. [ bp: Add a comment. ] Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Josh Poimboeuf Signed-off-by: Borislav Petkov (AMD) --- arch/x86/kernel/alternative.c | 4 +--- arch/x86/kernel/cpu/bugs.c | 6 ++++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 920a8ca7a8f8..2dcf3a06af09 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -707,9 +707,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes) int i = 0; /* Patch the custom return thunks... */ - if (cpu_feature_enabled(X86_FEATURE_RETHUNK) || - cpu_feature_enabled(X86_FEATURE_SRSO) || - cpu_feature_enabled(X86_FEATURE_SRSO_ALIAS)) { + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { i = JMP32_INSN_SIZE; __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); } else { diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d4109eb5eb2e..7314a6bdc862 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2297,6 +2297,12 @@ static void __init srso_select_mitigation(void) case SRSO_CMD_SAFE_RET: if (IS_ENABLED(CONFIG_CPU_SRSO)) { + /* + * Enable the return thunk for generated code + * like ftrace, static_call, etc. 
+ */ + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + if (boot_cpu_data.x86 == 0x19) setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); else -- cgit From 3bbbe97ad83db8d9df06daf027b0840188de625d Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 28 Jul 2023 23:03:22 +0200 Subject: x86/srso: Add a forgotten NOENDBR annotation Fix: vmlinux.o: warning: objtool: .export_symbol+0x29e40: data relocation to !ENDBR: srso_untrain_ret_alias+0x0 Reported-by: Linus Torvalds Signed-off-by: Borislav Petkov (AMD) --- arch/x86/lib/retpoline.S | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 845cfb0d748f..2cff585f22f2 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -151,6 +151,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) .section .text.__x86.rethunk_untrain SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_NOENDBR ASM_NOP2 lfence jmp __x86_return_thunk -- cgit From 46d14e17095237007b59f56aae2d81ae2dcb0f93 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Mon, 31 Jul 2023 19:20:33 +0800 Subject: drm/i915/gvt: Fix bug in getting msg length in AUX CH registers handler The message length should be obtained from the value written to the AUX_CH_CTL register rather than from the register's enum type. Commit 0cad796a2269 ("drm/i915: Use REG_BIT() & co. for AUX CH registers") incorrectly calculates the msg_length from the reg type and yields the warning below in intel_gvt_i2c_handle_aux_ch_write(): "i915 0000:00:02.0: drm_WARN_ON(msg_length != 4)". Fixes: 0cad796a2269 ("drm/i915: Use REG_BIT() & co. for AUX CH registers") Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20230731112033.7275-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/edid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 2a0438f12a14..af9afdb53c7f 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -491,7 +491,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, return; } - msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, reg); + msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value); // check the msg in DATA register. msg = vgpu_vreg(vgpu, offset + 4); -- cgit From d14560ac1b595aa2e792365e91fea6aeaee66c2b Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Tue, 25 Jul 2023 02:19:44 +0200 Subject: drm/i915/gt: Cleanup aux invalidation registers Fix the 'NV' definition postfix that is supposed to be INV. Take the chance to also order the registers properly by address and to rename GEN12_GFX_CCS_AUX_NV to GEN12_CCS_AUX_INV like all the other similar registers. Also remove the VD1, VD3 and VE1 registers, which don't exist, and add BCS0 and CCS0. 
Signed-off-by: Andi Shyti Cc: # v5.8+ Reviewed-by: Nirmoy Das Reviewed-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-2-andi.shyti@linux.intel.com (cherry picked from commit 2f0b927d3ca3440445975ebde27f3df1c3ed6f76) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 8 ++++---- drivers/gpu/drm/i915/gt/intel_gt_regs.h | 16 ++++++++-------- drivers/gpu/drm/i915/gt/intel_lrc.c | 6 +++--- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 23857cc08eca..563efee05560 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -287,8 +287,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (!HAS_FLAT_CCS(rq->engine->i915)) { /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_GFX_CCS_AUX_NV); + cs = gen12_emit_aux_table_inv(rq->engine->gt, cs, + GEN12_CCS_AUX_INV); } *cs++ = preparser_disable(false); @@ -348,10 +348,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) if (aux_inv) { /* hsdes: 1809175790 */ if (rq->engine->class == VIDEO_DECODE_CLASS) cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VD0_AUX_NV); + cs, GEN12_VD0_AUX_INV); else cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VE0_AUX_NV); + cs, GEN12_VE0_AUX_INV); } if (mode & EMIT_INVALIDATE) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 718cb2c80f79..2cdfb2f713d0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -332,9 +332,11 @@ #define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) #define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) #define BSD_HWS_PGA_GEN7 _MMIO(0x4180) -#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208) -#define GEN12_VD0_AUX_NV _MMIO(0x4218) -#define GEN12_VD1_AUX_NV _MMIO(0x4228) + +#define GEN12_CCS_AUX_INV _MMIO(0x4208) +#define GEN12_VD0_AUX_INV _MMIO(0x4218) +#define GEN12_VE0_AUX_INV _MMIO(0x4238) +#define GEN12_BCS0_AUX_INV _MMIO(0x4248) #define GEN8_RTCR _MMIO(0x4260) #define GEN8_M1TCR _MMIO(0x4264) @@ -342,14 +344,12 @@ #define GEN8_BTCR _MMIO(0x426c) #define GEN8_VTCR _MMIO(0x4270) -#define GEN12_VD2_AUX_NV _MMIO(0x4298) -#define GEN12_VD3_AUX_NV _MMIO(0x42a8) -#define GEN12_VE0_AUX_NV _MMIO(0x4238) - #define BLT_HWS_PGA_GEN7 _MMIO(0x4280) -#define GEN12_VE1_AUX_NV _MMIO(0x42b8) +#define GEN12_VD2_AUX_INV _MMIO(0x4298) +#define GEN12_CCS0_AUX_INV _MMIO(0x42c8) #define AUX_INV REG_BIT(0) + #define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380) #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a4ec20aaafe2..325f3dbfb90e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1367,7 +1367,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) /* hsdes: 1809175790 */ if (!HAS_FLAT_CCS(ce->engine->i915)) cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_GFX_CCS_AUX_NV); + cs, GEN12_CCS_AUX_INV); /* Wa_16014892111 */ if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) || @@ -1396,10 +1396,10 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) if (!HAS_FLAT_CCS(ce->engine->i915)) { if (ce->engine->class == VIDEO_DECODE_CLASS) cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VD0_AUX_NV); + cs, GEN12_VD0_AUX_INV); else if (ce->engine->class == 
VIDEO_ENHANCEMENT_CLASS) cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VE0_AUX_NV); + cs, GEN12_VE0_AUX_INV); } return cs; -- cgit From b2f59e9026038a5bbcbc0019fa58f963138211ee Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Tue, 25 Jul 2023 02:19:45 +0200 Subject: drm/i915: Add the gen12_needs_ccs_aux_inv helper We have always assumed that a device has either AUX or flat CCS, but this approximation is not always true; PVC, for example, is an exception. Set the basis for future, finer-grained selection by implementing a boolean gen12_needs_ccs_aux_inv() function that tells whether aux invalidation is needed or not. Currently, PVC is the only exception to the above-mentioned rule. Requires: 059ae7ae2a1c ("drm/i915/gt: Cleanup aux invalidation registers") Signed-off-by: Andi Shyti Cc: Matt Roper Cc: Jonathan Cavitt Cc: # v5.8+ Reviewed-by: Matt Roper Reviewed-by: Andrzej Hajda Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-3-andi.shyti@linux.intel.com (cherry picked from commit c827655b87ad201ebe36f2e28d16b5491c8f7801) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 563efee05560..460c9225a50f 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -165,6 +165,18 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } +static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine) +{ + if (IS_PONTEVECCHIO(engine->i915)) + return false; + + /* + * so far platforms supported by i915 having + * flat ccs do not require AUX invalidation + */ + return !HAS_FLAT_CCS(engine->i915); +} + u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) { u32 gsi_offset = gt->uncore->gsi_offset; @@ -267,7 +279,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) else if (engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; - if (!HAS_FLAT_CCS(rq->engine->i915)) + if (gen12_needs_ccs_aux_inv(rq->engine)) count = 8 + 4; else count = 8; @@ -285,7 +297,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - if (!HAS_FLAT_CCS(rq->engine->i915)) { + if (gen12_needs_ccs_aux_inv(rq->engine)) { /* hsdes: 1809175790 */ cs = gen12_emit_aux_table_inv(rq->engine->gt, cs, GEN12_CCS_AUX_INV); @@ -307,7 +319,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) if (mode & EMIT_INVALIDATE) { cmd += 2; - if (!HAS_FLAT_CCS(rq->engine->i915) && + if (gen12_needs_ccs_aux_inv(rq->engine) && (rq->engine->class == VIDEO_DECODE_CLASS || rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { aux_inv = rq->engine->mask & -- cgit From 78a6ccd65fa3a7cc697810db079cc4b84dff03d5 Mon Sep 17 00:00:00 2001 From: Jonathan Cavitt Date: Tue, 25 Jul 2023 02:19:46 +0200 Subject: drm/i915/gt: Ensure memory quiesced before invalidation All memory traffic must be quiesced before requesting an aux invalidation on platforms that use Aux CCS.
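Sketched in C, the intended ordering on the render path is roughly the following (a simplification of the gen12_emit_flush_rcs() hunk in the diff below):

	/*
	 * Aux CCS platforms: quiesce memory traffic with a flush first,
	 * even when the request only asked for an invalidation.
	 */
	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
		/* ... emit the PIPE_CONTROL flush ... */
	}
	if (mode & EMIT_INVALIDATE) {
		/* ... emit the invalidation, including the aux table ... */
	}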
Fixes: 972282c4cf24 ("drm/i915/gen12: Add aux table invalidate for all engines") Requires: a2a4aa0eef3b ("drm/i915: Add the gen12_needs_ccs_aux_inv helper") Signed-off-by: Jonathan Cavitt Signed-off-by: Andi Shyti Cc: # v5.8+ Reviewed-by: Nirmoy Das Reviewed-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-4-andi.shyti@linux.intel.com (cherry picked from commit ad8ebf12217e451cd19804b1c3e97ad56491c74a) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 460c9225a50f..6210b38a2d38 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -214,7 +214,11 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) { struct intel_engine_cs *engine = rq->engine; - if (mode & EMIT_FLUSH) { + /* + * On Aux CCS platforms the invalidation of the Aux + * table requires quiescing memory traffic beforehand + */ + if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) { u32 flags = 0; int err; u32 *cs; -- cgit From 592b228f12e15867a63e3a6eeeb54c5c12662a62 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Tue, 25 Jul 2023 02:19:47 +0200 Subject: drm/i915/gt: Rename flags with bit_group_X according to the datasheet In preparation for the next patch, align the naming of the pipe control set of flag values with the datasheet (BSPEC 47112). The variable "flags" in gen12_emit_flush_rcs() is applied as a set of flags called Bit Group 1. Also define Bit Group 0 as bit_group_0, where currently only the PIPE_CONTROL0_HDC_PIPELINE_FLUSH bit is set. Signed-off-by: Andi Shyti Cc: # v5.8+ Reviewed-by: Matt Roper Reviewed-by: Andrzej Hajda Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-5-andi.shyti@linux.intel.com (cherry picked from commit f2dcd21d5a22e13f2fbfe7ab65149038b93cf2ff) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 34 +++++++++++++++++--------------- drivers/gpu/drm/i915/gt/gen8_engine_cs.h | 18 ++++++++++------- 2 files changed, 29 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 6210b38a2d38..5d2175e918dd 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -219,7 +219,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) * table requires quiescing memory traffic beforehand */ if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) { - u32 flags = 0; + u32 bit_group_0 = 0; + u32 bit_group_1 = 0; int err; u32 *cs; @@ -227,32 +228,33 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (err) return err; - flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; - flags |= PIPE_CONTROL_FLUSH_L3; - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH; + + bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH; + bit_group_1 |= PIPE_CONTROL_FLUSH_L3; + bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; /* Wa_1409600907:tgl,adl-p */ - flags |= PIPE_CONTROL_DEPTH_STALL; - flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; - 
flags |= PIPE_CONTROL_STORE_DATA_INDEX; - flags |= PIPE_CONTROL_QW_WRITE; + bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX; + bit_group_1 |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_CS_STALL; + bit_group_1 |= PIPE_CONTROL_CS_STALL; if (!HAS_3D_PIPELINE(engine->i915)) - flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS; + bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS; else if (engine->class == COMPUTE_CLASS) - flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; + bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen12_emit_pipe_control(cs, - PIPE_CONTROL0_HDC_PIPELINE_FLUSH, - flags, LRC_PPHWSP_SCRATCH_ADDR); + cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1, + LRC_PPHWSP_SCRATCH_ADDR); intel_ring_advance(rq, cs); } diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index 655e5c00ddc2..a44eda096557 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -49,25 +49,29 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); static inline u32 * -__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) +__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0, + u32 bit_group_1, u32 offset) { memset(batch, 0, 6 * sizeof(u32)); - batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0; - batch[1] = flags1; + batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0; + batch[1] = bit_group_1; batch[2] = offset; return batch + 6; } -static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) +static inline u32 *gen8_emit_pipe_control(u32 *batch, + u32 bit_group_1, u32 offset) { - return __gen8_emit_pipe_control(batch, 0, flags, offset); + return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset); } -static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) +static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0, + u32 bit_group_1, u32 offset) { - return __gen8_emit_pipe_control(batch, flags0, flags1, offset); + return __gen8_emit_pipe_control(batch, bit_group_0, + bit_group_1, offset); } static inline u32 * -- cgit From 824df77ab2107d8d4740b834b276681a41ae1ac8 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Tue, 25 Jul 2023 02:19:48 +0200 Subject: drm/i915/gt: Enable the CCS_FLUSH bit in the pipe control and in the CS Enable the CCS_FLUSH bit 13 in the pipe control for the render and compute engines on platforms starting from Meteor Lake (BSPEC 43904 and 47112). For the copy engine, add MI_FLUSH_DW_CCS (bit 16) in the command streamer.
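A minimal sketch of the two additions, mirroring the diff below:

	/* render/compute: CCS_FLUSH goes into PIPE_CONTROL Bit Group 0, MTL+ only */
	if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
		bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;

	/* copy engine: the CCS flush is requested via MI_FLUSH_DW instead */
	if (gen12_needs_ccs_aux_inv(rq->engine) &&
	    rq->engine->class == COPY_ENGINE_CLASS)
		cmd |= MI_FLUSH_DW_CCS;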
Fixes: 972282c4cf24 ("drm/i915/gen12: Add aux table invalidate for all engines") Requires: 8da173db894a ("drm/i915/gt: Rename flags with bit_group_X according to the datasheet") Signed-off-by: Andi Shyti Cc: Jonathan Cavitt Cc: Nirmoy Das Cc: # v5.8+ Reviewed-by: Matt Roper Reviewed-by: Andrzej Hajda Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-6-andi.shyti@linux.intel.com (cherry picked from commit b70df82b428774875c7c56d3808102165891547c) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 11 +++++++++++ drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 1 + 2 files changed, 12 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 5d2175e918dd..ec54d36eaef7 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -230,6 +230,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH; + /* + * When required, in MTL and beyond platforms we + * need to set the CCS_FLUSH bit in the pipe control + */ + if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70)) + bit_group_0 |= PIPE_CONTROL_CCS_FLUSH; + bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH; bit_group_1 |= PIPE_CONTROL_FLUSH_L3; bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; @@ -356,6 +363,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) cmd |= MI_INVALIDATE_TLB; if (rq->engine->class == VIDEO_DECODE_CLASS) cmd |= MI_INVALIDATE_BSD; + + if (gen12_needs_ccs_aux_inv(rq->engine) && + rq->engine->class == COPY_ENGINE_CLASS) + cmd |= MI_FLUSH_DW_CCS; } *cs++ = cmd; diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 5d143e2a8db0..5df7cce23197 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -299,6 +299,7 @@ #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_CCS_FLUSH (1<<13) /* MTL+ */ #define PIPE_CONTROL_WRITE_FLUSH (1<<12) #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */ -- cgit From 0fde2f23516a00fd90dfb980b66b4665fcbfa659 Mon Sep 17 00:00:00 2001 From: Jonathan Cavitt Date: Tue, 25 Jul 2023 02:19:49 +0200 Subject: drm/i915/gt: Poll aux invalidation register bit on invalidation For platforms that use Aux CCS, wait for aux invalidation to complete by checking the aux invalidation register bit is cleared. 
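In outline (per the diff below), the LRI write that triggers the invalidation is now followed by an MI_SEMAPHORE_WAIT that polls the same register until the AUX_INV bit reads back as zero:

	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = AUX_INV;			/* kick off the invalidation */
	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |	/* then poll until it clears */
		MI_SEMAPHORE_REGISTER_POLL |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;				/* semaphore data: wait for 0 */
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = 0;
	*cs++ = 0;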
Fixes: 972282c4cf24 ("drm/i915/gen12: Add aux table invalidate for all engines") Signed-off-by: Jonathan Cavitt Signed-off-by: Andi Shyti Cc: # v5.8+ Reviewed-by: Nirmoy Das Reviewed-by: Andrzej Hajda Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-7-andi.shyti@linux.intel.com (cherry picked from commit d459c86f00aa98028d155a012c65dc42f7c37e76) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 17 ++++++++++++----- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 1 + 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index ec54d36eaef7..ec7a0ddf9e12 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -184,7 +184,15 @@ u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; *cs++ = AUX_INV; - *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_REGISTER_POLL | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; + *cs++ = 0; + *cs++ = 0; return cs; } @@ -292,10 +300,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) else if (engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; + count = 8; if (gen12_needs_ccs_aux_inv(rq->engine)) - count = 8 + 4; - else - count = 8; + count += 8; cs = intel_ring_begin(rq, count); if (IS_ERR(cs)) @@ -338,7 +345,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) aux_inv = rq->engine->mask & ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0); if (aux_inv) - cmd += 4; + cmd += 8; } } diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 5df7cce23197..2bd8d98d2110 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -121,6 +121,7 @@ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ #define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */ +#define MI_SEMAPHORE_REGISTER_POLL (1 << 16) #define MI_SEMAPHORE_POLL (1 << 15) #define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) #define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) -- cgit From 6a35f22d222528e1b157c6978c9424d2f8cbe0a1 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Tue, 25 Jul 2023 02:19:50 +0200 Subject: drm/i915/gt: Support aux invalidation on all engines Refactor so that all the operations around aux table invalidation are kept in one single place. With this refactoring, also add more engines on which the invalidation should be performed.
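The resulting caller-side pattern, in sketch form (names as in the diff below): every emitter calls one engine-aware helper, which looks up the engine's invalidation register and emits nothing when invalidation is not needed.

	/* no-op on engines/platforms that do not need Aux CCS invalidation */
	cs = gen12_emit_aux_table_inv(rq->engine, cs);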
Fixes: 972282c4cf24 ("drm/i915/gen12: Add aux table invalidate for all engines") Signed-off-by: Andi Shyti Cc: Jonathan Cavitt Cc: Matt Roper Cc: # v5.8+ Reviewed-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-8-andi.shyti@linux.intel.com (cherry picked from commit 76ff7789d6e63d1a10b3b58f5c70b2e640c7a880) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 66 ++++++++++++++++++-------------- drivers/gpu/drm/i915/gt/gen8_engine_cs.h | 3 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 17 +------- 3 files changed, 41 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index ec7a0ddf9e12..2702ad4c26c8 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -165,21 +165,47 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } +static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine) +{ + switch (engine->id) { + case RCS0: + return GEN12_CCS_AUX_INV; + case BCS0: + return GEN12_BCS0_AUX_INV; + case VCS0: + return GEN12_VD0_AUX_INV; + case VCS2: + return GEN12_VD2_AUX_INV; + case VECS0: + return GEN12_VE0_AUX_INV; + case CCS0: + return GEN12_CCS0_AUX_INV; + default: + return INVALID_MMIO_REG; + } +} + static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine) { + i915_reg_t reg = gen12_get_aux_inv_reg(engine); + if (IS_PONTEVECCHIO(engine->i915)) return false; /* - * so far platforms supported by i915 having - * flat ccs do not require AUX invalidation + * So far platforms supported by i915 having flat ccs do not require + * AUX invalidation. Check also whether the engine requires it. */ - return !HAS_FLAT_CCS(engine->i915); + return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915); } -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs) { - u32 gsi_offset = gt->uncore->gsi_offset; + i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine); + u32 gsi_offset = engine->gt->uncore->gsi_offset; + + if (!gen12_needs_ccs_aux_inv(engine)) + return cs; *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; @@ -317,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - if (gen12_needs_ccs_aux_inv(rq->engine)) { - /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(rq->engine->gt, cs, - GEN12_CCS_AUX_INV); - } + cs = gen12_emit_aux_table_inv(engine, cs); *cs++ = preparser_disable(false); intel_ring_advance(rq, cs); @@ -332,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) { - intel_engine_mask_t aux_inv = 0; - u32 cmd, *cs; + u32 cmd = 4; + u32 *cs; - cmd = 4; if (mode & EMIT_INVALIDATE) { cmd += 2; - if (gen12_needs_ccs_aux_inv(rq->engine) && - (rq->engine->class == VIDEO_DECODE_CLASS || - rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { - aux_inv = rq->engine->mask & - ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0); - if (aux_inv) - cmd += 8; - } + if (gen12_needs_ccs_aux_inv(rq->engine)) + cmd += 8; } cs = intel_ring_begin(rq, cmd); @@ -381,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) *cs++ = 0; /* upper addr */ *cs++ = 0; /* value */ - if (aux_inv) { /* hsdes: 1809175790 */ - if (rq->engine->class == 
VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VD0_AUX_INV); - else - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VE0_AUX_INV); - } + cs = gen12_emit_aux_table_inv(rq->engine, cs); if (mode & EMIT_INVALIDATE) *cs++ = preparser_disable(false); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index a44eda096557..867ba697aceb 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -13,6 +13,7 @@ #include "intel_gt_regs.h" #include "intel_gpu_commands.h" +struct intel_engine_cs; struct intel_gt; struct i915_request; @@ -46,7 +47,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs); static inline u32 * __gen8_emit_pipe_control(u32 *batch, u32 bit_group_0, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 325f3dbfb90e..9477c2422321 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1364,10 +1364,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) IS_DG2_G11(ce->engine->i915)) cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_CCS_AUX_INV); + cs = gen12_emit_aux_table_inv(ce->engine, cs); /* Wa_16014892111 */ if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) || @@ -1392,17 +1389,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) { - if (ce->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VD0_AUX_INV); - else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VE0_AUX_INV); - } - - return cs; + return gen12_emit_aux_table_inv(ce->engine, cs); } static void -- cgit From a337b64f0d5717248a0c894e2618e658e6a9de9f Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Thu, 20 Jul 2023 11:35:44 +0200 Subject: drm/i915: Fix premature release of request's reusable memory Infinite waits for completion of GPU activity have been observed in CI, mostly inside __i915_active_wait(), triggered by igt@gem_barrier_race or igt@perf@stress-open-close. Root cause analysis, based on ftrace dumps generated with a lot of extra trace_printk() calls added to the code, revealed loops of request dependencies being accidentally built, preventing the requests from being processed, each waiting for completion of another one's activity. After we substitute a new request for the last active one tracked on a timeline, we set up a dependency of our new request to wait on completion of the current activity of that previous one. While doing that, we must take care to keep the old request in memory until we have used its attributes for setting up that await dependency, or we can happen to set up the await dependency on an unrelated request that already reuses the memory previously allocated to the old, already released one.
Combined with perf adding consecutive kernel context remote requests to different user context timelines, unresolvable loops of await dependencies can be built, leading to infinite waits. We obtain a pointer to the previous request to wait upon when we substitute it with a pointer to our new request in an active tracker, e.g. in intel_timeline.last_request. In some processing paths we protect that old request from being freed before we use it by getting a reference to it under RCU protection, but in others, e.g. __i915_request_commit() -> __i915_request_add_to_timeline() -> __i915_request_ensure_ordering(), we don't. But anyway, since the requests' memory is SLAB_TYPESAFE_BY_RCU, that RCU protection is not sufficient against reuse of memory. We could protect i915_request's memory from being prematurely reused by calling its release function via call_rcu() and consistently using rcu_read_lock(), as proposed in v1. However, that approach leads to a significant (up to 10 times) increase in SLAB utilization by the i915_request SLAB cache. Another potential approach is to take a reference to the previous active fence. When updating an active fence tracker, we first lock the new fence, substitute a pointer of the current active fence with the new one, then we lock the substituted fence. With this approach, there is a time window after the substitution and before the lock when the request can be concurrently released by an interrupt handler and its memory reused; we may then happen to lock and return a new, unrelated request. Always get a reference to the current active fence first, before replacing it with a new one. Having it protected from premature release and reuse, lock it, and then replace it with the new one, but only if it has been neither signalled via a potential concurrent interrupt nor replaced with another one by a potential concurrent thread; otherwise retry, starting from getting a reference to the new current one. Adjust users to not get a reference to the previous active fence themselves, and to always put the reference got by __i915_active_fence_set() when no longer needed.
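Under the new contract, a caller-side sketch (simplified from the i915_active.c hunks below) looks like this:

	prev = __i915_active_fence_set(active, fence);
	if (prev) {
		/* order our fence after prev, as before ... */
		dma_fence_put(prev);	/* ... then drop the reference we now own */
	}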
v3: Fix lockdep splat reports and other issues caused by incorrect use of try_cmpxchg() (use (cmpxchg() != prev) instead) v2: Protect request's memory by getting a reference to it in favor of delegating its release to call_rcu() (Chris) Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8211 Fixes: df9f85d8582e ("drm/i915: Serialise i915_active_fence_set() with itself") Suggested-by: Chris Wilson Signed-off-by: Janusz Krzysztofik Cc: # v5.6+ Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20230720093543.832147-2-janusz.krzysztofik@linux.intel.com (cherry picked from commit 946e047a3d88d46d15b5c5af0414098e12b243f7) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_active.c | 99 ++++++++++++++++++++++++++----------- drivers/gpu/drm/i915/i915_request.c | 11 +++++ 2 files changed, 81 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 8ef93889061a..5ec293011d99 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) } } while (unlikely(is_barrier(active))); - if (!__i915_active_fence_set(active, fence)) + fence = __i915_active_fence_set(active, fence); + if (!fence) __i915_active_acquire(ref); + else + dma_fence_put(fence); out: i915_active_release(ref); @@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref, return NULL; } - rcu_read_lock(); prev = __i915_active_fence_set(active, fence); - if (prev) - prev = dma_fence_get_rcu(prev); - else + if (!prev) __i915_active_acquire(ref); - rcu_read_unlock(); return prev; } @@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq) * * Records the new @fence as the last active fence along its timeline in * this active tracker, moving the tracking callbacks from the previous - * fence onto this one. Returns the previous fence (if not already completed), - * which the caller must ensure is executed before the new fence. To ensure - * that the order of fences within the timeline of the i915_active_fence is - * understood, it should be locked by the caller. + * fence onto this one. Gets and returns a reference to the previous fence + * (if not already completed), which the caller must put after making sure + * that it is executed before the new fence. To ensure that the order of + * fences within the timeline of the i915_active_fence is understood, it + * should be locked by the caller. */ struct dma_fence * __i915_active_fence_set(struct i915_active_fence *active, @@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active, struct dma_fence *prev; unsigned long flags; - if (fence == rcu_access_pointer(active->fence)) + /* + * In case of fences embedded in i915_requests, their memory is + * SLAB_TYPESAFE_BY_RCU, so it can be reused right after release + * by new requests. Then, there is a risk of passing back a pointer + * to a new, completely unrelated fence that reuses the same memory + * while tracked under a different active tracker. Combined with i915 + * perf open/close operations that build await dependencies between + * engine kernel context requests and user requests from different + * timelines, this can lead to dependency loops and infinite waits. 
+ * + * As a countermeasure, we try to get a reference to the active->fence + * first, so if we succeed and pass it back to our user then it is not + * released and potentially reused by an unrelated request before the + * user has a chance to set up an await dependency on it. + */ + prev = i915_active_fence_get(active); + if (fence == prev) return fence; GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); @@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active, * Consider that we have two threads arriving (A and B), with * C already resident as the active->fence. * - * A does the xchg first, and so it sees C or NULL depending - * on the timing of the interrupt handler. If it is NULL, the - * previous fence must have been signaled and we know that - * we are first on the timeline. If it is still present, - * we acquire the lock on that fence and serialise with the interrupt - * handler, in the process removing it from any future interrupt - * callback. A will then wait on C before executing (if present). - * - * As B is second, it sees A as the previous fence and so waits for - * it to complete its transition and takes over the occupancy for - * itself -- remembering that it needs to wait on A before executing. + * Both A and B have got a reference to C or NULL, depending on the + * timing of the interrupt handler. Let's assume that if A has got C + * then it has locked C first (before B). * * Note the strong ordering of the timeline also provides consistent * nesting rules for the fence->lock; the inner lock is always the * older lock. */ spin_lock_irqsave(fence->lock, flags); - prev = xchg(__active_fence_slot(active), fence); - if (prev) { - GEM_BUG_ON(prev == fence); + if (prev) spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); + + /* + * A does the cmpxchg first, and so it sees C or NULL, as before, or + * something else, depending on the timing of other threads and/or + * interrupt handler. If not the same as before then A unlocks C if + * applicable and retries, starting from an attempt to get a new + * active->fence. Meanwhile, B follows the same path as A. + * Once A succeeds with cmpxchg, B fails again, retries, gets A from + * active->fence, locks it as soon as A completes, and possibly + * succeeds with cmpxchg. + */ + while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) { + if (prev) { + spin_unlock(prev->lock); + dma_fence_put(prev); + } + spin_unlock_irqrestore(fence->lock, flags); + + prev = i915_active_fence_get(active); + GEM_BUG_ON(prev == fence); + + spin_lock_irqsave(fence->lock, flags); + if (prev) + spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); + } + + /* + * If prev is NULL then the previous fence must have been signaled + * and we know that we are first on the timeline. If it is still + * present then, having the lock on that fence already acquired, we + * serialise with the interrupt handler, in the process of removing it + * from any future interrupt callback. A will then wait on C before + * executing (if present). + * + * As B is second, it sees A as the previous fence and so waits for + * it to complete its transition and takes over the occupancy for + * itself -- remembering that it needs to wait on A before executing. 
+ */ + if (prev) { __list_del_entry(&active->cb.node); spin_unlock(prev->lock); /* serialise with prev->cb_list */ } @@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active, int err = 0; /* Must maintain timeline ordering wrt previous active requests */ - rcu_read_lock(); fence = __i915_active_fence_set(active, &rq->fence); - if (fence) /* but the previous fence may not belong to that timeline! */ - fence = dma_fence_get_rcu(fence); - rcu_read_unlock(); if (fence) { err = i915_request_await_dma_fence(rq, fence); dma_fence_put(fence); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 894068bb37b6..833b73edefdb 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq, request_to_parent(rq)->parallel.last_rq = i915_request_get(rq); + /* + * Users have to put a reference potentially got by + * __i915_active_fence_set() to the returned request + * when no longer needed + */ return to_request(__i915_active_fence_set(&timeline->last_request, &rq->fence)); } @@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq, 0); } + /* + * Users have to put the reference to prev potentially got + * by __i915_active_fence_set() when no longer needed + */ return prev; } @@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq) prev = __i915_request_ensure_ordering(rq, timeline); else prev = __i915_request_ensure_parallel_ordering(rq, timeline); + if (prev) + i915_request_put(prev); /* * Make sure that no request gazumped us - if it was allocated after -- cgit From 2dc0bc1138eecc88b2c376ccb0b0acb215c25a5c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 1 Aug 2023 20:26:50 +1000 Subject: powerpc/64e: Fix secondary thread bringup for ELFv2 kernels When booting on e6500 with an ELF v2 ABI kernel, the secondary threads do not start correctly: [ 0.051118] smp: Bringing up secondary CPUs ... [ 5.072700] Processor 1 is stuck. This occurs because the startup code is written to use function descriptors when loading the entry point for the secondary threads. When building with ELF v2 ABI there are no function descriptors, and the code loads junk values for the entry point address. Fix it by using ppc_function_entry() in C, and DOTSYM() in asm, both of which work correctly for ELF v2 ABI as well as ELF v1 ABI kernels. 
Fixes: 8c5fa3b5c4df ("powerpc/64: Make ELFv2 the default for big-endian builds") Signed-off-by: Michael Ellerman Link: https://msgid.link/20230801102650.48705-1-mpe@ellerman.id.au --- arch/powerpc/kernel/head_64.S | 3 +-- arch/powerpc/platforms/85xx/smp.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index f132d8704263..6440b1bb332a 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -375,8 +375,7 @@ _GLOBAL(generic_secondary_smp_init) beq 20f /* start the specified thread */ - LOAD_REG_ADDR(r5, fsl_secondary_thread_init) - ld r4, 0(r5) + LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary_thread_init)) bl book3e_start_thread /* stop the current thread */ diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 9c43cf32f4c9..40aa58206888 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -180,7 +180,7 @@ static void wake_hw_thread(void *info) unsigned long inia; int cpu = *(const int *)info; - inia = *(unsigned long *)fsl_secondary_thread_init; + inia = ppc_function_entry(fsl_secondary_thread_init); book3e_start_thread(cpu_thread_in_core(cpu), inia); } #endif -- cgit From 16e95a62eed18864aecac404f1e4eed764c363f2 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Tue, 25 Jul 2023 13:39:12 +0800 Subject: powercap: intel_rapl: Fix a sparse warning in TPMI interface Depending on the interface used, the RAPL registers can be either MSR indexes or memory mapped IO addresses. The current RAPL common code uses u64 to save both MSR and memory mapped IO registers. With this, when handling a register address with an __iomem annotation, it triggers a sparse warning like below: sparse warnings: (new ones prefixed by >>) >> drivers/powercap/intel_rapl_tpmi.c:141:41: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected unsigned long long [usertype] *tpmi_rapl_regs @@ got void [noderef] __iomem * @@ drivers/powercap/intel_rapl_tpmi.c:141:41: sparse: expected unsigned long long [usertype] *tpmi_rapl_regs drivers/powercap/intel_rapl_tpmi.c:141:41: sparse: got void [noderef] __iomem * Fix the problem by using a union to save the registers instead. Suggested-by: David Laight Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202307031405.dy3druuy-lkp@intel.com/ Tested-by: Wang Wendy Signed-off-by: Zhang Rui [ rjw: Subject and changelog edits ] Signed-off-by: Rafael J. 
Wysocki --- drivers/powercap/intel_rapl_common.c | 14 +++---- drivers/powercap/intel_rapl_msr.c | 49 +++++++++++----------- drivers/powercap/intel_rapl_tpmi.c | 17 ++++---- .../intel/int340x_thermal/processor_thermal_rapl.c | 16 +++---- include/linux/intel_rapl.h | 14 +++++-- 5 files changed, 58 insertions(+), 52 deletions(-) diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 4e646e5e48f6..8fac57b28f8a 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -818,7 +818,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, return -EINVAL; ra.reg = rd->regs[rpi->id]; - if (!ra.reg) + if (!ra.reg.val) return -EINVAL; /* non-hardware data are collected by the polling thread */ @@ -830,7 +830,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, ra.mask = rpi->mask; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { - pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg, rd->rp->name, rd->name); + pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg.val, rd->rp->name, rd->name); return -EIO; } @@ -920,7 +920,7 @@ static int rapl_check_unit_core(struct rapl_domain *rd) ra.mask = ~0; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", - ra.reg, rd->rp->name, rd->name); + ra.reg.val, rd->rp->name, rd->name); return -ENODEV; } @@ -948,7 +948,7 @@ static int rapl_check_unit_atom(struct rapl_domain *rd) ra.mask = ~0; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", - ra.reg, rd->rp->name, rd->name); + ra.reg.val, rd->rp->name, rd->name); return -ENODEV; } @@ -1135,7 +1135,7 @@ static int rapl_check_unit_tpmi(struct rapl_domain *rd) ra.mask = ~0; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", - ra.reg, rd->rp->name, rd->name); + ra.reg.val, rd->rp->name, rd->name); return -ENODEV; } @@ -1411,8 +1411,8 @@ static int rapl_get_domain_unit(struct rapl_domain *rd) struct rapl_defaults *defaults = get_defaults(rd->rp); int ret; - if (!rd->regs[RAPL_DOMAIN_REG_UNIT]) { - if (!rd->rp->priv->reg_unit) { + if (!rd->regs[RAPL_DOMAIN_REG_UNIT].val) { + if (!rd->rp->priv->reg_unit.val) { pr_err("No valid Unit register found\n"); return -ENODEV; } diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 569e25eab1e1..dd471021f237 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -34,28 +34,32 @@ static struct rapl_if_priv *rapl_msr_priv; static struct rapl_if_priv rapl_msr_priv_intel = { .type = RAPL_IF_MSR, - .reg_unit = MSR_RAPL_POWER_UNIT, - .regs[RAPL_DOMAIN_PACKAGE] = { - MSR_PKG_POWER_LIMIT, MSR_PKG_ENERGY_STATUS, MSR_PKG_PERF_STATUS, 0, MSR_PKG_POWER_INFO }, - .regs[RAPL_DOMAIN_PP0] = { - MSR_PP0_POWER_LIMIT, MSR_PP0_ENERGY_STATUS, 0, MSR_PP0_POLICY, 0 }, - .regs[RAPL_DOMAIN_PP1] = { - MSR_PP1_POWER_LIMIT, MSR_PP1_ENERGY_STATUS, 0, MSR_PP1_POLICY, 0 }, - .regs[RAPL_DOMAIN_DRAM] = { - MSR_DRAM_POWER_LIMIT, MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, 0, MSR_DRAM_POWER_INFO }, - .regs[RAPL_DOMAIN_PLATFORM] = { - MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0}, + .reg_unit.msr = MSR_RAPL_POWER_UNIT, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PKG_POWER_LIMIT, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_PKG_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr = 
MSR_PKG_PERF_STATUS, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr = MSR_PKG_POWER_INFO, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP0_POWER_LIMIT, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP0_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP0_POLICY, + .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP1_POWER_LIMIT, + .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP1_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP1_POLICY, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_DRAM_POWER_LIMIT, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_STATUS].msr = MSR_DRAM_ENERGY_STATUS, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_PERF].msr = MSR_DRAM_PERF_STATUS, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_INFO].msr = MSR_DRAM_POWER_INFO, + .regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PLATFORM_POWER_LIMIT, + .regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS].msr = MSR_PLATFORM_ENERGY_STATUS, .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2), .limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2), }; static struct rapl_if_priv rapl_msr_priv_amd = { .type = RAPL_IF_MSR, - .reg_unit = MSR_AMD_RAPL_POWER_UNIT, - .regs[RAPL_DOMAIN_PACKAGE] = { - 0, MSR_AMD_PKG_ENERGY_STATUS, 0, 0, 0 }, - .regs[RAPL_DOMAIN_PP0] = { - 0, MSR_AMD_CORE_ENERGY_STATUS, 0, 0, 0 }, + .reg_unit.msr = MSR_AMD_RAPL_POWER_UNIT, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_PKG_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_CORE_ENERGY_STATUS, }; /* Handles CPU hotplug on multi-socket systems. @@ -99,10 +103,8 @@ static int rapl_cpu_down_prep(unsigned int cpu) static int rapl_msr_read_raw(int cpu, struct reg_action *ra) { - u32 msr = (u32)ra->reg; - - if (rdmsrl_safe_on_cpu(cpu, msr, &ra->value)) { - pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu); + if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) { + pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu); return -EIO; } ra->value &= ra->mask; @@ -112,17 +114,16 @@ static int rapl_msr_read_raw(int cpu, struct reg_action *ra) static void rapl_msr_update_func(void *info) { struct reg_action *ra = info; - u32 msr = (u32)ra->reg; u64 val; - ra->err = rdmsrl_safe(msr, &val); + ra->err = rdmsrl_safe(ra->reg.msr, &val); if (ra->err) return; val &= ~ra->mask; val |= ra->value; - ra->err = wrmsrl_safe(msr, val); + ra->err = wrmsrl_safe(ra->reg.msr, val); } static int rapl_msr_write_raw(int cpu, struct reg_action *ra) @@ -171,7 +172,7 @@ static int rapl_msr_probe(struct platform_device *pdev) if (id) { rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4); - rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] = + rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4].msr = MSR_VR_CURRENT_CONFIG; pr_info("PL4 support detected.\n"); } diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c index 4f4f13ded225..891c90fefd8b 100644 --- a/drivers/powercap/intel_rapl_tpmi.c +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -59,10 +59,10 @@ static struct powercap_control_type *tpmi_control_type; static int tpmi_rapl_read_raw(int id, struct reg_action *ra) { - if (!ra->reg) + if (!ra->reg.mmio) return -EINVAL; - ra->value = readq((void __iomem *)ra->reg); + ra->value = readq(ra->reg.mmio); ra->value &= ra->mask; return 0; @@ -72,15 +72,15 @@ static int tpmi_rapl_write_raw(int id, struct reg_action *ra) { u64 val; - if (!ra->reg) + if (!ra->reg.mmio) 
return -EINVAL; - val = readq((void __iomem *)ra->reg); + val = readq(ra->reg.mmio); val &= ~ra->mask; val |= ra->value; - writeq(val, (void __iomem *)ra->reg); + writeq(val, ra->reg.mmio); return 0; } @@ -138,8 +138,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) enum tpmi_rapl_register reg_index; enum rapl_domain_reg_id reg_id; int tpmi_domain_size, tpmi_domain_flags; - u64 *tpmi_rapl_regs = trp->base + offset; - u64 tpmi_domain_header = readq((void __iomem *)tpmi_rapl_regs); + u64 tpmi_domain_header = readq(trp->base + offset); /* Domain Parent bits are ignored for now */ tpmi_domain_version = tpmi_domain_header & 0xff; @@ -180,7 +179,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) return -EINVAL; } - if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT]) { + if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT].mmio) { pr_warn(FW_BUG "Duplicate Domain type %d\n", tpmi_domain_type); return -EINVAL; } @@ -218,7 +217,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) default: continue; } - trp->priv.regs[domain_type][reg_id] = (u64)&tpmi_rapl_regs[reg_index]; + trp->priv.regs[domain_type][reg_id].mmio = trp->base + offset + reg_index * 8; } return 0; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c index 013f1633f082..2f00fc3bf274 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c @@ -57,10 +57,10 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu) static int rapl_mmio_read_raw(int cpu, struct reg_action *ra) { - if (!ra->reg) + if (!ra->reg.mmio) return -EINVAL; - ra->value = readq((void __iomem *)ra->reg); + ra->value = readq(ra->reg.mmio); ra->value &= ra->mask; return 0; } @@ -69,13 +69,13 @@ static int rapl_mmio_write_raw(int cpu, struct reg_action *ra) { u64 val; - if (!ra->reg) + if (!ra->reg.mmio) return -EINVAL; - val = readq((void __iomem *)ra->reg); + val = readq(ra->reg.mmio); val &= ~ra->mask; val |= ra->value; - writeq(val, (void __iomem *)ra->reg); + writeq(val, ra->reg.mmio); return 0; } @@ -92,13 +92,13 @@ int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc for (domain = RAPL_DOMAIN_PACKAGE; domain < RAPL_DOMAIN_MAX; domain++) { for (reg = RAPL_DOMAIN_REG_LIMIT; reg < RAPL_DOMAIN_REG_MAX; reg++) if (rapl_regs->regs[domain][reg]) - rapl_mmio_priv.regs[domain][reg] = - (u64)proc_priv->mmio_base + + rapl_mmio_priv.regs[domain][reg].mmio = + proc_priv->mmio_base + rapl_regs->regs[domain][reg]; rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain]; } rapl_mmio_priv.type = RAPL_IF_MMIO; - rapl_mmio_priv.reg_unit = (u64)proc_priv->mmio_base + rapl_regs->reg_unit; + rapl_mmio_priv.reg_unit.mmio = proc_priv->mmio_base + rapl_regs->reg_unit; rapl_mmio_priv.read_raw = rapl_mmio_read_raw; rapl_mmio_priv.write_raw = rapl_mmio_write_raw; diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index e6936cb25047..33f21bd85dbf 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -100,10 +100,16 @@ struct rapl_package; #define RAPL_DOMAIN_NAME_LENGTH 16 +union rapl_reg { + void __iomem *mmio; + u32 msr; + u64 val; +}; + struct rapl_domain { char name[RAPL_DOMAIN_NAME_LENGTH]; enum rapl_domain_type id; - u64 regs[RAPL_DOMAIN_REG_MAX]; + union rapl_reg regs[RAPL_DOMAIN_REG_MAX]; struct powercap_zone power_zone; struct rapl_domain_data rdd; struct 
rapl_power_limit rpl[NR_POWER_LIMITS]; @@ -116,7 +122,7 @@ struct rapl_domain { }; struct reg_action { - u64 reg; + union rapl_reg reg; u64 mask; u64 value; int err; @@ -143,8 +149,8 @@ struct rapl_if_priv { enum rapl_if_type type; struct powercap_control_type *control_type; enum cpuhp_state pcap_rapl_online; - u64 reg_unit; - u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; + union rapl_reg reg_unit; + union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; int limits[RAPL_DOMAIN_MAX]; int (*read_raw)(int id, struct reg_action *ra); int (*write_raw)(int id, struct reg_action *ra); -- cgit From e7e607bd00481745550389a29ecabe33e13d67cf Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Tue, 25 Jul 2023 12:03:59 +0800 Subject: ceph: defer stopping mdsc delayed_work Flushing the dirty buffer may take a long time if the cluster is overloaded or if there is a network issue. So we should ping the MDSs periodically to keep the session alive, else the MDS will blocklist the kclient. Cc: stable@vger.kernel.org Link: https://tracker.ceph.com/issues/61843 Signed-off-by: Xiubo Li Reviewed-by: Milind Changire Signed-off-by: Ilya Dryomov --- fs/ceph/mds_client.c | 4 ++-- fs/ceph/mds_client.h | 5 +++++ fs/ceph/super.c | 10 ++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 66048a86c480..5fb367b1d4b0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4764,7 +4764,7 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); - if (mdsc->stopping) + if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) return; mutex_lock(&mdsc->mutex); @@ -4943,7 +4943,7 @@ void send_flush_mdlog(struct ceph_mds_session *s) void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) { dout("pre_umount\n"); - mdsc->stopping = 1; + mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN; ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 724307ff89cd..86d2965e68a1 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -380,6 +380,11 @@ struct cap_wait { int want; }; +enum { + CEPH_MDSC_STOPPING_BEGIN = 1, + CEPH_MDSC_STOPPING_FLUSHED = 2, +}; + /* * mds client state */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 3fc48b43cab0..a5f52013314d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -1374,6 +1374,16 @@ static void ceph_kill_sb(struct super_block *s) ceph_mdsc_pre_umount(fsc->mdsc); flush_fs_workqueues(fsc); + /* + * Though the kill_anon_super() will finally trigger the + * sync_filesystem() anyway, we still need to do it here + * and then bump the stage of shutdown to stop the work + * queue as early as possible. + */ + sync_filesystem(s); + + fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED; + kill_anon_super(s); fsc->client->extra_mon_dispatch = NULL; -- cgit From 1b0fc0345f2852ffe54fb9ae0e12e2ee69ad6a20 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Tue, 1 Aug 2023 07:31:07 -0700 Subject: Documentation/x86: Fix backwards on/off logic about YMM support These options clearly turn *off* XSAVE YMM support. Correct the typo.
Reported-by: Ben Hutchings Fixes: 553a5c03e90a ("x86/speculation: Add force option to GDS mitigation") Signed-off-by: Dave Hansen --- Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst index 40b7a6260010..264bfa937f7d 100644 --- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -63,7 +63,7 @@ GDS can also be mitigated on systems that don't have updated microcode by disabling AVX. This can be done by setting gather_data_sampling="force" or "clearcpuid=avx" on the kernel command-line. -If used, these options will disable AVX use by turning on XSAVE YMM support. +If used, these options will disable AVX use by turning off XSAVE YMM support. However, the processor will still enumerate AVX support. Userspace that does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM support will break. -- cgit From 9d01e07fd1bfb4daae156ab528aa196f5ac2b2bc Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 1 Aug 2023 19:14:24 +0200 Subject: rbd: prevent busy loop when requesting exclusive lock Due to rbd_try_acquire_lock() effectively swallowing all but EBLOCKLISTED error from rbd_try_lock() ("request lock anyway") and rbd_request_lock() returning ETIMEDOUT error not only for an actual notify timeout but also when the lock owner doesn't respond, a busy loop inside of rbd_acquire_lock() between rbd_try_acquire_lock() and rbd_request_lock() is possible. Requesting the lock on EBUSY error (returned by get_lock_owner_info() if an incompatible lock or invalid lock owner is detected) makes very little sense. The same goes for ETIMEDOUT error (might pop up pretty much anywhere if osd_request_timeout option is set) and many others. Just fail I/O requests on rbd_dev->acquiring_list immediately on any error from rbd_try_lock(). 
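Sketched from the rbd_try_acquire_lock() hunk in the diff below, the "request lock anyway" retry is replaced by an immediate failure:

	ret = rbd_try_lock(rbd_dev);
	if (ret < 0) {
		rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
		goto out;	/* fail queued I/Os instead of busy looping */
	}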
Cc: stable@vger.kernel.org # 588159009d5b: rbd: retrieve and check lock owner twice before blocklisting Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov Reviewed-by: Dongsheng Yang --- drivers/block/rbd.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 24afcc93ac01..2328cc05be36 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3675,7 +3675,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, RBD_LOCK_TAG, "", 0); - if (ret) + if (ret && ret != -EEXIST) return ret; __rbd_lock(rbd_dev, cookie); @@ -3878,7 +3878,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) &rbd_dev->header_oloc, RBD_LOCK_NAME, &lock_type, &lock_tag, &lockers, &num_lockers); if (ret) { - rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret); + rbd_warn(rbd_dev, "failed to get header lockers: %d", ret); return ERR_PTR(ret); } @@ -3940,8 +3940,10 @@ static int find_watcher(struct rbd_device *rbd_dev, ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, &watchers, &num_watchers); - if (ret) + if (ret) { + rbd_warn(rbd_dev, "failed to get watchers: %d", ret); return ret; + } sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); for (i = 0; i < num_watchers; i++) { @@ -3985,8 +3987,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev) locker = refreshed_locker = NULL; ret = rbd_lock(rbd_dev); - if (ret != -EBUSY) + if (!ret) + goto out; + if (ret != -EBUSY) { + rbd_warn(rbd_dev, "failed to lock header: %d", ret); goto out; + } /* determine if the current lock holder is still alive */ locker = get_lock_owner_info(rbd_dev); @@ -4089,11 +4095,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) ret = rbd_try_lock(rbd_dev); if (ret < 0) { - rbd_warn(rbd_dev, "failed to lock header: %d", ret); - if (ret == -EBLOCKLISTED) - goto out; - - ret = 1; /* request lock anyway */ + rbd_warn(rbd_dev, "failed to acquire lock: %d", ret); + goto out; } if (ret > 0) { up_write(&rbd_dev->lock_rwsem); @@ -6627,12 +6630,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) cancel_delayed_work_sync(&rbd_dev->lock_dwork); if (!ret) ret = -ETIMEDOUT; - } - if (ret) { - rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret); - return ret; + rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret); } + if (ret) + return ret; /* * The lock may have been released by now, unless automatic lock -- cgit From e6e2843230799230fc5deb8279728a7218b0d63c Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 1 Aug 2023 19:14:24 +0200 Subject: libceph: fix potential hang in ceph_osdc_notify() If the cluster becomes unavailable, ceph_osdc_notify() may hang even with the osd_request_timeout option set because linger_notify_finish_wait() waits for an MWatchNotify NOTIFY_COMPLETE message with no associated OSD request in flight -- it's completely asynchronous. Introduce an additional timeout, derived from the specified notify timeout. While at it, switch both waits to killable, which is more correct.
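The bounded wait, in sketch form (per the diff below): the NOTIFY_COMPLETE wait becomes killable and is capped, with the caller passing in twice the notify timeout:

	left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
						    ceph_timeout_jiffies(timeout));
	if (left <= 0)
		left = left ?: -ETIMEDOUT;	/* 0 means the timeout expired */
	else
		left = lreq->notify_finish_error;	/* completed */

	/* caller: msecs_to_jiffies(2 * timeout * MSEC_PER_SEC) as the bound */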
Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov Reviewed-by: Dongsheng Yang Reviewed-by: Xiubo Li --- net/ceph/osd_client.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 11c04e7d928e..658a6f2320cf 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -3334,17 +3334,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) int ret; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); + ret = wait_for_completion_killable(&lreq->reg_commit_wait); return ret ?: lreq->reg_commit_error; } -static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) +static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq, + unsigned long timeout) { - int ret; + long left; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); - return ret ?: lreq->notify_finish_error; + left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait, + ceph_timeout_jiffies(timeout)); + if (left <= 0) + left = left ?: -ETIMEDOUT; + else + left = lreq->notify_finish_error; /* completed */ + + return left; } /* @@ -4896,7 +4903,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (!ret) - ret = linger_notify_finish_wait(lreq); + ret = linger_notify_finish_wait(lreq, + msecs_to_jiffies(2 * timeout * MSEC_PER_SEC)); else dout("lreq %p failed to initiate notify %d\n", lreq, ret); -- cgit From 0a8589055936d8feb56477123a8373ac634018fa Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 24 Jul 2023 13:23:14 +0900 Subject: ata,scsi: do not issue START STOP UNIT on resume During system resume, ata_port_pm_resume() triggers ata EH to 1) Resume the controller 2) Reset and rescan the ports 3) Revalidate devices This EH execution is started asynchronously from ata_port_pm_resume(), which means that when sd_resume() is executed, none or only part of the above processing may have been executed. However, sd_resume() issues a START STOP UNIT to wake up the drive from sleep mode. This command is translated to ATA with ata_scsi_start_stop_xlat() and issued to the device. However, depending on the state of execution of the EH process and revalidation triggered by ata_port_pm_resume(), two things may happen: 1) The START STOP UNIT fails if it is received before the controller has been reenabled at the beginning of the EH execution. This is visible with error messages like: ata10.00: device reported invalid CHS sector 0 sd 9:0:0:0: [sdc] Start/Stop Unit failed: Result: hostbyte=DID_OK driverbyte=DRIVER_OK sd 9:0:0:0: [sdc] Sense Key : Illegal Request [current] sd 9:0:0:0: [sdc] Add. Sense: Unaligned write command sd 9:0:0:0: PM: dpm_run_callback(): scsi_bus_resume+0x0/0x90 returns -5 sd 9:0:0:0: PM: failed to resume async: error -5 2) The START STOP UNIT command is received while the EH process is on-going, which means that it is stopped and must wait for its completion, at which point the command is rather useless as the drive is already fully spun up. This case also results in a significant delay in sd_resume() which is observable by users as the entire system resume completion is delayed.
Given that ATA devices will be woken up by libata activity on resume, sd_resume() has no need to issue a START STOP UNIT command, which solves the above-mentioned problems. Do not issue this command by introducing the new scsi_device flag no_start_on_resume and setting this flag to 1 in ata_scsi_dev_config(). sd_resume() is modified to issue a START STOP UNIT command only if this flag is not set. Reported-by: Paul Ausbeck Closes: https://bugzilla.kernel.org/show_bug.cgi?id=215880 Fixes: a19a93e4c6a9 ("scsi: core: pm: Rely on the device driver core for async power management") Signed-off-by: Damien Le Moal Tested-by: Tanner Watkins Tested-by: Paul Ausbeck Reviewed-by: Hannes Reinecke Reviewed-by: Bart Van Assche --- drivers/ata/libata-scsi.c | 7 +++++++ drivers/scsi/sd.c | 9 ++++++--- include/scsi/scsi_device.h | 1 + 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 370d18aca71e..c6ece32de8e3 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1100,7 +1100,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) } } else { sdev->sector_size = ata_id_logical_sector_size(dev->id); + /* + * Stop the drive on suspend but do not issue START STOP UNIT + * on resume as this is not necessary and may fail: the device + * will be woken up by ata_port_pm_resume() with a port reset + * and device revalidation. + */ sdev->manage_start_stop = 1; + sdev->no_start_on_resume = 1; } /* diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 68b12afa0721..3c668cfb146d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3876,7 +3876,7 @@ static int sd_suspend_runtime(struct device *dev) static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); - int ret; + int ret = 0; if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; @@ -3884,8 +3884,11 @@ static int sd_resume(struct device *dev) if (!sdkp->device->manage_start_stop) return 0; - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); - ret = sd_start_stop_device(sdkp, 1); + if (!sdkp->device->no_start_on_resume) { + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + ret = sd_start_stop_device(sdkp, 1); + } + if (!ret) opal_unlock_from_suspend(sdkp->opal_dev); return ret; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 75b2235b99e2..b9230b6add04 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -194,6 +194,7 @@ struct scsi_device { unsigned no_start_on_add:1; /* do not issue start on add */ unsigned allow_restart:1; /* issue START_UNIT in error handler */ unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ + unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */ unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ unsigned select_no_atn:1; -- cgit From c71b7aa8619a0c9700132d0733e33999fb614339 Mon Sep 17 00:00:00 2001 From: Nikita Travkin Date: Wed, 2 Aug 2023 11:41:22 +0500 Subject: drm/panel: samsung-s6d7aa0: Add MODULE_DEVICE_TABLE The driver can be built as a module, however the lack of the MODULE_DEVICE_TABLE macro prevents it from being automatically probed from the DT in such a case. Add the missing macro to make sure the module can load automatically.
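For reference, the general pattern looks like the sketch below, with a made-up compatible string rather than the real s6d7aa0 entries:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id demo_of_match[] = {
    { .compatible = "vendor,demo-panel" },  /* hypothetical */
    { /* sentinel */ }
};
/*
 * Emits the table into the module info so that depmod can generate
 * modules.alias entries; without this, udev never sees an alias
 * matching the DT compatible and the module is not auto-loaded.
 */
MODULE_DEVICE_TABLE(of, demo_of_match);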
Fixes: 6810bb390282 ("drm/panel: Add Samsung S6D7AA0 panel controller driver") Signed-off-by: Nikita Travkin Acked-by: Artur Weber Reviewed-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20230802-gt5-panel-dtable-v1-1-c0a765c175e2@trvn.ru --- drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c index 102e1fc7ee38..be4ec5bb5223 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c @@ -569,6 +569,7 @@ static const struct of_device_id s6d7aa0_of_match[] = { }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, s6d7aa0_of_match); static struct mipi_dsi_driver s6d7aa0_driver = { .probe = s6d7aa0_probe, -- cgit From 86582e6189dd8f9f52c25d46c70fe5d111da6345 Mon Sep 17 00:00:00 2001 From: Benjamin Gray Date: Thu, 6 Jul 2023 11:08:16 +1000 Subject: powerpc/powermac: Use early_* IO variants in via_calibrate_decr() On a powermac platform, via the call path: start_kernel() time_init() ppc_md.calibrate_decr() (pmac_calibrate_decr) via_calibrate_decr() ioremap() and iounmap() are called. The unmap can enable interrupts unexpectedly (cond_resched() in vunmap_pmd_range()), which causes a warning later in the boot sequence in start_kernel(). Use the early_* variants of these IO functions to prevent this. The issue is pre-existing, but is surfaced by commit 721255b9826b ("genirq: Use a maple tree for interrupt descriptor management"). Signed-off-by: Benjamin Gray Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://msgid.link/20230706010816.72682-1-bgray@linux.ibm.com --- arch/powerpc/platforms/powermac/time.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 4c5790aff1b5..8633891b7aa5 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -26,8 +26,8 @@ #include #include +#include #include -#include #include #include #include @@ -182,7 +182,7 @@ static int __init via_calibrate_decr(void) return 0; } of_node_put(vias); - via = ioremap(rsrc.start, resource_size(&rsrc)); + via = early_ioremap(rsrc.start, resource_size(&rsrc)); if (via == NULL) { printk(KERN_ERR "Failed to map VIA for timer calibration !\n"); return 0; @@ -207,7 +207,7 @@ static int __init via_calibrate_decr(void) ppc_tb_freq = (dstart - dend) * 100 / 6; - iounmap(via); + early_iounmap((void *)via, resource_size(&rsrc)); return 1; } -- cgit From 11260c3d608b59231f4c228147a795ab21a10b33 Mon Sep 17 00:00:00 2001 From: Paulo Alcantara Date: Wed, 2 Aug 2023 13:43:03 -0300 Subject: smb: client: fix dfs link mount against w2k8 A customer reported that they couldn't mount their DFS link that was seen by the client as a DFS interlink -- a special form of DFS link where its single target may point to a different DFS namespace -- and it turned out that it was just a regular DFS link where its referral header flags missed the StorageServers bit, thus making the client think it couldn't tree connect to the target directly without requiring further referrals. When the DFS link referral header flags miss the StorageServers bit and its target doesn't respond to any referrals, then tree connect to it.
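The resulting control flow, reduced to a sketch: get_referral() and tree_connect() are hypothetical stand-ins for dfs_get_referral() and the cifs_mount_get_tcon()/cifs_is_path_remote() pair, with stubbed return values.

/* Illustrative only; both helpers are hypothetical stand-ins. */
static int get_referral(void) { return -1; }  /* pretend lookup failed */
static int tree_connect(void) { return 0; }

int mount_dfs_path(void)
{
    int rc = get_referral();

    /*
     * No further referrals for this target (e.g. the StorageServers
     * bit missing from the referral header flags): fall back to tree
     * connecting to the target share directly.
     */
    if (rc)
        rc = tree_connect();
    return rc;
}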
Fixes: a1c0d00572fc ("cifs: share dfs connections and supers") Cc: stable@vger.kernel.org Signed-off-by: Paulo Alcantara (SUSE) Signed-off-by: Steve French --- fs/smb/client/dfs.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c index df3fd3b720da..ee772c3d9f00 100644 --- a/fs/smb/client/dfs.c +++ b/fs/smb/client/dfs.c @@ -177,8 +177,12 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl); rc = dfs_get_referral(mnt_ctx, ref_path + 1, NULL, &tl); - if (rc) + if (rc) { + rc = cifs_mount_get_tcon(mnt_ctx); + if (!rc) + rc = cifs_is_path_remote(mnt_ctx); break; + } tit = dfs_cache_get_tgt_iterator(&tl); if (!tit) { -- cgit From 8c82d2bf5944123d8e90d01bf27655497d9aa321 Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Wed, 12 Jul 2023 12:35:14 -0700 Subject: selftests/riscv: fix potential build failure during the "emit_tests" step The riscv selftests (which were modeled after the arm64 selftests) are improperly declaring the "emit_tests" target to depend upon the "all" target. This approach, when combined with commit 9fc96c7c19df ("selftests: error out if kernel header files are not yet built"), has caused build failures [1] on arm64, and is likely to cause similar failures for riscv. To fix this, simply remove the unnecessary "all" dependency from the emit_tests target. The dependency is still effectively honored, because again, invocation is via "install", which also depends upon "all". An alternative approach would be to harden the emit_tests target so that it can depend upon "all", but that's a lot more complicated and hard to get right, and doesn't seem worth it, especially given that emit_tests should probably not be overridden at all. [1] https://lore.kernel.org/20230710-kselftest-fix-arm64-v1-1-48e872844f25@kernel.org Fixes: 9fc96c7c19df ("selftests: error out if kernel header files are not yet built") Signed-off-by: John Hubbard Tested-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20230712193514.740033-1-jhubbard@nvidia.com Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- tools/testing/selftests/riscv/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/riscv/Makefile b/tools/testing/selftests/riscv/Makefile index 9dd629cc86aa..f4b3d5c9af5b 100644 --- a/tools/testing/selftests/riscv/Makefile +++ b/tools/testing/selftests/riscv/Makefile @@ -43,7 +43,7 @@ run_tests: all done # Avoid any output on non riscv on emit_tests -emit_tests: all +emit_tests: @for DIR in $(RISCV_SUBTARGETS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@; \ -- cgit From 25696067202f047e22c1562f1f56b0e2eb547d1a Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Thu, 13 Jul 2023 13:58:29 +0200 Subject: selftests: riscv: Fix compilation error with vstate_exec_nolibc.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following error happens: In file included from vstate_exec_nolibc.c:2: /usr/include/riscv64-linux-gnu/sys/prctl.h:42:12: error: conflicting types for ‘prctl’; have ‘int(int, ...)’
__THROW; | ^~~~~ In file included from ./../../../../include/nolibc/nolibc.h:99, from : ./../../../../include/nolibc/sys.h:892:5: note: previous definition of ‘prctl’ with type ‘int(int, long unsigned int, long unsigned int, long unsigned int, long unsigned int)’ 892 | int prctl(int option, unsigned long arg2, unsigned long arg3, | ^~~~~ Fix this by not including <sys/prctl.h>, which is not needed here since the prctl syscall is called directly using its number. Fixes: 7cf6198ce22d ("selftests: Test RISC-V Vector prctl interface") Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20230713115829.110421-1-alexghiti@rivosinc.com Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c index 5cbc392944a6..2c0d2b1126c1 100644 --- a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c +++ b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-only -#include <sys/prctl.h> - #define THIS_PROGRAM "./vstate_exec_nolibc" int main(int argc, char **argv) -- cgit From 568701fee36652a7660ed667a3980c945d8051e0 Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Mon, 24 Jul 2023 15:33:46 +0530 Subject: RISC-V: ACPI: Fix acpi_os_ioremap to return iomem address acpi_os_ioremap() is currently a wrapper around memremap() on RISC-V. But the callers of acpi_os_ioremap() expect it to return an __iomem address, and hence the sparse tool reports a new warning. Fix this issue by casting to the __iomem type. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202307230357.egcTAefj-lkp@intel.com/ Fixes: a91a9ffbd3a5 ("RISC-V: Add support to build the ACPI core") Signed-off-by: Sunil V L Reviewed-by: Conor Dooley Link: https://lore.kernel.org/r/20230724100346.1302937-1-sunilvl@ventanamicro.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/acpi.h | 2 +- arch/riscv/kernel/acpi.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h index f71ce21ff684..d5604d2073bc 100644 --- a/arch/riscv/include/asm/acpi.h +++ b/arch/riscv/include/asm/acpi.h @@ -19,7 +19,7 @@ typedef u64 phys_cpuid_t; #define PHYS_CPUID_INVALID INVALID_HARTID /* ACPI table mapping after acpi_permanent_mmap is set */ -void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); #define acpi_os_ioremap acpi_os_ioremap #define acpi_strict 1 /* No out-of-spec workarounds on RISC-V */ diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c index 5ee03ebab80e..56cb2c986c48 100644 --- a/arch/riscv/kernel/acpi.c +++ b/arch/riscv/kernel/acpi.c @@ -215,9 +215,9 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size) early_iounmap(map, size); } -void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) { - return memremap(phys, size, MEMREMAP_WB); + return (void __iomem *)memremap(phys, size, MEMREMAP_WB); } #ifdef CONFIG_PCI -- cgit From fbe7d19d2b7fcbd38905ba9f691be8f245c6faa6 Mon Sep 17 00:00:00 2001 From: Song Shuai Date: Mon, 24 Jul 2023 18:09:16 +0800 Subject: riscv: Export va_kernel_pa_offset in vmcoreinfo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since
RISC-V Linux v6.4, the commit 3335068f8721 ("riscv: Use PUD/P4D/PGD pages for the linear mapping") changes phys_ram_base from the physical start of the kernel to the actual start of the DRAM. The Crash-utility's VTOP() still uses phys_ram_base and kernel_map.virt_addr to translate kernel virtual addresses, which caused Crash to fail with Linux v6.4 [1]. Export kernel_map.va_kernel_pa_offset in vmcoreinfo to help Crash translate the kernel virtual address correctly. Fixes: 3335068f8721 ("riscv: Use PUD/P4D/PGD pages for the linear mapping") Link: https://lore.kernel.org/linux-riscv/20230724040649.220279-1-suagrfillet@gmail.com/ [1] Signed-off-by: Song Shuai Reviewed-by: Xianting Tian Reviewed-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20230724100917.309061-1-suagrfillet@gmail.com Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/crash_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/riscv/kernel/crash_core.c b/arch/riscv/kernel/crash_core.c index b351a3c01355..55f1d7856b54 100644 --- a/arch/riscv/kernel/crash_core.c +++ b/arch/riscv/kernel/crash_core.c @@ -18,4 +18,6 @@ void arch_crash_save_vmcoreinfo(void) vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END); #endif vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR); + vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n", + kernel_map.va_kernel_pa_offset); } -- cgit From 640c503d7dbd7d34a62099c933f4db0ed77ccbec Mon Sep 17 00:00:00 2001 From: Song Shuai Date: Mon, 24 Jul 2023 18:09:17 +0800 Subject: Documentation: kdump: Add va_kernel_pa_offset for RISCV64 RISC-V Linux exports "va_kernel_pa_offset" in vmcoreinfo to help Crash-utility translate the kernel virtual address correctly. This adds the definition of "va_kernel_pa_offset". Fixes: 3335068f8721 ("riscv: Use PUD/P4D/PGD pages for the linear mapping") Link: https://lore.kernel.org/linux-riscv/20230724040649.220279-1-suagrfillet@gmail.com/ Signed-off-by: Song Shuai Reviewed-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20230724100917.309061-2-suagrfillet@gmail.com Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- Documentation/admin-guide/kdump/vmcoreinfo.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst index c18d94fa6470..f8ebb63b6c5d 100644 --- a/Documentation/admin-guide/kdump/vmcoreinfo.rst +++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst @@ -624,3 +624,9 @@ Used to get the correct ranges: * VMALLOC_START ~ VMALLOC_END : vmalloc() / ioremap() space. * VMEMMAP_START ~ VMEMMAP_END : vmemmap space, used for struct page array. * KERNEL_LINK_ADDR : start address of Kernel link and BPF + +va_kernel_pa_offset +------------------- + +Indicates the offset between the kernel virtual and physical mappings. +Used to translate virtual to physical addresses. -- cgit From 9e2d0c336524706fb327e9b87477f5f3337ad7a6 Mon Sep 17 00:00:00 2001 From: Saurabh Sengar Date: Fri, 23 Jun 2023 09:28:08 -0700 Subject: x86/hyperv: add noop functions to x86_init mpparse functions Hyper-V can run VMs at different privilege "levels" known as Virtual Trust Levels (VTL). Sometimes, it chooses to run two different VMs at different levels but they share some of their address space. In such setups VTL2 (the higher-level VM) has visibility of all of the VTL0 (level 0) memory space. When CONFIG_X86_MPPARSE is enabled for VTL2, the VTL2 kernel performs a search within the low memory to locate MP tables.
However, in systems where VTL0 manages the low memory and it may contain valid tables, this scanning can result in incorrect MP table information being provided to the VTL2 kernel, which mistakenly considers VTL0's MP table as its own. Add noop functions to avoid the MP parse scan by VTL2. Signed-off-by: Saurabh Sengar Acked-by: Dave Hansen Link: https://lore.kernel.org/r/1687537688-5397-1-git-send-email-ssengar@linux.microsoft.com Signed-off-by: Wei Liu --- arch/x86/hyperv/hv_vtl.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c index 85d38b9f3586..db5d2ea39fc0 100644 --- a/arch/x86/hyperv/hv_vtl.c +++ b/arch/x86/hyperv/hv_vtl.c @@ -25,6 +25,10 @@ void __init hv_vtl_init_platform(void) x86_init.irqs.pre_vector_init = x86_init_noop; x86_init.timers.timer_init = x86_init_noop; + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = x86_init_uint_noop; + x86_platform.get_wallclock = get_rtc_noop; x86_platform.set_wallclock = set_rtc_noop; x86_platform.get_nmi_reason = hv_get_nmi_reason; -- cgit From 6ad0f2f91ad14ba0a3c2990c054fd6fbe8100429 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 25 Jul 2023 22:21:08 +0800 Subject: Drivers: hv: vmbus: Remove unused extern declaration vmbus_ontimer() Since commit 30fbee49b071 ("Staging: hv: vmbus: Get rid of the unused function vmbus_ontimer()") this is not used anymore, so it can be removed. Signed-off-by: YueHaibing Reviewed-by: Michael Kelley Link: https://lore.kernel.org/r/20230725142108.27280-1-yuehaibing@huawei.com Signed-off-by: Wei Liu --- include/linux/hyperv.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index bfbc37ce223b..3ac3974b3c78 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1239,9 +1239,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, u32 *buffer_actual_len, u64 *requestid); - -extern void vmbus_ontimer(unsigned long data); - /* Base driver object */ struct hv_driver { const char *name; -- cgit From 534fc31d09b706a16d83533e16b5dc855caf7576 Mon Sep 17 00:00:00 2001 From: Ross Lagerwall Date: Thu, 3 Aug 2023 08:41:22 +0200 Subject: xen/netback: Fix buffer overrun triggered by unusual packet It is possible that a guest can send a packet that contains a head + 18 slots and yet has a len <= XEN_NETBACK_TX_COPY_LEN. This causes nr_slots to underflow in xenvif_get_requests() which then causes the subsequent loop's termination condition to be wrong, causing a buffer overrun of queue->tx_map_ops. Rework the code to account for the extra frag_overflow slots. This is CVE-2023-34319 / XSA-432.
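The underlying bug class is easy to demonstrate in isolation; the standalone sketch below uses made-up slot counts and hard-codes 17 as a stand-in for MAX_SKB_FRAGS, and is only an illustration of the wraparound, not the netback code itself:

#include <stdio.h>

int main(void)
{
    /* Pre-fix accounting: the head consumed 4 slots via copy ops,
     * but nr_slots was computed without frag_overflow. */
    unsigned int nr_slots = 1 /* nr_frags */ + 1;

    nr_slots -= 4;      /* wraps: unsigned values cannot go negative */
    printf("nr_slots underflowed to %u\n", nr_slots);

    /* Post-fix style: include frag_overflow up front and bound the
     * loop on both the remaining slot count and the frag limit. */
    unsigned int slots = 16 /* nr_frags */ + 3 /* frag_overflow */ + 1;
    unsigned int i;

    for (i = 0; slots > 0 && i < 17; i++, slots--)
        ;               /* map one slot per iteration */
    printf("%u slots left over for the frag_list skb\n", slots);
    return 0;
}

Counting down a single remaining-slots value, as the fix does, keeps the two loops consistent with each other by construction.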
Fixes: ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area") Signed-off-by: Ross Lagerwall Reviewed-by: Paul Durrant Reviewed-by: Wei Liu Signed-off-by: Juergen Gross --- drivers/net/xen-netback/netback.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c8d20cddf658..88f760a7cbc3 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -396,7 +396,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue, struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; struct xen_netif_tx_request *txp = first; - nr_slots = shinfo->nr_frags + 1; + nr_slots = shinfo->nr_frags + frag_overflow + 1; copy_count(skb) = 0; XENVIF_TX_CB(skb)->split_mask = 0; @@ -462,8 +462,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } } - for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, gop++) { + for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; + shinfo->nr_frags++, gop++, nr_slots--) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, @@ -476,12 +476,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue, txp++; } - if (frag_overflow) { + if (nr_slots > 0) { shinfo = skb_shinfo(nskb); frags = shinfo->frags; - for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; @@ -492,6 +492,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } skb_shinfo(skb)->frag_list = nskb; + } else if (nskb) { + /* A frag_list skb was allocated but it is no longer needed + * because enough slots were converted to copy ops above. 
+ */ + kfree_skb(nskb); } (*copy_ops) = cop - queue->tx_copy_ops; -- cgit From c2ff2b736c41cc63bb0aaec85cccfead9fbcfe92 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (IBM)" Date: Thu, 3 Aug 2023 09:24:04 +0300 Subject: parisc/mm: preallocate fixmap page tables at init Christoph Biedl reported early OOM on recent kernels: swapper: page allocation failure: order:0, mode:0x100(__GFP_ZERO), nodemask=(null) CPU: 0 PID: 0 Comm: swapper Not tainted 6.3.0-rc4+ #16 Hardware name: 9000/785/C3600 Backtrace: [<10408594>] show_stack+0x48/0x5c [<10e152d8>] dump_stack_lvl+0x48/0x64 [<10e15318>] dump_stack+0x24/0x34 [<105cf7f8>] warn_alloc+0x10c/0x1c8 [<105d068c>] __alloc_pages+0xbbc/0xcf8 [<105d0e4c>] __get_free_pages+0x28/0x78 [<105ad10c>] __pte_alloc_kernel+0x30/0x98 [<10406934>] set_fixmap+0xec/0xf4 [<10411ad4>] patch_map.constprop.0+0xa8/0xdc [<10411bb0>] __patch_text_multiple+0xa8/0x208 [<10411d78>] patch_text+0x30/0x48 [<1041246c>] arch_jump_label_transform+0x90/0xcc [<1056f734>] jump_label_update+0xd4/0x184 [<1056fc9c>] static_key_enable_cpuslocked+0xc0/0x110 [<1056fd08>] static_key_enable+0x1c/0x2c [<1011362c>] init_mem_debugging_and_hardening+0xdc/0xf8 [<1010141c>] start_kernel+0x5f0/0xa98 [<10105da8>] start_parisc+0xb8/0xe4 Mem-Info: active_anon:0 inactive_anon:0 isolated_anon:0 active_file:0 inactive_file:0 isolated_file:0 unevictable:0 dirty:0 writeback:0 slab_reclaimable:0 slab_unreclaimable:0 mapped:0 shmem:0 pagetables:0 sec_pagetables:0 bounce:0 kernel_misc_reclaimable:0 free:0 free_pcp:0 free_cma:0 Node 0 active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB mapped:0kB dirty:0kB writeback:0kB shmem:0kB +writeback_tmp:0kB kernel_stack:0kB pagetables:0kB sec_pagetables:0kB all_unreclaimable? no Normal free:0kB boost:0kB min:0kB low:0kB high:0kB reserved_highatomic:0KB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB writepending:0kB +present:1048576kB managed:1039360kB mlocked:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB lowmem_reserve[]: 0 0 Normal: 0*4kB 0*8kB 0*16kB 0*32kB 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 0kB 0 total pagecache pages 0 pages in swap cache Free swap = 0kB Total swap = 0kB 262144 pages RAM 0 pages HighMem/MovableOnly 2304 pages reserved Backtrace: [<10411d78>] patch_text+0x30/0x48 [<1041246c>] arch_jump_label_transform+0x90/0xcc [<1056f734>] jump_label_update+0xd4/0x184 [<1056fc9c>] static_key_enable_cpuslocked+0xc0/0x110 [<1056fd08>] static_key_enable+0x1c/0x2c [<1011362c>] init_mem_debugging_and_hardening+0xdc/0xf8 [<1010141c>] start_kernel+0x5f0/0xa98 [<10105da8>] start_parisc+0xb8/0xe4 Kernel Fault: Code=15 (Data TLB miss fault) at addr 0f7fe3c0 CPU: 0 PID: 0 Comm: swapper Not tainted 6.3.0-rc4+ #16 Hardware name: 9000/785/C3600 This happens because patching static key code temporarily maps it via fixmap and if it happens before page allocator is initialized set_fixmap() cannot allocate memory using pte_alloc_kernel(). Make sure that fixmap page tables are preallocated early so that pte_offset_kernel() in set_fixmap() never resorts to pte allocation. 
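The fix follows a general preallocate-then-populate pattern; a toy two-level table makes the idea visible (a generic sketch with invented names, nothing parisc-specific):

#include <assert.h>
#include <stdlib.h>

#define ENTRIES 16

static long *table[ENTRIES];    /* first level, like the pmd */

/* Init time: allocate every second-level table up front. */
void table_init(void)
{
    for (int i = 0; i < ENTRIES; i++) {
        table[i] = calloc(ENTRIES, sizeof(long));
        assert(table[i]);       /* the kernel version panic()s */
    }
}

/* Early/hot path: for idx < ENTRIES * ENTRIES, this never allocates. */
void table_set(unsigned int idx, long val)
{
    table[idx / ENTRIES][idx % ENTRIES] = val;
}

Because every intermediate level exists after table_init(), the update path has no failure mode and no dependency on the allocator being up, which is exactly what set_fixmap() needs during early patching.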
Signed-off-by: Mike Rapoport (IBM) Acked-by: Vlastimil Babka Signed-off-by: Helge Deller Tested-by: Christoph Biedl Tested-by: John David Anglin Cc: # v6.4+ --- arch/parisc/mm/fixmap.c | 3 --- arch/parisc/mm/init.c | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c index cc15d737fda6..ae3493dae9dc 100644 --- a/arch/parisc/mm/fixmap.c +++ b/arch/parisc/mm/fixmap.c @@ -19,9 +19,6 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys) pmd_t *pmd = pmd_offset(pud, vaddr); pte_t *pte; - if (pmd_none(*pmd)) - pte = pte_alloc_kernel(pmd, vaddr); - pte = pte_offset_kernel(pmd, vaddr); set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX)); flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 406c52fe23d5..389941c7f209 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -669,6 +669,39 @@ static void __init gateway_init(void) PAGE_SIZE, PAGE_GATEWAY, 1); } +static void __init fixmap_init(void) +{ + unsigned long addr = FIXMAP_START; + unsigned long end = FIXMAP_START + FIXMAP_SIZE; + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d = p4d_offset(pgd, addr); + pud_t *pud = pud_offset(p4d, addr); + pmd_t *pmd; + + BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE); + +#if CONFIG_PGTABLE_LEVELS == 3 + if (pud_none(*pud)) { + pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER, + PAGE_SIZE << PMD_TABLE_ORDER); + if (!pmd) + panic("fixmap: pmd allocation failed.\n"); + pud_populate(NULL, pud, pmd); + } +#endif + + pmd = pmd_offset(pud, addr); + do { + pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("fixmap: pte allocation failed.\n"); + + pmd_populate_kernel(&init_mm, pmd, pte); + + addr += PAGE_SIZE; + } while (addr < end); +} + static void __init parisc_bootmem_free(void) { unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, }; @@ -683,6 +716,7 @@ void __init paging_init(void) setup_bootmem(); pagetable_init(); gateway_init(); + fixmap_init(); flush_cache_all_local(); /* start with known state */ flush_tlb_all_local(NULL); -- cgit From ce9ff57d393db86a34ba3f817d7fb886b7c278dc Mon Sep 17 00:00:00 2001 From: Petr Tesarik Date: Wed, 2 Aug 2023 18:33:21 +0200 Subject: parisc: pci-dma: remove unused and dead EISA code and comment Clearly, this code isn't needed, but it gives a false positive when grepping the complete source tree for coherent_dma_mask. Signed-off-by: Petr Tesarik Signed-off-by: Helge Deller --- arch/parisc/kernel/pci-dma.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index d818ece23b4a..3f6b507970eb 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -417,14 +417,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, map_uncached_pages(vaddr, size, paddr); *dma_handle = (dma_addr_t) paddr; -#if 0 -/* This probably isn't needed to support EISA cards. -** ISA cards will certainly only support 24-bit DMA addressing. -** Not clear if we can, want, or need to support ISA. 
-*/ - if (!dev || *dev->coherent_dma_mask < 0xffffffff) - gfp |= GFP_DMA; -#endif return (void *)vaddr; } -- cgit From 2e1b1d7063a35ab6cf9984f9d5bc29829e1e8788 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 26 Jul 2023 17:09:14 +0200 Subject: parport: gsc: remove DMA leftover code This driver does not actually work with DMA mode, but still tries to call ISA DMA interface functions that are stubbed out on parisc, resulting in a W=1 build warning: drivers/parport/parport_gsc.c: In function 'parport_remove_chip': drivers/parport/parport_gsc.c:389:20: warning: suggest braces around empty body in an 'if' statement [-Wempty-body] 389 | free_dma(p->dma); Remove the corresponding code as a prerequisite for turning on -Wempty-body by default in all kernels. Signed-off-by: Arnd Bergmann Signed-off-by: Helge Deller --- drivers/parport/parport_gsc.c | 28 ++++------------------------ drivers/parport/parport_gsc.h | 7 ------- 2 files changed, 4 insertions(+), 31 deletions(-) diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 0dcc497b0449..5e4475254bd0 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -28,7 +28,6 @@ #include #include -#include #include #include @@ -226,9 +225,9 @@ static int parport_PS2_supported(struct parport *pb) /* --- Initialisation code -------------------------------- */ -struct parport *parport_gsc_probe_port(unsigned long base, +static struct parport *parport_gsc_probe_port(unsigned long base, unsigned long base_hi, int irq, - int dma, struct parisc_device *padev) + struct parisc_device *padev) { struct parport_gsc_private *priv; struct parport_operations *ops; @@ -250,12 +249,9 @@ struct parport *parport_gsc_probe_port(unsigned long base, } priv->ctr = 0xc; priv->ctr_writable = 0xff; - priv->dma_buf = NULL; - priv->dma_handle = 0; p->base = base; p->base_hi = base_hi; p->irq = irq; - p->dma = dma; p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT; p->ops = ops; p->private_data = priv; @@ -286,17 +282,9 @@ struct parport *parport_gsc_probe_port(unsigned long base, if (p->irq == PARPORT_IRQ_AUTO) { p->irq = PARPORT_IRQ_NONE; } - if (p->irq != PARPORT_IRQ_NONE) { + if (p->irq != PARPORT_IRQ_NONE) pr_cont(", irq %d", p->irq); - if (p->dma == PARPORT_DMA_AUTO) { - p->dma = PARPORT_DMA_NONE; - } - } - if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq - is mandatory (see above) */ - p->dma = PARPORT_DMA_NONE; - pr_cont(" ["); #define printmode(x) \ do { \ @@ -321,7 +309,6 @@ do { \ pr_warn("%s: irq %d in use, resorting to polled operation\n", p->name, p->irq); p->irq = PARPORT_IRQ_NONE; - p->dma = PARPORT_DMA_NONE; } } @@ -369,8 +356,7 @@ static int __init parport_init_chip(struct parisc_device *dev) pr_info("%s: enhanced parport-modes not supported\n", __func__); } - p = parport_gsc_probe_port(port, 0, dev->irq, - /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev); + p = parport_gsc_probe_port(port, 0, dev->irq, dev); if (p) parport_count++; dev_set_drvdata(&dev->dev, p); @@ -382,16 +368,10 @@ static void __exit parport_remove_chip(struct parisc_device *dev) { struct parport *p = dev_get_drvdata(&dev->dev); if (p) { - struct parport_gsc_private *priv = p->private_data; struct parport_operations *ops = p->ops; parport_remove_port(p); - if (p->dma != PARPORT_DMA_NONE) - free_dma(p->dma); if (p->irq != PARPORT_IRQ_NONE) free_irq(p->irq, p); - if (priv->dma_buf) - dma_free_coherent(&priv->dev->dev, PAGE_SIZE, - priv->dma_buf, priv->dma_handle); kfree (p->private_data); parport_put_port(p); kfree (ops); /* 
hope no-one cached it */ diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h index 9301217edf12..d447a568c257 100644 --- a/drivers/parport/parport_gsc.h +++ b/drivers/parport/parport_gsc.h @@ -63,8 +63,6 @@ struct parport_gsc_private { int writeIntrThreshold; /* buffer suitable for DMA, if DMA enabled */ - char *dma_buf; - dma_addr_t dma_handle; struct pci_dev *dev; }; @@ -199,9 +197,4 @@ extern void parport_gsc_inc_use_count(void); extern void parport_gsc_dec_use_count(void); -extern struct parport *parport_gsc_probe_port(unsigned long base, - unsigned long base_hi, - int irq, int dma, - struct parisc_device *padev); - #endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */ -- cgit From 99b2f159b6e76b84357eae6dc2a206871aa630d5 Mon Sep 17 00:00:00 2001 From: "hanyu001@208suo.com" Date: Thu, 20 Jul 2023 14:40:27 +0800 Subject: parisc: unaligned: Add required spaces after ',' Fix checkpatch warnings: unaligned.c:475: ERROR: space required after that ',' Signed-off-by: Yu Han Signed-off-by: Helge Deller --- arch/parisc/kernel/unaligned.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 033b9e50b44a..813062701922 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -337,7 +337,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) : "r19", "r20", "r21", "r22", "r1" ); #else { - unsigned long valh=(val>>32),vall=(val&0xffffffffl); + unsigned long valh = (val >> 32), vall = (val & 0xffffffffl); __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" @@ -473,7 +473,7 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_LDWA_I: case OPCODE_LDW_S: case OPCODE_LDWA_S: - ret = emulate_ldw(regs, R3(regs->iir),0); + ret = emulate_ldw(regs, R3(regs->iir), 0); break; case OPCODE_STH: @@ -482,7 +482,7 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_STW: case OPCODE_STWA: - ret = emulate_stw(regs, R2(regs->iir),0); + ret = emulate_stw(regs, R2(regs->iir), 0); break; #ifdef CONFIG_64BIT @@ -490,12 +490,12 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_LDDA_I: case OPCODE_LDD_S: case OPCODE_LDDA_S: - ret = emulate_ldd(regs, R3(regs->iir),0); + ret = emulate_ldd(regs, R3(regs->iir), 0); break; case OPCODE_STD: case OPCODE_STDA: - ret = emulate_std(regs, R2(regs->iir),0); + ret = emulate_std(regs, R2(regs->iir), 0); break; #endif @@ -503,24 +503,24 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_FLDWS: case OPCODE_FLDWXR: case OPCODE_FLDWSR: - ret = emulate_ldw(regs,FR3(regs->iir),1); + ret = emulate_ldw(regs, FR3(regs->iir), 1); break; case OPCODE_FLDDX: case OPCODE_FLDDS: - ret = emulate_ldd(regs,R3(regs->iir),1); + ret = emulate_ldd(regs, R3(regs->iir), 1); break; case OPCODE_FSTWX: case OPCODE_FSTWS: case OPCODE_FSTWXR: case OPCODE_FSTWSR: - ret = emulate_stw(regs,FR3(regs->iir),1); + ret = emulate_stw(regs, FR3(regs->iir), 1); break; case OPCODE_FSTDX: case OPCODE_FSTDS: - ret = emulate_std(regs,R3(regs->iir),1); + ret = emulate_std(regs, R3(regs->iir), 1); break; case OPCODE_LDCD_I: -- cgit From c9bb40b7f786662e33d71afe236442b0b61f0446 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 3 Aug 2023 00:46:39 +0100 Subject: arm64/fpsimd: Clear SME state in the target task when setting the VL When setting SME vector lengths we clear TIF_SME to reenable SME traps, doing a reallocation of the backing storage on next use. 
We do this using clear_thread_flag() which operates on the current thread, meaning that when setting the vector length via ptrace we may both not force traps for the target task and force a spurious flush of any SME state that the tracing task may have. Clear the flag in the target task. Fixes: e12310a0d30f ("arm64/sme: Implement ptrace support for streaming mode SVE registers") Reported-by: David Spickett Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230803-arm64-fix-ptrace-tif-sme-v1-1-88312fd6fbfd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 520b681a07bb..a61a1fd6492d 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -909,7 +909,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, */ task->thread.svcr &= ~(SVCR_SM_MASK | SVCR_ZA_MASK); - clear_thread_flag(TIF_SME); + clear_tsk_thread_flag(task, TIF_SME); free_sme = true; } } -- cgit From 89a65c3f170e5c3b05a626046c68354e2afd7912 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 3 Aug 2023 01:19:06 +0100 Subject: arm64/ptrace: Flush FP state when setting ZT0 When setting ZT0 via ptrace we do not currently force a reload of the floating point register state from memory, do that to ensure that the newly set value gets loaded into the registers on next task execution. The function was templated off the function for FPSIMD which due to our providing the option of embedding a FPSIMD regset within the SVE regset does not directly include the flush. Fixes: f90b529bcbe5 ("arm64/sme: Implement ZT0 ptrace support") Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230803-arm64-fix-ptrace-zt0-flush-v1-1-72e854eaf96e@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index d7f4f0d1ae12..740e81e9db04 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1180,6 +1180,8 @@ static int zt_set(struct task_struct *target, if (ret == 0) target->thread.svcr |= SVCR_ZA_MASK; + fpsimd_flush_task_state(target); + return ret; } -- cgit From b3d8aa84bbfe9b58ccc5332cacf8ea17200af310 Mon Sep 17 00:00:00 2001 From: Boqun Feng Date: Sat, 29 Jul 2023 18:29:02 -0700 Subject: rust: allocator: Prevent mis-aligned allocation Currently the rust allocator simply passes the size of the type Layout to krealloc(), and in theory the alignment requirement from the type Layout may be larger than the guarantee provided by SLAB, which means the allocated object is mis-aligned. Fix this by adjusting the allocation size to the nearest power of two, which SLAB always guarantees a size-aligned allocation. And because Rust guarantees that the original size must be a multiple of alignment and the alignment must be a power of two, then the alignment requirement is satisfied. Suggested-by: Vlastimil Babka Co-developed-by: "Andreas Hindborg (Samsung)" Signed-off-by: "Andreas Hindborg (Samsung)" Signed-off-by: Boqun Feng Cc: stable@vger.kernel.org # v6.1+ Acked-by: Vlastimil Babka Fixes: 247b365dc8dc ("rust: add `kernel` crate") Link: https://github.com/Rust-for-Linux/linux/issues/974 Link: https://lore.kernel.org/r/20230730012905.643822-2-boqun.feng@gmail.com [ Applied rewording of comment as discussed in the mailing list. 
] Signed-off-by: Miguel Ojeda --- rust/bindings/bindings_helper.h | 1 + rust/kernel/allocator.rs | 74 ++++++++++++++++++++++++++++++++--------- 2 files changed, 60 insertions(+), 15 deletions(-) diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 3e601ce2548d..058954961bfc 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -13,5 +13,6 @@ #include /* `bindgen` gets confused at certain things. */ +const size_t BINDINGS_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN; const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL; const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO; diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs index 397a3dd57a9b..9363b527be66 100644 --- a/rust/kernel/allocator.rs +++ b/rust/kernel/allocator.rs @@ -9,6 +9,36 @@ use crate::bindings; struct KernelAllocator; +/// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment. +/// +/// # Safety +/// +/// - `ptr` can be either null or a pointer which has been allocated by this allocator. +/// - `new_layout` must have a non-zero size. +unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gfp_t) -> *mut u8 { + // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first. + let layout = new_layout.pad_to_align(); + + let mut size = layout.size(); + + if layout.align() > bindings::BINDINGS_ARCH_SLAB_MINALIGN { + // The alignment requirement exceeds the slab guarantee, thus try to enlarge the size + // to use the "power-of-two" size/alignment guarantee (see comments in `kmalloc()` for + // more information). + // + // Note that `layout.size()` (after padding) is guaranteed to be a multiple of + // `layout.align()`, so `next_power_of_two` gives enough alignment guarantee. + size = size.next_power_of_two(); + } + + // SAFETY: + // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the + // function safety requirement. + // - `size` is greater than 0 since it's either a `layout.size()` (which cannot be zero + // according to the function safety requirement) or a result from `next_power_of_two()`. + unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags) as *mut u8 } +} + unsafe impl GlobalAlloc for KernelAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { // `krealloc()` is used instead of `kmalloc()` because the latter is @@ -30,10 +60,20 @@ static ALLOCATOR: KernelAllocator = KernelAllocator; // to extract the object file that has them from the archive. For the moment, // let's generate them ourselves instead. // +// Note: Although these are *safe* functions, they are called by the compiler +// with parameters that obey the same `GlobalAlloc` function safety +// requirements: size and align should form a valid layout, and size is +// greater than 0. +// // Note that `#[no_mangle]` implies exported too, nowadays. #[no_mangle] -fn __rust_alloc(size: usize, _align: usize) -> *mut u8 { - unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 } +fn __rust_alloc(size: usize, align: usize) -> *mut u8 { + // SAFETY: See assumption above. + let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; + + // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater + // than 0. 
+ unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) } } #[no_mangle] @@ -42,23 +82,27 @@ fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) { } #[no_mangle] -fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 { - unsafe { - bindings::krealloc( - ptr as *const core::ffi::c_void, - new_size, - bindings::GFP_KERNEL, - ) as *mut u8 - } +fn __rust_realloc(ptr: *mut u8, _old_size: usize, align: usize, new_size: usize) -> *mut u8 { + // SAFETY: See assumption above. + let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, align) }; + + // SAFETY: Per assumption above, `ptr` is allocated by `__rust_*` before, and the size of + // `new_layout` is greater than 0. + unsafe { krealloc_aligned(ptr, new_layout, bindings::GFP_KERNEL) } } #[no_mangle] -fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 { +fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8 { + // SAFETY: See assumption above. + let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; + + // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater + // than 0. unsafe { - bindings::krealloc( - core::ptr::null(), - size, + krealloc_aligned( + ptr::null_mut(), + layout, bindings::GFP_KERNEL | bindings::__GFP_ZERO, - ) as *mut u8 + ) } } -- cgit From 1d24eb2d536ba27ef938a6563ac8bfb49c738cc1 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 6 Jul 2023 09:46:15 +0000 Subject: rust: delete `ForeignOwnable::borrow_mut` We discovered that the current design of `borrow_mut` is problematic. This patch removes it until a better solution can be found. Specifically, the current design gives you access to a `&mut T`, which lets you change where the `ForeignOwnable` points (e.g., with `core::mem::swap`). No upcoming user of this API intended to make that possible, making all of them unsound. Signed-off-by: Alice Ryhl Reviewed-by: Gary Guo Reviewed-by: Benno Lossin Reviewed-by: Martin Rodriguez Reboredo Fixes: 0fc4424d24a2 ("rust: types: introduce `ForeignOwnable`") Link: https://lore.kernel.org/r/20230706094615.3080784-1-aliceryhl@google.com Signed-off-by: Miguel Ojeda --- rust/kernel/sync/arc.rs | 3 +-- rust/kernel/types.rs | 22 ++-------------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index a89843cacaad..172f563976a9 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -243,8 +243,7 @@ impl ForeignOwnable for Arc { let inner = NonNull::new(ptr as *mut ArcInner).unwrap(); // SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive - // for the lifetime of the returned value. Additionally, the safety requirements of - // `ForeignOwnable::borrow_mut` ensure that no new mutable references are created. + // for the lifetime of the returned value. unsafe { ArcBorrow::new(inner) } } diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs index 1e5380b16ed5..d479f8da8f38 100644 --- a/rust/kernel/types.rs +++ b/rust/kernel/types.rs @@ -35,34 +35,16 @@ pub trait ForeignOwnable: Sized { /// /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. - /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow_mut`] - /// for this object must have been dropped. 
unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>; - /// Mutably borrows a foreign-owned object. - /// - /// # Safety - /// - /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for - /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. - /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and - /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped. - unsafe fn borrow_mut(ptr: *const core::ffi::c_void) -> ScopeGuard { - // SAFETY: The safety requirements ensure that `ptr` came from a previous call to - // `into_foreign`. - ScopeGuard::new_with_data(unsafe { Self::from_foreign(ptr) }, |d| { - d.into_foreign(); - }) - } - /// Converts a foreign-owned object back to a Rust-owned one. /// /// # Safety /// /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. - /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and - /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped. + /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for + /// this object must have been dropped. unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self; } -- cgit From b05544884300e98512964103b33f8f87650ce887 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Tue, 11 Jul 2023 09:19:14 +0200 Subject: rust: fix bindgen build error with UBSAN_BOUNDS_STRICT With commit 2d47c6956ab3 ("ubsan: Tighten UBSAN_BOUNDS on GCC") if CONFIG_UBSAN is enabled and gcc supports -fsanitize=bounds-strict, we can trigger the following build error due to bindgen lacking support for this additional build option: BINDGEN rust/bindings/bindings_generated.rs error: unsupported argument 'bounds-strict' to option '-fsanitize=' Fix by adding -fsanitize=bounds-strict to the list of skipped gcc flags for bindgen. Fixes: 2d47c6956ab3 ("ubsan: Tighten UBSAN_BOUNDS on GCC") Signed-off-by: Andrea Righi Acked-by: Kees Cook Reviewed-by: Martin Rodriguez Reboredo Link: https://lore.kernel.org/r/20230711071914.133946-1-andrea.righi@canonical.com Signed-off-by: Miguel Ojeda --- rust/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/Makefile b/rust/Makefile index 7c9d9f11aec5..4124bfa01798 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -257,7 +257,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ -fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \ -fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \ -fzero-call-used-regs=% -fno-stack-clash-protection \ - -fno-inline-functions-called-once \ + -fno-inline-functions-called-once -fsanitize=bounds-strict \ --param=% --param asan-% # Derived from `scripts/Makefile.clang`. -- cgit From 045aecdfcb2e060db142d83a0f4082380c465d2c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 3 Aug 2023 19:33:21 +0100 Subject: arm64/ptrace: Don't enable SVE when setting streaming SVE Systems which implement SME without also implementing SVE are architecturally valid but were not initially supported by the kernel, unfortunately we missed one issue in the ptrace code. The SVE register setting code is shared between SVE and streaming mode SVE. 
When we set full SVE register state we currently enable TIF_SVE unconditionally. In the case where streaming SVE is being configured on a system that supports vanilla SVE this is not an issue since we always initialise enough state for both vector lengths, but on a system which only supports SME it will result in us attempting to restore the SVE vector length after having set streaming SVE registers. Fix this by making the enabling of SVE conditional on setting SVE vector state. If we set streaming SVE state and SVE was not already enabled this will result in a SVE access trap on next use of normal SVE; this will cause us to flush our register state, but this is fine since the only way to trigger a SVE access trap would be to exit streaming mode, which will cause the in-register state to be flushed anyway. Fixes: e12310a0d30f ("arm64/sme: Implement ptrace support for streaming mode SVE registers") Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230803-arm64-fix-ptrace-ssve-no-sve-v1-1-49df214bfb3e@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 740e81e9db04..5b9b4305248b 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -932,11 +932,13 @@ static int sve_set_common(struct task_struct *target, /* * Ensure target->thread.sve_state is up to date with target's * FPSIMD regs, so that a short copyin leaves trailing - * registers unmodified. Always enable SVE even if going into - * streaming mode. + * registers unmodified. Only enable SVE if we are + * configuring normal SVE, a system with streaming SVE may not + * have normal SVE. */ fpsimd_sync_to_sve(target); - set_tsk_thread_flag(target, TIF_SVE); + if (type == ARM64_VEC_SVE) + set_tsk_thread_flag(target, TIF_SVE); target->thread.fp_type = FP_STATE_SVE; BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); -- cgit From 507ea5dd92d23fcf10e4d1a68a443c86a49753ed Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 3 Aug 2023 19:33:22 +0100 Subject: arm64/fpsimd: Sync FPSIMD state with SVE for SME only systems Currently we guard FPSIMD/SVE state conversions with a check for the system supporting SVE, but SME-only systems may need to sync streaming mode SVE state, so add a check for SME support too. These functions are only used by the ptrace code.
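From userspace the same independence is visible in the hwcaps; a small probe, assuming a Linux/arm64 target where HWCAP_SVE lives in AT_HWCAP and HWCAP2_SME in AT_HWCAP2 (both from asm/hwcap.h):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
    unsigned long hw = getauxval(AT_HWCAP);
    unsigned long hw2 = getauxval(AT_HWCAP2);

    /* SVE and SME are advertised separately: neither implies the other. */
    printf("SVE: %s, SME: %s\n",
           (hw & HWCAP_SVE) ? "yes" : "no",
           (hw2 & HWCAP2_SME) ? "yes" : "no");
    return 0;
}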
Fixes: e12310a0d30f ("arm64/sme: Implement ptrace support for streaming mode SVE registers") Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230803-arm64-fix-ptrace-ssve-no-sve-v1-2-49df214bfb3e@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a61a1fd6492d..bfaa70cb040e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -679,7 +679,7 @@ static void fpsimd_to_sve(struct task_struct *task) void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; - if (!system_supports_sve()) + if (!system_supports_sve() && !system_supports_sme()) return; vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); @@ -705,7 +705,7 @@ static void sve_to_fpsimd(struct task_struct *task) unsigned int i; __uint128_t const *p; - if (!system_supports_sve()) + if (!system_supports_sve() && !system_supports_sme()) return; vl = thread_get_cur_vl(&task->thread); -- cgit From 69af56ae56a48a2522aad906c4461c6c7c092737 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 3 Aug 2023 19:33:23 +0100 Subject: arm64/fpsimd: Sync and zero pad FPSIMD state for streaming SVE We have a function sve_sync_from_fpsimd_zeropad() which is used by the ptrace code to update the SVE state when the user writes to the FPSIMD register set. Currently this checks that the task has SVE enabled, but this will miss updates for tasks which have streaming SVE enabled if SVE has not been enabled for the thread; also do the conversion if the task has streaming SVE enabled. Fixes: e12310a0d30f ("arm64/sme: Implement ptrace support for streaming mode SVE registers") Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230803-arm64-fix-ptrace-ssve-no-sve-v1-3-49df214bfb3e@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index bfaa70cb040e..75c37b1c55aa 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -835,7 +835,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; - if (!test_tsk_thread_flag(task, TIF_SVE)) + if (!test_tsk_thread_flag(task, TIF_SVE) && + !thread_sm_enabled(&task->thread)) return; vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); -- cgit From 4e15a0ddc3ff40e8ea84032213976ecf774d7f77 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Aug 2023 12:42:45 -0400 Subject: KVM: SEV: snapshot the GHCB before accessing it Validation of the GHCB is susceptible to time-of-check/time-of-use vulnerabilities. To avoid them, we would like to always snapshot the fields that are read in sev_es_validate_vmgexit(), and not use the GHCB anymore after it returns. This means: - invoking sev_es_sync_from_ghcb() before any GHCB access, including before sev_es_validate_vmgexit() - snapshotting all fields including the valid bitmap and the sw_scratch field, which are currently not cached anywhere. The valid bitmap is the first thing to be copied out of the GHCB; then, further accesses will use the copy in svm->sev_es.
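The pattern generalizes beyond SEV; a minimal sketch, assuming a hypothetical shared structure (ghcb_like, with invented fields) that an untrusted peer can rewrite at any time:

#include <string.h>

struct ghcb_like {
    unsigned long exit_code;
    unsigned long scratch;
    unsigned char valid_bitmap[16];
};

static struct ghcb_like page;             /* stands in for the mapped guest page */
static struct ghcb_like *shared = &page;  /* untrusted; may change under us */
static struct ghcb_like snap;             /* host-private snapshot */

int handle_exit(void)
{
    /* 1. Copy everything out exactly once... */
    memcpy(&snap, shared, sizeof(snap));

    /* 2. ...validate the snapshot, never the shared page... */
    if (!(snap.valid_bitmap[0] & 1))
        return -22;                       /* -EINVAL */

    /* 3. ...and act only on snapshot fields from here on. */
    return snap.exit_code ? 0 : -22;
}

Once the copy is taken, later rewrites of the shared page cannot invalidate checks that have already passed, which is what closes the check-to-use window.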
Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT") Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 69 +++++++++++++++++++++++++------------------------- arch/x86/kvm/svm/svm.h | 26 +++++++++++++++++++ 2 files changed, 61 insertions(+), 34 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 07756b7348ae..e898f0b2b0ba 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2417,15 +2417,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) */ memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); - vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb); + BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap)); + memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap)); - svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); + vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb); - if (ghcb_xcr0_is_valid(ghcb)) { + svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb); + + if (kvm_ghcb_xcr0_is_valid(svm)) { vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb); kvm_update_cpuid_runtime(vcpu); } @@ -2436,6 +2439,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) control->exit_code_hi = upper_32_bits(exit_code); control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb); control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb); + svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb); /* Clear the valid entries fields */ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); @@ -2464,56 +2468,56 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) reason = GHCB_ERR_MISSING_INPUT; - if (!ghcb_sw_exit_code_is_valid(ghcb) || - !ghcb_sw_exit_info_1_is_valid(ghcb) || - !ghcb_sw_exit_info_2_is_valid(ghcb)) + if (!kvm_ghcb_sw_exit_code_is_valid(svm) || + !kvm_ghcb_sw_exit_info_1_is_valid(svm) || + !kvm_ghcb_sw_exit_info_2_is_valid(svm)) goto vmgexit_err; switch (ghcb_get_sw_exit_code(ghcb)) { case SVM_EXIT_READ_DR7: break; case SVM_EXIT_WRITE_DR7: - if (!ghcb_rax_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_RDTSC: break; case SVM_EXIT_RDPMC: - if (!ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_CPUID: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; if (ghcb_get_rax(ghcb) == 0xd) - if (!ghcb_xcr0_is_valid(ghcb)) + if (!kvm_ghcb_xcr0_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_INVD: break; case SVM_EXIT_IOIO: if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) { - if (!ghcb_sw_scratch_is_valid(ghcb)) + if (!kvm_ghcb_sw_scratch_is_valid(svm)) goto vmgexit_err; } else { if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK)) - if (!ghcb_rax_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm)) goto vmgexit_err; } break; case SVM_EXIT_MSR: - if 
(!ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; if (ghcb_get_sw_exit_info_1(ghcb)) { - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rdx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rdx_is_valid(svm)) goto vmgexit_err; } break; case SVM_EXIT_VMMCALL: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_cpl_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_cpl_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_RDTSCP: @@ -2521,19 +2525,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) case SVM_EXIT_WBINVD: break; case SVM_EXIT_MONITOR: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb) || - !ghcb_rdx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm) || + !kvm_ghcb_rdx_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_MWAIT: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; break; case SVM_VMGEXIT_MMIO_READ: case SVM_VMGEXIT_MMIO_WRITE: - if (!ghcb_sw_scratch_is_valid(ghcb)) + if (!kvm_ghcb_sw_scratch_is_valid(svm)) goto vmgexit_err; break; case SVM_VMGEXIT_NMI_COMPLETE: @@ -2563,9 +2567,6 @@ vmgexit_err: dump_ghcb(svm); } - /* Clear the valid entries fields */ - memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); - ghcb_set_sw_exit_info_1(ghcb, 2); ghcb_set_sw_exit_info_2(ghcb, reason); @@ -2586,7 +2587,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm) */ if (svm->sev_es.ghcb_sa_sync) { kvm_write_guest(svm->vcpu.kvm, - ghcb_get_sw_scratch(svm->sev_es.ghcb), + svm->sev_es.sw_scratch, svm->sev_es.ghcb_sa, svm->sev_es.ghcb_sa_len); svm->sev_es.ghcb_sa_sync = false; @@ -2637,7 +2638,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) u64 scratch_gpa_beg, scratch_gpa_end; void *scratch_va; - scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); + scratch_gpa_beg = svm->sev_es.sw_scratch; if (!scratch_gpa_beg) { pr_err("vmgexit: scratch gpa not provided\n"); goto e_scratch; @@ -2853,11 +2854,11 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) exit_code = ghcb_get_sw_exit_code(ghcb); + sev_es_sync_from_ghcb(svm); ret = sev_es_validate_vmgexit(svm); if (ret) return ret; - sev_es_sync_from_ghcb(svm); ghcb_set_sw_exit_info_1(ghcb, 0); ghcb_set_sw_exit_info_2(ghcb, 0); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 18af7e712a5a..8239c8de45ac 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -190,10 +190,12 @@ struct vcpu_sev_es_state { /* SEV-ES support */ struct sev_es_save_area *vmsa; struct ghcb *ghcb; + u8 valid_bitmap[16]; struct kvm_host_map ghcb_map; bool received_first_sipi; /* SEV-ES scratch area support */ + u64 sw_scratch; void *ghcb_sa; u32 ghcb_sa_len; bool ghcb_sa_sync; @@ -744,4 +746,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm); void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); +#define DEFINE_KVM_GHCB_ACCESSORS(field) \ + static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \ + { \ + return test_bit(GHCB_BITMAP_IDX(field), \ + (unsigned long *)&svm->sev_es.valid_bitmap); \ + } \ + \ + static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \ + { \ + return kvm_ghcb_##field##_is_valid(svm) ? 
ghcb->save.field : 0; \ } \ + +DEFINE_KVM_GHCB_ACCESSORS(cpl) +DEFINE_KVM_GHCB_ACCESSORS(rax) +DEFINE_KVM_GHCB_ACCESSORS(rcx) +DEFINE_KVM_GHCB_ACCESSORS(rdx) +DEFINE_KVM_GHCB_ACCESSORS(rbx) +DEFINE_KVM_GHCB_ACCESSORS(rsi) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2) +DEFINE_KVM_GHCB_ACCESSORS(sw_scratch) +DEFINE_KVM_GHCB_ACCESSORS(xcr0) + #endif -- cgit From 7588dbcebcbf0193ab5b76987396d0254270b04a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Aug 2023 12:56:36 -0400 Subject: KVM: SEV: only access GHCB fields once A KVM guest using SEV-ES or SEV-SNP with multiple vCPUs can trigger a double fetch race condition vulnerability and invoke the VMGEXIT handler recursively. sev_handle_vmgexit() maps the GHCB page using kvm_vcpu_map() and then fetches the exit code using ghcb_get_sw_exit_code(). Soon after, sev_es_validate_vmgexit() fetches the exit code again. Since the GHCB page is shared with the guest, the guest is able to quickly swap the values with another vCPU and hence bypass the validation. One vmexit code that can be rejected by sev_es_validate_vmgexit() is SVM_EXIT_VMGEXIT; if sev_handle_vmgexit() observes it in the second fetch, the call to svm_invoke_exit_handler() will invoke sev_handle_vmgexit() again recursively. To avoid the race, always fetch the GHCB data from the places where sev_es_sync_from_ghcb stores it. Exploiting recursion in the Linux kernel has been proven feasible in the past, but the impact is mitigated by stack guard pages (CONFIG_VMAP_STACK). Still, if an attacker manages to call the handler multiple times, they can theoretically trigger a stack overflow and cause a denial-of-service, or potentially a guest-to-host escape in kernel configurations without stack guard pages. Note that winning the race reliably in every iteration is very tricky due to the very tight window of the fetches; depending on the compiler settings, they are often consecutive because of optimization and inlining. Tested by booting an SEV-ES RHEL9 guest. Fixes: CVE-2023-4155 Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT") Cc: stable@vger.kernel.org Reported-by: Andy Nguyen Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index e898f0b2b0ba..ca4ba5fe9a01 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2445,9 +2445,15 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); } +static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control) +{ + return (((u64)control->exit_code_hi) << 32) | control->exit_code; +} + static int sev_es_validate_vmgexit(struct vcpu_svm *svm) { - struct kvm_vcpu *vcpu; + struct vmcb_control_area *control = &svm->vmcb->control; + struct kvm_vcpu *vcpu = &svm->vcpu; struct ghcb *ghcb; u64 exit_code; u64 reason; @@ -2458,7 +2464,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) * Retrieve the exit code now even though it may not be marked valid * as it could help with debugging.
*/ - exit_code = ghcb_get_sw_exit_code(ghcb); + exit_code = kvm_ghcb_get_sw_exit_code(control); /* Only GHCB Usage code 0 is supported */ if (ghcb->ghcb_usage) { @@ -2473,7 +2479,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) !kvm_ghcb_sw_exit_info_2_is_valid(svm)) goto vmgexit_err; - switch (ghcb_get_sw_exit_code(ghcb)) { + switch (exit_code) { case SVM_EXIT_READ_DR7: break; case SVM_EXIT_WRITE_DR7: @@ -2490,18 +2496,18 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) if (!kvm_ghcb_rax_is_valid(svm) || !kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; - if (ghcb_get_rax(ghcb) == 0xd) + if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd) if (!kvm_ghcb_xcr0_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_INVD: break; case SVM_EXIT_IOIO: - if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) { + if (control->exit_info_1 & SVM_IOIO_STR_MASK) { if (!kvm_ghcb_sw_scratch_is_valid(svm)) goto vmgexit_err; } else { - if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK)) + if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK)) if (!kvm_ghcb_rax_is_valid(svm)) goto vmgexit_err; } @@ -2509,7 +2515,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) case SVM_EXIT_MSR: if (!kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; - if (ghcb_get_sw_exit_info_1(ghcb)) { + if (control->exit_info_1) { if (!kvm_ghcb_rax_is_valid(svm) || !kvm_ghcb_rdx_is_valid(svm)) goto vmgexit_err; @@ -2553,8 +2559,6 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) return 0; vmgexit_err: - vcpu = &svm->vcpu; - if (reason == GHCB_ERR_INVALID_USAGE) { vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", ghcb->ghcb_usage); @@ -2852,8 +2856,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb); - exit_code = ghcb_get_sw_exit_code(ghcb); - sev_es_sync_from_ghcb(svm); ret = sev_es_validate_vmgexit(svm); if (ret) @@ -2862,6 +2864,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) ghcb_set_sw_exit_info_1(ghcb, 0); ghcb_set_sw_exit_info_2(ghcb, 0); + exit_code = kvm_ghcb_get_sw_exit_code(control); switch (exit_code) { case SVM_VMGEXIT_MMIO_READ: ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); -- cgit From 63dbc67cf4ed11f94b2e8dde34b41438a3cb3d83 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Aug 2023 13:01:43 -0400 Subject: KVM: SEV: remove ghcb variable declarations To avoid possible time-of-check/time-of-use issues, the GHCB should almost never be accessed outside dump_ghcb, sev_es_sync_to_ghcb and sev_es_sync_from_ghcb. The only legitimate uses are to set the exitinfo fields and to find the address of the scratch area embedded in the ghcb. Accessing ghcb_usage also goes through svm->sev_es.ghcb in sev_es_validate_vmgexit(), but that is fine because the value is not used anyway. Removing a shortcut variable that contains the value of svm->sev_es.ghcb makes these cases a bit more verbose, but it limits the chance of someone reading the ghcb by mistake.
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index ca4ba5fe9a01..d3aec1f2cad2 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2454,12 +2454,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) { struct vmcb_control_area *control = &svm->vmcb->control; struct kvm_vcpu *vcpu = &svm->vcpu; - struct ghcb *ghcb; u64 exit_code; u64 reason; - ghcb = svm->sev_es.ghcb; - /* * Retrieve the exit code now even though it may not be marked valid * as it could help with debugging. @@ -2467,7 +2464,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) exit_code = kvm_ghcb_get_sw_exit_code(control); /* Only GHCB Usage code 0 is supported */ - if (ghcb->ghcb_usage) { + if (svm->sev_es.ghcb->ghcb_usage) { reason = GHCB_ERR_INVALID_USAGE; goto vmgexit_err; } @@ -2561,7 +2558,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) vmgexit_err: if (reason == GHCB_ERR_INVALID_USAGE) { vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", - ghcb->ghcb_usage); + svm->sev_es.ghcb->ghcb_usage); } else if (reason == GHCB_ERR_INVALID_EVENT) { vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n", exit_code); @@ -2571,8 +2568,8 @@ vmgexit_err: dump_ghcb(svm); } - ghcb_set_sw_exit_info_1(ghcb, 2); - ghcb_set_sw_exit_info_2(ghcb, reason); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason); /* Resume the guest to "return" the error code. */ return 1; @@ -2637,7 +2634,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) { struct vmcb_control_area *control = &svm->vmcb->control; - struct ghcb *ghcb = svm->sev_es.ghcb; u64 ghcb_scratch_beg, ghcb_scratch_end; u64 scratch_gpa_beg, scratch_gpa_end; void *scratch_va; @@ -2713,8 +2709,8 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) return 0; e_scratch: - ghcb_set_sw_exit_info_1(ghcb, 2); - ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); return 1; } @@ -2827,7 +2823,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; u64 ghcb_gpa, exit_code; - struct ghcb *ghcb; int ret; /* Validate the GHCB */ @@ -2852,17 +2847,16 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) } svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; - ghcb = svm->sev_es.ghcb_map.hva; - trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb); + trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb); sev_es_sync_from_ghcb(svm); ret = sev_es_validate_vmgexit(svm); if (ret) return ret; - ghcb_set_sw_exit_info_1(ghcb, 0); - ghcb_set_sw_exit_info_2(ghcb, 0); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0); exit_code = kvm_ghcb_get_sw_exit_code(control); switch (exit_code) { @@ -2902,13 +2896,13 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) break; case 1: /* Get AP jump table address */ - ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table); break; default: pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", control->exit_info_1); - ghcb_set_sw_exit_info_1(ghcb, 2); - ghcb_set_sw_exit_info_2(ghcb, 
GHCB_ERR_INVALID_INPUT); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT); } ret = 1; -- cgit From 797964253d358cf8d705614dda394dbe30120223 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 3 Aug 2023 11:35:53 -0700 Subject: file: reinstate f_pos locking optimization for regular files In commit 20ea1e7d13c1 ("file: always lock position for FMODE_ATOMIC_POS") we ended up always taking the file pos lock, because pidfd_getfd() could get a reference to the file even when it didn't have an elevated file count due to threading or other sharing cases. But Mateusz Guzik reports that the extra locking is actually measurable, so let's re-introduce the optimization, and only force the locking for directory traversal. Directories need the lock for correctness reasons, while regular files only need it for "POSIX semantics". Since pidfd_getfd() is about debuggers etc special things that are _way_ outside of POSIX, we can relax the rules for that case. Reported-by: Mateusz Guzik Cc: Christian Brauner Link: https://lore.kernel.org/linux-fsdevel/20230803095311.ijpvhx3fyrbkasul@f/ Signed-off-by: Linus Torvalds --- fs/file.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/fs/file.c b/fs/file.c index 35c62b54c9d6..dbca26ef7a01 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1036,12 +1036,28 @@ unsigned long __fdget_raw(unsigned int fd) return __fget_light(fd, 0); } +/* + * Try to avoid f_pos locking. We only need it if the + * file is marked for FMODE_ATOMIC_POS, and it can be + * accessed multiple ways. + * + * Always do it for directories, because pidfd_getfd() + * can make a file accessible even if it otherwise would + * not be, and for directories this is a correctness + * issue, not a "POSIX requirement". + */ +static inline bool file_needs_f_pos_lock(struct file *file) +{ + return (file->f_mode & FMODE_ATOMIC_POS) && + (file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode)); +} + unsigned long __fdget_pos(unsigned int fd) { unsigned long v = __fdget(fd); struct file *file = (struct file *)(v & ~3); - if (file && (file->f_mode & FMODE_ATOMIC_POS)) { + if (file && file_needs_f_pos_lock(file)) { v |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } -- cgit From d5ad9aae13dcced333c1a7816ff0a4fbbb052466 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 4 Aug 2023 20:22:11 +0100 Subject: selftests/rseq: Fix build with undefined __weak Commit 3bcbc20942db ("selftests/rseq: Play nice with binaries statically linked against glibc 2.35+") which is now in Linus' tree introduced uses of __weak but did nothing to ensure that a definition is provided for it, resulting in build failures for the rseq tests: rseq.c:41:1: error: unknown type name '__weak' __weak ptrdiff_t __rseq_offset; ^ rseq.c:41:17: error: expected ';' after top level declarator __weak ptrdiff_t __rseq_offset; ^ ; rseq.c:42:1: error: unknown type name '__weak' __weak unsigned int __rseq_size; ^ rseq.c:43:1: error: unknown type name '__weak' __weak unsigned int __rseq_flags Fix this by using the definition from tools/include compiler.h.
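For context, the definition being pulled in is essentially the GCC/clang weak-symbol attribute. A hedged sketch of the idea follows (the exact tools/include header contents may differ):

#include <stddef.h>     /* ptrdiff_t */

#ifndef __weak
#define __weak __attribute__((weak))    /* GCC/clang weak symbol */
#endif

/* A weak definition is a fallback: if a strong definition exists
 * elsewhere (e.g. glibc 2.35+ provides __rseq_offset), the linker
 * prefers it; otherwise this definition satisfies the reference. */
__weak ptrdiff_t __rseq_offset;
__weak unsigned int __rseq_size;
__weak unsigned int __rseq_flags;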
Fixes: 3bcbc20942db ("selftests/rseq: Play nice with binaries statically linked against glibc 2.35+") Signed-off-by: Mark Brown Message-Id: <20230804-kselftest-rseq-build-v1-1-015830b66aa9@kernel.org> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/rseq/Makefile | 4 +++- tools/testing/selftests/rseq/rseq.c | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile index b357ba24af06..7a957c7d459a 100644 --- a/tools/testing/selftests/rseq/Makefile +++ b/tools/testing/selftests/rseq/Makefile @@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as endif +top_srcdir = ../../../.. + CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -L$(OUTPUT) -Wl,-rpath=./ \ - $(CLANG_FLAGS) + $(CLANG_FLAGS) -I$(top_srcdir)/tools/include LDLIBS += -lpthread -ldl # Own dependencies because we only want to build against 1st prerequisite, but diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index a723da253244..96e812bdf8a4 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -31,6 +31,8 @@ #include #include +#include + #include "../kselftest.h" #include "rseq.h" -- cgit From 5aa4fda5aa9c2a5a7bac67b4a12b089ab81fee3c Mon Sep 17 00:00:00 2001 From: Long Li Date: Sat, 29 Jul 2023 11:36:18 +0800 Subject: ksmbd: validate command request size In commit 2b9b8f3b68ed ("ksmbd: validate command payload size"), except for the SMB2_OPLOCK_BREAK_HE command, the request size of the other commands is not checked, which is not expected. Fix it by adding a check for the request size of the other commands. Cc: stable@vger.kernel.org Fixes: 2b9b8f3b68ed ("ksmbd: validate command payload size") Acked-by: Namjae Jeon Signed-off-by: Long Li Signed-off-by: Steve French --- fs/smb/server/smb2misc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c index 33b7e6c4ceff..e881df1d10cb 100644 --- a/fs/smb/server/smb2misc.c +++ b/fs/smb/server/smb2misc.c @@ -380,13 +380,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work) } if (smb2_req_struct_sizes[command] != pdu->StructureSize2) { - if (command == SMB2_OPLOCK_BREAK_HE && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) { + if (!(command == SMB2_OPLOCK_BREAK_HE && + (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 || + le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) { /* special case for SMB2.1 lease break message */ ksmbd_debug(SMB, - "Illegal request size %d for oplock break\n", - le16_to_cpu(pdu->StructureSize2)); + "Illegal request size %u for command %d\n", + le16_to_cpu(pdu->StructureSize2), command); return 1; } } -- cgit From 79ed288cef201f1f212dfb934bcaac75572fb8f6 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sun, 6 Aug 2023 08:44:17 +0900 Subject: ksmbd: fix wrong next length validation of ea buffer in smb2_set_ea() There are multiple smb2_ea_info buffers in a FILE_FULL_EA_INFORMATION request from the client. ksmbd finds the next smb2_ea_info using the ->NextEntryOffset of the current smb2_ea_info. ksmbd needs to validate the buffer length before accessing the next EA. ksmbd should check the buffer length using buf_len, not the next variable; next is the start offset of the current EA, obtained from the previous EA.
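The class of bug is easiest to see in a simplified walker. Below is a hedged sketch of validating a NextEntryOffset chain against the remaining buffer; struct ea_info and its field names only mirror smb2_ea_info, this is not the ksmbd code:

#include <linux/types.h>
#include <linux/errno.h>

/* Simplified EA entry (the real structure uses on-the-wire widths). */
struct ea_info {
        u32 next_entry_offset;  /* distance to the next entry, 0 = last */
        u8  name_len;
        u16 value_len;
        /* name and value bytes follow */
};

/* buf_len is the number of bytes from 'buf' to the end of the request. */
static int walk_ea_chain(const u8 *buf, size_t buf_len)
{
        const struct ea_info *ea = (const struct ea_info *)buf;
        u32 next;

        for (;;) {
                /* The fixed header must fit in what remains... */
                if (buf_len < sizeof(*ea))
                        return -EINVAL;
                /* ...and so must the variable-length name and value. */
                if (buf_len < sizeof(*ea) + ea->name_len + ea->value_len)
                        return -EINVAL;

                /* consume the current entry here */

                next = ea->next_entry_offset;
                if (next == 0)
                        break;          /* last entry */
                if (next > buf_len)     /* step must stay inside buffer */
                        return -EINVAL;
                buf_len -= next;
                ea = (const struct ea_info *)((const u8 *)ea + next);
        }
        return 0;
}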
Cc: stable@vger.kernel.org Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-21598 Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 9849d7489345..7cc1b0c47d0a 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -2324,9 +2324,16 @@ next: break; buf_len -= next; eabuf = (struct smb2_ea_info *)((char *)eabuf + next); - if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength)) + if (buf_len < sizeof(struct smb2_ea_info)) { + rc = -EINVAL; break; + } + if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + + le16_to_cpu(eabuf->EaValueLength)) { + rc = -EINVAL; + break; + } } while (next != 0); kfree(attr_name); -- cgit From a0fc452a5d7fed986205539259df1d60546f536c Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Sun, 6 Aug 2023 02:11:58 +1000 Subject: open: make RESOLVE_CACHED correctly test for O_TMPFILE O_TMPFILE is actually __O_TMPFILE|O_DIRECTORY. This means that the old fast-path check for RESOLVE_CACHED would reject all users passing O_DIRECTORY with -EAGAIN, when in fact the intended test was to check for __O_TMPFILE. Cc: stable@vger.kernel.org # v5.12+ Fixes: 99668f618062 ("fs: expose LOOKUP_CACHED through openat2() RESOLVE_CACHED") Signed-off-by: Aleksa Sarai Message-Id: <20230806-resolve_cached-o_tmpfile-v1-1-7ba16308465e@cyphar.com> Signed-off-by: Christian Brauner --- fs/open.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/open.c b/fs/open.c index 0c55c8e7f837..e6ead0f19964 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1322,7 +1322,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) lookup_flags |= LOOKUP_IN_ROOT; if (how->resolve & RESOLVE_CACHED) { /* Don't bother even trying for create/truncate/tmpfile open */ - if (flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; lookup_flags |= LOOKUP_CACHED; } -- cgit From 0a2c2baafa312ac4cec4f0bababedab3f971f224 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 5 Aug 2023 10:49:31 -0700 Subject: proc: fix missing conversion to 'iterate_shared' I'm looking at the directory handling due to the discussion about f_pos locking (see commit 797964253d35: "file: reinstate f_pos locking optimization for regular files"), and wanting to clean that up. And one source of ugliness is how we were supposed to move filesystems over to the '->iterate_shared()' function that only takes the inode lock for reading many many years ago, but several filesystems still use the bad old '->iterate()' that takes the inode lock for exclusive access. See commit 6192269444eb ("introduce a parallel variant of ->iterate()") that also added some documentation stating Old method is only used if the new one is absent; eventually it will be removed. Switch while you still can; the old one won't stay. and that was back in April 2016. Here we are, many years later, and the old version is still clearly sadly alive and well. Now, some of those old style iterators are probably just because the filesystem may end up having per-inode mutable data that it uses for iterating a directory, but at least one case is just a mistake. Al switched over most filesystems to use '->iterate_shared()' back when it was introduced. In particular, the /proc filesystem was converted as one of the first ones in commit f50752eaa0b0 ("switch all procfs directories ->iterate_shared()"). 
But then later one new user of '->iterate()' was re-introduced by commit 6d9c939dbe4d ("procfs: add smack subdir to attrs"). And that's clearly not what we wanted, since that new case just uses the same 'proc_pident_readdir()' and 'proc_pident_lookup()' helper functions that other /proc pident directories use, and they are most definitely safe to use with the inode lock held shared. So just fix it. This still leaves a fair number of oddball filesystems using the old-style directory iterator (ceph, coda, exfat, jfs, ntfs, ocfs2, overlayfs, and vboxsf), but at least we don't have any remaining in the core filesystems. I'm going to add a wrapper function that just drops the read-lock and takes it as a write lock, so that we can clean up the core vfs layer and make all the ugly 'this filesystem needs exclusive inode locking' be just filesystem-internal warts. I just didn't want to make that conversion when we still had a core user left. Signed-off-by: Linus Torvalds Signed-off-by: Christian Brauner --- fs/proc/base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/proc/base.c b/fs/proc/base.c index 05452c3b9872..9df3f4839662 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2817,7 +2817,7 @@ static int proc_##LSM##_attr_dir_iterate(struct file *filp, \ \ static const struct file_operations proc_##LSM##_attr_dir_ops = { \ .read = generic_read_dir, \ - .iterate = proc_##LSM##_attr_dir_iterate, \ + .iterate_shared = proc_##LSM##_attr_dir_iterate, \ .llseek = default_llseek, \ }; \ \ -- cgit From 3e3271549670783be20e233a2b78a87a0b04c715 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 5 Aug 2023 12:25:01 -0700 Subject: vfs: get rid of old '->iterate' directory operation All users now just use '->iterate_shared()', which only takes the directory inode lock for reading. Filesystems that never got converted to shared mode now instead use a wrapper that drops the lock, re-takes it in write mode, calls the old function, and then downgrades the lock back to read mode. This way the VFS layer and other callers no longer need to care about filesystems that never got converted to the modern era. The filesystems that use the new wrapper are ceph, coda, exfat, jfs, ntfs, ocfs2, overlayfs, and vboxsf. Honestly, several of them look like they really could just iterate their directories in shared mode and skip the wrapper entirely, but the point of this change is to not change semantics or fix filesystems that haven't been fixed in the last 7+ years, but to finally get rid of the dual iterators. Signed-off-by: Linus Torvalds Signed-off-by: Christian Brauner --- Documentation/filesystems/locking.rst | 5 ++- Documentation/filesystems/porting.rst | 25 ++++++------- fs/ceph/dir.c | 5 +-- fs/coda/dir.c | 20 ++++------- fs/exfat/dir.c | 3 +- fs/exportfs/expfs.c | 2 +- fs/jfs/namei.c | 3 +- fs/ntfs/dir.c | 3 +- fs/ocfs2/file.c | 5 +-- fs/overlayfs/readdir.c | 3 +- fs/readdir.c | 68 ++++++++++++++++++++++++++--------- fs/vboxsf/dir.c | 3 +- include/linux/fs.h | 8 ++++- 13 files changed, 95 insertions(+), 58 deletions(-) diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index ed148919e11a..0ca479dbb1cd 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -551,9 +551,8 @@ mutex or just to use i_size_read() instead. Note: this does not protect the file->f_pos against concurrent modifications since this is something the userspace has to take care about. -->iterate() is called with i_rwsem exclusive.
- -->iterate_shared() is called with i_rwsem at least shared. +->iterate_shared() is called with i_rwsem held for reading, and with the +file f_pos_lock held exclusively ->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags. Most instances call fasync_helper(), which does that maintenance, so it's diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst index d2d684ae7798..0f5da78ef4f9 100644 --- a/Documentation/filesystems/porting.rst +++ b/Documentation/filesystems/porting.rst @@ -537,7 +537,7 @@ vfs_readdir() is gone; switch to iterate_dir() instead **mandatory** -->readdir() is gone now; switch to ->iterate() +->readdir() is gone now; switch to ->iterate_shared() **mandatory** @@ -693,24 +693,19 @@ parallel now. --- -**recommended** +**mandatory** -->iterate_shared() is added; it's a parallel variant of ->iterate(). +->iterate_shared() is added. Exclusion on struct file level is still provided (as well as that between it and lseek on the same struct file), but if your directory has been opened several times, you can get these called in parallel. Exclusion between that method and all directory-modifying ones is still provided, of course. -Often enough ->iterate() can serve as ->iterate_shared() without any -changes - it is a read-only operation, after all. If you have any -per-inode or per-dentry in-core data structures modified by ->iterate(), -you might need something to serialize the access to them. If you -do dcache pre-seeding, you'll need to switch to d_alloc_parallel() for -that; look for in-tree examples. - -Old method is only used if the new one is absent; eventually it will -be removed. Switch while you still can; the old one won't stay. +If you have any per-inode or per-dentry in-core data structures modified +by ->iterate_shared(), you might need something to serialize the access +to them. If you do dcache pre-seeding, you'll need to switch to +d_alloc_parallel() for that; look for in-tree examples. --- @@ -930,9 +925,9 @@ should be done by looking at FMODE_LSEEK in file->f_mode. filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: -callers never looked at specific -E... values anyway. ->iterate() and -->iterate_shared() instance require no changes at all, all filldir_t ones in -the tree converted. +callers never looked at specific -E... values anyway. -> iterate_shared() +instances require no changes at all, all filldir_t ones in the tree +converted. --- diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 4a2b39d9a61a..bdcffb04513f 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -2019,9 +2019,10 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) } } +WRAP_DIR_ITER(ceph_readdir) // FIXME! 
const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, - .iterate = ceph_readdir, + .iterate_shared = shared_ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, @@ -2033,7 +2034,7 @@ const struct file_operations ceph_dir_fops = { }; const struct file_operations ceph_snapdir_fops = { - .iterate = ceph_readdir, + .iterate_shared = shared_ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 8450b1bd354b..1b960de2bf39 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -429,21 +429,14 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) cfi = coda_ftoc(coda_file); host_file = cfi->cfi_container; - if (host_file->f_op->iterate || host_file->f_op->iterate_shared) { + if (host_file->f_op->iterate_shared) { struct inode *host_inode = file_inode(host_file); ret = -ENOENT; if (!IS_DEADDIR(host_inode)) { - if (host_file->f_op->iterate_shared) { - inode_lock_shared(host_inode); - ret = host_file->f_op->iterate_shared(host_file, ctx); - file_accessed(host_file); - inode_unlock_shared(host_inode); - } else { - inode_lock(host_inode); - ret = host_file->f_op->iterate(host_file, ctx); - file_accessed(host_file); - inode_unlock(host_inode); - } + inode_lock_shared(host_inode); + ret = host_file->f_op->iterate_shared(host_file, ctx); + file_accessed(host_file); + inode_unlock_shared(host_inode); } return ret; } @@ -585,10 +578,11 @@ const struct inode_operations coda_dir_inode_operations = { .setattr = coda_setattr, }; +WRAP_DIR_ITER(coda_readdir) // FIXME! const struct file_operations coda_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = coda_readdir, + .iterate_shared = shared_coda_readdir, .open = coda_open, .release = coda_release, .fsync = coda_fsync, diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 598081d0d059..e1586bba6d86 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -306,10 +306,11 @@ out: return err; } +WRAP_DIR_ITER(exfat_iterate) // FIXME! const struct file_operations exfat_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = exfat_iterate, + .iterate_shared = shared_exfat_iterate, .unlocked_ioctl = exfat_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = exfat_compat_ioctl, diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 40e624cf7e92..d1dbe47c7975 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -315,7 +315,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child) goto out; error = -EINVAL; - if (!file->f_op->iterate && !file->f_op->iterate_shared) + if (!file->f_op->iterate_shared) goto out_close; buffer.sequence = 0; diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 9b030297aa64..e98ddb2b1cf2 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1535,9 +1535,10 @@ const struct inode_operations jfs_dir_inode_operations = { #endif }; +WRAP_DIR_ITER(jfs_readdir) // FIXME! const struct file_operations jfs_dir_operations = { .read = generic_read_dir, - .iterate = jfs_readdir, + .iterate_shared = shared_jfs_readdir, .fsync = jfs_fsync, .unlocked_ioctl = jfs_ioctl, .compat_ioctl = compat_ptr_ioctl, diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 518c3a21a556..4596c90e7b7c 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -1525,10 +1525,11 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, #endif /* NTFS_RW */ +WRAP_DIR_ITER(ntfs_readdir) // FIXME! 
const struct file_operations ntfs_dir_ops = { .llseek = generic_file_llseek, /* Seek inside directory. */ .read = generic_read_dir, /* Return -EISDIR. */ - .iterate = ntfs_readdir, /* Read directory contents. */ + .iterate_shared = shared_ntfs_readdir, /* Read directory contents. */ #ifdef NTFS_RW .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ #endif /* NTFS_RW */ diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 91a194596552..bf2c17ea96a0 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2793,10 +2793,11 @@ const struct file_operations ocfs2_fops = { .remap_file_range = ocfs2_remap_file_range, }; +WRAP_DIR_ITER(ocfs2_readdir) // FIXME! const struct file_operations ocfs2_dops = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ocfs2_readdir, + .iterate_shared = shared_ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, @@ -2842,7 +2843,7 @@ const struct file_operations ocfs2_fops_no_plocks = { const struct file_operations ocfs2_dops_no_plocks = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ocfs2_readdir, + .iterate_shared = shared_ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index ee5c4736480f..de39e067ae65 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -954,10 +954,11 @@ static int ovl_dir_open(struct inode *inode, struct file *file) return 0; } +WRAP_DIR_ITER(ovl_iterate) // FIXME! const struct file_operations ovl_dir_operations = { .read = generic_read_dir, .open = ovl_dir_open, - .iterate = ovl_iterate, + .iterate_shared = shared_ovl_iterate, .llseek = ovl_dir_llseek, .fsync = ovl_dir_fsync, .release = ovl_dir_release, diff --git a/fs/readdir.c b/fs/readdir.c index b264ce60114d..c8c46e294431 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -24,6 +24,53 @@ #include +/* + * Some filesystems were never converted to '->iterate_shared()' + * and their directory iterators want the inode lock held for + * writing. This wrapper allows for converting from the shared + * semantics to the exclusive inode use. + */ +int wrap_directory_iterator(struct file *file, + struct dir_context *ctx, + int (*iter)(struct file *, struct dir_context *)) +{ + struct inode *inode = file_inode(file); + int ret; + + /* + * We'd love to have an 'inode_upgrade_trylock()' operation, + * see the comment in mmap_upgrade_trylock() in mm/memory.c. + * + * But considering this is for "filesystems that never got + * converted", it really doesn't matter. + * + * Also note that since we have to return with the lock held + * for reading, we can't use the "killable()" locking here, + * since we do need to get the lock even if we're dying. + * + * We could do the write part killably and then get the read + * lock unconditionally if it mattered, but see above on why + * this does the very simplistic conversion. + */ + up_read(&inode->i_rwsem); + down_write(&inode->i_rwsem); + + /* + * Since we dropped the inode lock, we should do the + * DEADDIR test again. See 'iterate_dir()' below. + * + * Note that we don't need to re-do the f_pos games, + * since the file must be locked wrt f_pos anyway. + */ + ret = -ENOENT; + if (!IS_DEADDIR(inode)) + ret = iter(file, ctx); + + downgrade_write(&inode->i_rwsem); + return ret; +} +EXPORT_SYMBOL(wrap_directory_iterator); + /* * Note the "unsafe_put_user() semantics: we goto a * label for errors. 
@@ -40,39 +87,28 @@ int iterate_dir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); - bool shared = false; int res = -ENOTDIR; - if (file->f_op->iterate_shared) - shared = true; - else if (!file->f_op->iterate) + + if (!file->f_op->iterate_shared) goto out; res = security_file_permission(file, MAY_READ); if (res) goto out; - if (shared) - res = down_read_killable(&inode->i_rwsem); - else - res = down_write_killable(&inode->i_rwsem); + res = down_read_killable(&inode->i_rwsem); if (res) goto out; res = -ENOENT; if (!IS_DEADDIR(inode)) { ctx->pos = file->f_pos; - if (shared) - res = file->f_op->iterate_shared(file, ctx); - else - res = file->f_op->iterate(file, ctx); + res = file->f_op->iterate_shared(file, ctx); file->f_pos = ctx->pos; fsnotify_access(file); file_accessed(file); } - if (shared) - inode_unlock_shared(inode); - else - inode_unlock(inode); + inode_unlock_shared(inode); out: return res; } diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c index 075f15c43c78..5f1a14d5b927 100644 --- a/fs/vboxsf/dir.c +++ b/fs/vboxsf/dir.c @@ -179,9 +179,10 @@ static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx) return 0; } +WRAP_DIR_ITER(vboxsf_dir_iterate) // FIXME! const struct file_operations vboxsf_dir_fops = { .open = vboxsf_dir_open, - .iterate = vboxsf_dir_iterate, + .iterate_shared = shared_vboxsf_dir_iterate, .release = vboxsf_dir_release, .read = generic_read_dir, .llseek = generic_file_llseek, diff --git a/include/linux/fs.h b/include/linux/fs.h index 6867512907d6..562f2623c9c9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1780,7 +1780,6 @@ struct file_operations { ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, unsigned int flags); - int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); @@ -1817,6 +1816,13 @@ struct file_operations { unsigned int poll_flags); } __randomize_layout; +/* Wrap a directory iterator that needs exclusive inode access */ +int wrap_directory_iterator(struct file *, struct dir_context *, + int (*) (struct file *, struct dir_context *)); +#define WRAP_DIR_ITER(x) \ + static int shared_##x(struct file *file , struct dir_context *ctx) \ + { return wrap_directory_iterator(file, ctx, x); } + struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); -- cgit From 7d84d1b9af6366aa9df1b523bdb7e002372e38d0 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Sun, 6 Aug 2023 14:49:35 +0200 Subject: fs: rely on ->iterate_shared to determine f_pos locking Now that we removed ->iterate we don't need to check for either ->iterate or ->iterate_shared in file_needs_f_pos_lock(). Simply check for ->iterate_shared instead. This will tell us whether we need to unconditionally take the lock. Not only does it allow us to avoid checking f_inode's mode, it also clearly shows that we're locking because of readdir.
Signed-off-by: Christian Brauner --- fs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/file.c b/fs/file.c index dbca26ef7a01..3fd003a8604f 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1049,7 +1049,7 @@ unsigned long __fdget_raw(unsigned int fd) static inline bool file_needs_f_pos_lock(struct file *file) { return (file->f_mode & FMODE_ATOMIC_POS) && - (file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode)); + (file_count(file) > 1 || file->f_op->iterate_shared); } unsigned long __fdget_pos(unsigned int fd) -- cgit From 52a93d39b17dc7eb98b6aa3edb93943248e03b2f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 6 Aug 2023 15:07:51 -0700 Subject: Linux 6.5-rc5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 653238528aac..6bbf9db6b414 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION* -- cgit From 5a15d8348881e9371afdf9f5357a135489496955 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Mon, 7 Aug 2023 10:46:04 +0200 Subject: x86/srso: Tie SBPB bit setting to microcode patch detection The SBPB bit in MSR_IA32_PRED_CMD is supported only after a microcode patch has been applied, so set X86_FEATURE_SBPB only then. Otherwise, guests would attempt to set that bit and #GP on the MSR write. While at it, make SMT detection more robust as some guests - depending on how and what CPUID leaves they report - lead to cpu_smt_control getting set to CPU_SMT_NOT_SUPPORTED, but SRSO_NO should be set for any guest incarnation where one simply cannot do SMT, for whatever reason. Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Reported-by: Konrad Rzeszutek Wilk Reported-by: Salvatore Bonaccorso Signed-off-by: Borislav Petkov (AMD) --- arch/x86/kernel/cpu/amd.c | 19 ++++++++++++------- arch/x86/kernel/cpu/bugs.c | 7 +++---- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 834f310b2f1a..41e10c26efb5 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1238,14 +1238,19 @@ EXPORT_SYMBOL_GPL(amd_get_highest_perf); bool cpu_has_ibpb_brtype_microcode(void) { - u8 fam = boot_cpu_data.x86; - + switch (boot_cpu_data.x86) { /* Zen1/2 IBPB flushes branch type predictions too. */ - if (fam == 0x17) + case 0x17: return boot_cpu_has(X86_FEATURE_AMD_IBPB); - /* Poke the MSR bit on Zen3/4 to check its presence. */ - else if (fam == 0x19) - return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB); - else + case 0x19: + /* Poke the MSR bit on Zen3/4 to check its presence. */ + if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { + setup_force_cpu_cap(X86_FEATURE_SBPB); + return true; + } else { + return false; + } + default: return false; + } } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 7314a6bdc862..2afe69bff218 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2265,14 +2265,13 @@ static void __init srso_select_mitigation(void) * flags for guests. */ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); - setup_force_cpu_cap(X86_FEATURE_SBPB); /* * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied.
*/ if ((boot_cpu_data.x86 < 0x19) && - (cpu_smt_control == CPU_SMT_DISABLED)) + (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) setup_force_cpu_cap(X86_FEATURE_SRSO_NO); } @@ -2345,8 +2344,8 @@ static void __init srso_select_mitigation(void) pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); pred_cmd: - if (boot_cpu_has(X86_FEATURE_SRSO_NO) || - srso_cmd == SRSO_CMD_OFF) + if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) && + boot_cpu_has(X86_FEATURE_SBPB)) x86_pred_cmd = PRED_CMD_SBPB; } -- cgit From 2cbd80642b76480c9b0697297af917d9388a0b46 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Fri, 4 Aug 2023 22:17:32 +0200 Subject: gfs2: Fix freeze consistency check in gfs2_trans_add_meta Function gfs2_trans_add_meta() checks for the SDF_FROZEN flag to make sure that no buffers are added to a transaction while the filesystem is frozen. With the recent freeze/thaw rework, the SDF_FROZEN flag is cleared after thaw_super() is called, which is sufficient for serializing freeze/thaw. However, other filesystem operations started after thaw_super() may now be calling gfs2_trans_add_meta() before the SDF_FROZEN flag is cleared, which will trigger the SDF_FROZEN check in gfs2_trans_add_meta(). Fix that by checking the s_writers.frozen state instead. In addition, make sure not to call gfs2_assert_withdraw() with the sd_log_lock spin lock held. Check for a withdrawn filesystem before checking for a frozen filesystem, and don't pin/add buffers to the current transaction in case of a failure in either case. Signed-off-by: Andreas Gruenbacher Signed-off-by: Bob Peterson --- fs/gfs2/trans.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index ec1631257978..7e835be7032d 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -230,9 +230,11 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct super_block *sb = sdp->sd_vfs; struct gfs2_bufdata *bd; struct gfs2_meta_header *mh; struct gfs2_trans *tr = current->journal_info; + bool withdraw = false; lock_buffer(bh); if (buffer_pinned(bh)) { @@ -266,13 +268,15 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) (unsigned long long)bd->bd_bh->b_blocknr); BUG(); } - if (unlikely(test_bit(SDF_FROZEN, &sdp->sd_flags))) { - fs_info(sdp, "GFS2:adding buf while frozen\n"); - gfs2_assert_withdraw(sdp, 0); - } if (unlikely(gfs2_withdrawn(sdp))) { fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n", (unsigned long long)bd->bd_bh->b_blocknr); + goto out_unlock; + } + if (unlikely(sb->s_writers.frozen == SB_FREEZE_COMPLETE)) { + fs_info(sdp, "GFS2:adding buf while frozen\n"); + withdraw = true; + goto out_unlock; } gfs2_pin(sdp, bd->bd_bh); mh->__pad0 = cpu_to_be64(0); @@ -281,6 +285,8 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) tr->tr_num_buf_new++; out_unlock: gfs2_log_unlock(sdp); + if (withdraw) + gfs2_assert_withdraw(sdp, 0); out: unlock_buffer(bh); } -- cgit From 0be8432166a61abc537e1247e530f4b85970b56b Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Wed, 2 Aug 2023 09:24:12 -0500 Subject: gfs2: Don't use filemap_splice_read Starting with patch 2cb1e08985, gfs2 started using the new function filemap_splice_read rather than the old (and subsequently deleted) function generic_file_splice_read. 
filemap_splice_read works by taking references to a number of folios in the page cache and splicing those folios into a pipe. The folios are then read from the pipe and the folio references are dropped. This can take an arbitrary amount of time. We cannot allow that in gfs2 because those folio references will pin the inode glock to the node and prevent it from being demoted, which can lead to cluster-wide deadlocks. Instead, use copy_splice_read. (In addition, the old generic_file_splice_read called into ->read_iter, which called gfs2_file_read_iter, which took the inode glock during the operation. The new filemap_splice_read interface does not take the inode glock anymore. This is fixable, but it still wouldn't prevent cluster-wide deadlocks.) Fixes: 2cb1e08985e3 ("splice: Use filemap_splice_read() instead of generic_file_splice_read()") Signed-off-by: Bob Peterson Signed-off-by: Andreas Gruenbacher --- fs/gfs2/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 1bf3c4453516..b43fa8b8fc05 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1578,7 +1578,7 @@ const struct file_operations gfs2_file_fops = { .fsync = gfs2_fsync, .lock = gfs2_lock, .flock = gfs2_flock, - .splice_read = filemap_splice_read, + .splice_read = copy_splice_read, .splice_write = gfs2_file_splice_write, .setlease = simple_nosetlease, .fallocate = gfs2_fallocate, @@ -1609,7 +1609,7 @@ const struct file_operations gfs2_file_fops_nolock = { .open = gfs2_open, .release = gfs2_release, .fsync = gfs2_fsync, - .splice_read = filemap_splice_read, + .splice_read = copy_splice_read, .splice_write = gfs2_file_splice_write, .setlease = generic_setlease, .fallocate = gfs2_fallocate, -- cgit From 0b15afc9038146bb2009e7924b1ead2e919b2a56 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 26 Jul 2023 20:00:35 +0200 Subject: tpm/tpm_tis: Disable interrupts for TUXEDO InfinityBook S 15/17 Gen7 TUXEDO InfinityBook S 15/17 Gen7 suffers from an IRQ problem on tpm_tis like a few other laptops. Add an entry for the workaround. Cc: stable@vger.kernel.org Fixes: e644b2f498d2 ("tpm, tpm_tis: Enable interrupt test") Link: https://bugzilla.suse.com/show_bug.cgi?id=1213645 Signed-off-by: Takashi Iwai Acked-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen --- drivers/char/tpm/tpm_tis.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index cc42cf3de960..a98773ac2e55 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -162,6 +162,14 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L590"), }, }, + { + .callback = tpm_tis_disable_irq, + .ident = "TUXEDO InfinityBook S 15/17 Gen7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_PRODUCT_NAME, "TUXEDO InfinityBook S 15/17 Gen7"), + }, + }, { .callback = tpm_tis_disable_irq, .ident = "UPX-TGL", -- cgit From 0de030b308236a1392f924f527cf74614d8b6aef Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 11 Jun 2023 07:32:10 -0400 Subject: sysctl: set variable key_sysctls storage-class-specifier to static smatch reports security/keys/sysctl.c:12:18: warning: symbol 'key_sysctls' was not declared. Should it be static? This variable is only used in its defining file, so it should be static. 
Signed-off-by: Tom Rix Signed-off-by: Jarkko Sakkinen --- security/keys/sysctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c index b72b82bb20c6..b348e1679d5d 100644 --- a/security/keys/sysctl.c +++ b/security/keys/sysctl.c @@ -9,7 +9,7 @@ #include #include "internal.h" -struct ctl_table key_sysctls[] = { +static struct ctl_table key_sysctls[] = { { .procname = "maxkeys", .data = &key_quota_maxkeys, -- cgit From 554b841d470338a3b1d6335b14ee1cd0c8f5d754 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 2 Aug 2023 07:25:33 -0500 Subject: tpm: Disable RNG for all AMD fTPMs The TPM RNG functionality is not necessary for entropy when the CPU already supports the RDRAND instruction. The TPM RNG functionality was previously disabled on a subset of AMD fTPM series, but reports continue to show stutter problems on some systems that have been root-caused to the TPM RNG functionality. Expand disabling TPM RNG use to all AMD fTPMs, whether or not their firmware versions claim to have fixed the issue. To accomplish this, move the detection into part of the TPM CRB registration and add a flag indicating that the TPM should opt out of hwrng registration. Cc: stable@vger.kernel.org # 6.1.y+ Fixes: b006c439d58d ("hwrng: core - start hwrng kthread also for untrusted sources") Fixes: f1324bbc4011 ("tpm: disable hwrng for fTPM on some AMD designs") Reported-by: daniil.stas@posteo.net Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217719 Reported-by: bitlord0xff@gmail.com Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217212 Signed-off-by: Mario Limonciello Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen --- drivers/char/tpm/tpm-chip.c | 68 ++------------------------------------------- drivers/char/tpm/tpm_crb.c | 30 ++++++++++++++++++++ include/linux/tpm.h | 1 + 3 files changed, 33 insertions(+), 66 deletions(-) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index cf5499e51999..e904aae9771b 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -510,70 +510,6 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip) return 0; } -/* - * Some AMD fTPM versions may cause stutter - * https://www.amd.com/en/support/kb/faq/pa-410 - * - * Fixes are available in two series of fTPM firmware: - * 6.x.y.z series: 6.0.18.6 + - * 3.x.y.z series: 3.57.y.5 + - */ -#ifdef CONFIG_X86 -static bool tpm_amd_is_rng_defective(struct tpm_chip *chip) { - u32 val1, val2; - u64 version; - int ret; - - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - return false; - - ret = tpm_request_locality(chip); - if (ret) - return false; - - ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL); - if (ret) - goto release; - if (val1 != 0x414D4400U /* AMD */) { - ret = -ENODEV; - goto release; - } - ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL); - if (ret) - goto release; - ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL); - -release: - tpm_relinquish_locality(chip); - - if (ret) - return false; - - version = ((u64)val1 << 32) | val2; - if ((version >> 48) == 6) { - if (version >= 0x0006000000180006ULL) - return false; - } else if ((version >> 48) == 3) { - if (version >= 0x0003005700000005ULL) - return false; - } else { - return false; - } - - dev_warn(&chip->dev, - "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n", - version); - - return true; -} -#else -static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip) { - return false; -} -#endif /*
CONFIG_X86 */ - static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); @@ -588,7 +524,7 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) static int tpm_add_hwrng(struct tpm_chip *chip) { if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) || - tpm_amd_is_rng_defective(chip)) + chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED) return 0; snprintf(chip->hwrng_name, sizeof(chip->hwrng_name), @@ -719,7 +655,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) { tpm_del_legacy_sysfs(chip); if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) && - !tpm_amd_is_rng_defective(chip)) + !(chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 1a5d09b18513..9eb1a1859012 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -463,6 +463,28 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status) return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; } +static int crb_check_flags(struct tpm_chip *chip) +{ + u32 val; + int ret; + + ret = crb_request_locality(chip, 0); + if (ret) + return ret; + + ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL); + if (ret) + goto release; + + if (val == 0x414D4400U /* AMD */) + chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED; + +release: + crb_relinquish_locality(chip, 0); + + return ret; +} + static const struct tpm_class_ops tpm_crb = { .flags = TPM_OPS_AUTO_STARTUP, .status = crb_status, @@ -800,6 +822,14 @@ static int crb_acpi_add(struct acpi_device *device) chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; + rc = tpm_chip_bootstrap(chip); + if (rc) + goto out; + + rc = crb_check_flags(chip); + if (rc) + goto out; + rc = tpm_chip_register(chip); out: diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 6a1e8f157255..4ee9d13749ad 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -283,6 +283,7 @@ enum tpm_chip_flags { TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6), TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7), TPM_CHIP_FLAG_SUSPENDED = BIT(8), + TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9), }; #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) -- cgit From e117e7adc637e364b599dc766f1d740698e7e027 Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Fri, 4 Aug 2023 17:08:40 +0000 Subject: tpm/tpm_tis: Disable interrupts for Lenovo P620 devices The Lenovo ThinkStation P620 suffers from an irq storm issue like various other Lenovo machines, so add an entry for it to tpm_tis_dmi_table and force polling. It is worth noting that 481c2d14627d (tpm,tpm_tis: Disable interrupts after 1000 unhandled IRQs) does not seem to fix the problem on this machine, but setting 'tpm_tis.interrupts=0' on the kernel command line does. 
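For background, tpm_tis_dmi_table is a standard DMI quirk table: the driver hands it to dmi_check_system(), which invokes an entry's .callback only when all of its DMI_MATCH fields match the strings reported by the platform firmware. A minimal sketch of the pattern, assuming a hypothetical callback and table name (the real table uses tpm_tis_disable_irq, as in the diff below):

#include <linux/dmi.h>
#include <linux/printk.h>

static int example_quirk_cb(const struct dmi_system_id *id)
{
	pr_info("applying quirk for %s\n", id->ident);
	return 1;	/* non-zero stops the table walk after this match */
}

static const struct dmi_system_id example_quirks[] = {
	{
		.callback = example_quirk_cb,
		.ident = "ThinkStation P620",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P620"),
		},
	},
	{ }	/* terminating entry */
};

/* Typically consulted once during driver init: dmi_check_system(example_quirks); */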
[jarkko@kernel.org: truncated the commit ID in the description to 12 characters] Cc: stable@vger.kernel.org # v6.4+ Fixes: e644b2f498d2 ("tpm, tpm_tis: Enable interrupt test") Signed-off-by: Jonathan McDowell Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen --- drivers/char/tpm/tpm_tis.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index a98773ac2e55..ac4daaf294a3 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -162,6 +162,14 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L590"), }, }, + { + .callback = tpm_tis_disable_irq, + .ident = "ThinkStation P620", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P620"), + }, + }, { .callback = tpm_tis_disable_irq, .ident = "TUXEDO InfinityBook S 15/17 Gen7", -- cgit From c0b067588a4836b762cfc6a4c83f122ca1dbb93a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 1 Aug 2023 18:42:47 -0300 Subject: Revert "perf report: Append inlines to non-DWARF callchains" This reverts commit 46d21ec067490ab9cdcc89b9de5aae28786a8b8e. The tests were made with a specific workload; further tests on a recently updated Fedora 38 system with a system-wide perf.data file show 'perf report' taking excessive time resolving inlines in vmlinux, so let's revert this until a full investigation and improvement of the addr2line support code is made. Reported-by: Jesper Dangaard Brouer Acked-by: Artem Savkov Tested-by: Jesper Dangaard Brouer Cc: Andrii Nakryiko Cc: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Milian Wolff Cc: Peter Zijlstra Link: https://lore.kernel.org/r/ZMl8VyhdwhClTM5g@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 4e62843d51b7..f4cb41ee23cd 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -45,7 +45,6 @@ static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, struct thread *th, bool lock); -static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip); static struct dso *machine__kernel_dso(struct machine *machine) { @@ -2385,10 +2384,6 @@ static int add_callchain_ip(struct thread *thread, ms.maps = maps__get(al.maps); ms.map = map__get(al.map); ms.sym = al.sym; - - if (!branch && append_inlines(cursor, &ms, ip) == 0) - goto out; - srcline = callchain_srcline(&ms, al.addr); err = callchain_cursor_append(cursor, ip, &ms, branch, flags, nr_loop_iter, -- cgit From 8cdd4aeff2e858c95bb088409028893cfb4e53d4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 7 Aug 2023 10:41:42 -0300 Subject: tools arch x86: Sync the msr-index.h copy with the kernel sources To pick up the changes from this cset: 522b1d69219d8f08 ("x86/cpu/amd: Add a Zenbleed fix") That causes no changes to tooling: $ tools/perf/trace/beauty/tracepoints/x86_msr.sh > before $ cp arch/x86/include/asm/msr-index.h tools/arch/x86/include/asm/msr-index.h $ tools/perf/trace/beauty/tracepoints/x86_msr.sh > after $ diff -u before after $ Just silences this perf build warning: Warning: Kernel ABI header differences: diff -u tools/arch/x86/include/asm/msr-index.h arch/x86/include/asm/msr-index.h Cc: Adrian Hunter Cc: Borislav Petkov (AMD) Cc: Ian
Rogers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZND17H7BI4ariERn@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/x86/include/asm/msr-index.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 3aedae61af4f..a00a53e15ab7 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -545,6 +545,7 @@ #define MSR_AMD64_DE_CFG 0xc0011029 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) +#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 -- cgit From 487ae3b42d1040b4cd5ff9754e7516b409204029 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 1 Aug 2023 13:54:52 -0700 Subject: perf stat: Don't display zero tool counts Andi reported (see link below) a regression when printing the 'duration_time' tool event, where it gets printed as "not counted" for most of the CPUs; fix it by skipping zero counts for tool events. Reported-by: Andi Kleen Signed-off-by: Ian Rogers Tested-by: Andi Kleen Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Athira Rajeev Cc: Claire Jensen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/all/ZMlrzcVrVi1lTDmn@tassilo/ Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-display.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 7329b3340f88..d45d5dcb0e2b 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -931,6 +931,11 @@ static bool should_skip_zero_counter(struct perf_stat_config *config, */ if (config->aggr_mode == AGGR_THREAD && config->system_wide) return true; + + /* Tool events have the software PMU but are only gathered on 1. */ + if (evsel__is_tool(counter)) + return true; + /* * Skip value 0 when it's an uncore event and the given aggr id * does not belong to the PMU cpumask. -- cgit From 77245f1c3c6495521f6a3af082696ee2f8ce3921 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Sat, 5 Aug 2023 00:06:43 +0200 Subject: x86/CPU/AMD: Do not leak quotient data after a division by 0 Under certain circumstances, an integer division by 0 that faults can leave stale quotient data from a previous division operation on Zen1 microarchitectures. Do a dummy division 0/1 before returning from the #DE exception handler in order to avoid any leaks of potentially sensitive data.
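The fix works because the x86 DIV instruction divides the 64-bit value in EDX:EAX by the given operand; with EDX:EAX preloaded with 0 and a divisor of 1, both quotient and remainder come out as 0, overwriting any stale internal divider state. An annotated version of the asm the patch adds (the real code wraps it in ALTERNATIVE so it is only patched in on CPUs flagged with X86_BUG_DIV0, as shown in the diff below):

/*
 * Dummy division 0/1: EDX:EAX = 0, divisor = 1.
 * DIV leaves quotient 0 in EAX and remainder 0 in EDX,
 * scrubbing whatever the previous division left behind.
 */
asm volatile("div %2"
	     :: "a" (0), "d" (0), "r" (1));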
Signed-off-by: Borislav Petkov (AMD) Cc: Signed-off-by: Linus Torvalds --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/processor.h | 2 ++ arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ arch/x86/kernel/traps.c | 2 ++ 4 files changed, 24 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 094f88fee536..b69b0d7756aa 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -495,4 +495,5 @@ /* BUG word 2 */ #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ +#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7c67db7c9f53..973db0406528 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -683,10 +683,12 @@ extern u16 get_llc_id(unsigned int cpu); extern u32 amd_get_nodes_per_socket(void); extern u32 amd_get_highest_perf(void); extern bool cpu_has_ibpb_brtype_microcode(void); +extern void amd_clear_divider(void); #else static inline u32 amd_get_nodes_per_socket(void) { return 0; } static inline u32 amd_get_highest_perf(void) { return 0; } static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } +static inline void amd_clear_divider(void) { } #endif extern unsigned long arch_align_stack(unsigned long sp); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1e1e253038ce..b55d8f82b621 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -75,6 +75,10 @@ static const int amd_zenbleed[] = AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); +static const int amd_div0[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), + AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { int osvw_id = *erratum++; @@ -1130,6 +1134,11 @@ static void init_amd(struct cpuinfo_x86 *c) WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS)); zenbleed_check(c); + + if (cpu_has_amd_erratum(c, amd_div0)) { + pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); + setup_force_cpu_bug(X86_BUG_DIV0); + } } #ifdef CONFIG_X86_32 @@ -1309,3 +1318,13 @@ bool cpu_has_ibpb_brtype_microcode(void) return false; } } + +/* + * Issue a DIV 0/1 insn to clear any division data from previous DIV + * operations. + */ +void noinstr amd_clear_divider(void) +{ + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); +} diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 4a817d20ce3b..1885326a8f65 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); + + amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) -- cgit From 22883973244b1caaa26f9c6171a41ba843c8d4bd Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 9 Aug 2023 17:46:00 +0300 Subject: mm: Fix access_remote_vm() regression on tagged addresses GDB uses /proc/PID/mem to access memory of the target process. GDB doesn't untag addresses manually, but relies on the kernel to do the right thing. mem_rw() of procfs uses access_remote_vm() to get data from the target process.
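For background, on architectures with address tagging (for example arm64 Top Byte Ignore, where bits 63:56 of a user pointer are ignored by loads and stores), two pointers that differ only in their tag bits name the same memory, yet a raw VMA lookup treats them as different addresses. A minimal illustrative sketch of what untagging means; the mask and helper name here are made up for illustration, while the kernel itself uses untagged_addr_remote(), as the patch below shows:

/* Illustrative only: strip an arm64-style tag from bits 63:56. */
static unsigned long example_untag(unsigned long addr)
{
	return addr & ~(0xffUL << 56);
}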
That call chain worked fine until recent changes to __access_remote_vm(), which now checks whether there is a VMA at the target address using the raw (still tagged) address. Untag the address before looking up the VMA. Signed-off-by: Kirill A. Shutemov Reported-by: Christina Schimpe Fixes: eee9c708cc89 ("gup: avoid stack expansion warning for known-good case") Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- mm/memory.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/memory.c b/mm/memory.c index 603b2f419948..1ec1ef3418bf 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5705,6 +5705,9 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, if (mmap_read_lock_killable(mm)) return 0; + /* Untag the address before looking up the VMA */ + addr = untagged_addr_remote(mm, addr); + /* Avoid triggering the temporary warning in __get_user_pages */ if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) return 0; -- cgit From cacc6e22932f373a91d7be55a9b992dc77f4c59b Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Mon, 7 Aug 2023 23:12:29 -0500 Subject: tpm: Add a helper for checking hwrng enabled The same checks are repeated in three places to decide whether to use hwrng. Consolidate them into a helper. This also fixes the cleanup path, where one of the three call sites was missing a check. Fixes: 554b841d4703 ("tpm: Disable RNG for all AMD fTPMs") Signed-off-by: Mario Limonciello Signed-off-by: Linus Torvalds --- drivers/char/tpm/tpm-chip.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index e904aae9771b..ea6b4013bc38 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -521,10 +521,20 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) return tpm_get_random(chip, data, max); } +static bool tpm_is_hwrng_enabled(struct tpm_chip *chip) +{ + if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM)) + return false; + if (tpm_is_firmware_upgrade(chip)) + return false; + if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED) + return false; + return true; +} + static int tpm_add_hwrng(struct tpm_chip *chip) { - if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) || - chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED) + if (!tpm_is_hwrng_enabled(chip)) return 0; snprintf(chip->hwrng_name, sizeof(chip->hwrng_name), @@ -629,7 +639,7 @@ int tpm_chip_register(struct tpm_chip *chip) return 0; out_hwrng: - if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip)) + if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); out_ppi: tpm_bios_log_teardown(chip); @@ -654,8 +664,7 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); void tpm_chip_unregister(struct tpm_chip *chip) { tpm_del_legacy_sysfs(chip); - if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) && - !(chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)) + if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) -- cgit