-rw-r--r--  arch/arm/kernel/setup.c          | 16
-rw-r--r--  arch/x86/entry/common.c          |  5
-rw-r--r--  arch/x86/events/intel/lbr.c      |  3
-rw-r--r--  arch/x86/include/asm/page_64.h   |  2
-rw-r--r--  arch/x86/lib/retpoline.S         |  4
-rw-r--r--  arch/x86/xen/enlighten_pv.c      |  2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c  | 50
-rw-r--r--  drivers/pci/pci.c                | 16
-rw-r--r--  drivers/spi/spi-nxp-fspi.c       | 11
-rw-r--r--  drivers/spi/spi-tegra20-slink.c  |  5
-rw-r--r--  include/linux/debug_locks.h      |  2
-rw-r--r--  kernel/dma/swiotlb.c             | 23
-rw-r--r--  kernel/locking/lockdep.c         |  4
-rw-r--r--  kernel/module.c                  | 14
-rw-r--r--  kernel/sched/fair.c              | 28
-rw-r--r--  kernel/signal.c                  | 17
-rw-r--r--  lib/debug_locks.c                |  2
17 files changed, 163 insertions, 41 deletions
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1a5edf562e85..73ca7797b92f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -545,9 +545,11 @@ void notrace cpu_init(void)
  * In Thumb-2, msr with an immediate value is not allowed.
  */
 #ifdef CONFIG_THUMB2_KERNEL
-#define PLC	"r"
+#define PLC_l	"l"
+#define PLC_r	"r"
 #else
-#define PLC	"I"
+#define PLC_l	"I"
+#define PLC_r	"I"
 #endif
 
 /*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
 	"msr	cpsr_c, %9"
 	    :
 	    : "r" (stk),
-	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
 	      "I" (offsetof(struct stack, irq[0])),
-	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
 	      "I" (offsetof(struct stack, abt[0])),
-	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
 	      "I" (offsetof(struct stack, und[0])),
-	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
 	      "I" (offsetof(struct stack, fiq[0])),
-	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 	    : "r14");
 #endif
 }
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 7b2542b13ebd..04bce95bc7e3 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -130,8 +130,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 		/* User code screwed up. */
 		regs->ax = -EFAULT;
 
-		instrumentation_end();
 		local_irq_disable();
+		instrumentation_end();
 		irqentry_exit_to_user_mode(regs);
 		return false;
 	}
@@ -269,15 +269,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 	irqentry_state_t state = irqentry_enter(regs);
 	bool inhcall;
 
+	instrumentation_begin();
 	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 
 	inhcall = get_and_clear_inhcall();
 	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
-		instrumentation_begin();
 		irqentry_exit_cond_resched();
 		instrumentation_end();
 		restore_inhcall(inhcall);
 	} else {
+		instrumentation_end();
 		irqentry_exit(regs, state);
 	}
 }
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 4409d2cccfda..e8453de7a964 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -731,7 +731,8 @@ void reserve_lbr_buffers(void)
 		if (!kmem_cache || cpuc->lbr_xsave)
 			continue;
 
-		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+							GFP_KERNEL | __GFP_ZERO,
 							cpu_to_node(cpu));
 	}
 }
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index ca840fec7776..4bde0dc66100 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -75,7 +75,7 @@ void copy_page(void *to, void *from);
  *
  * With page table isolation enabled, we map the LDT in ... [stay tuned]
  */
-static inline unsigned long task_size_max(void)
+static __always_inline unsigned long task_size_max(void)
 {
 	unsigned long ret;
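The PLC_l/PLC_r split above is about GCC machine constraints: "I" asks for an immediate operand, "r" for any core register, and the ARM-specific "l" for a low register (r0-r7) when compiling Thumb. A minimal standalone sketch of the same constraint choice, not kernel code; it assumes an ARM cross toolchain (e.g. arm-linux-gnueabihf-gcc -mthumb -O2 -c plc_demo.c):

/* plc_demo.c -- illustrative only; mirrors the PLC_l/PLC_r macros above. */
#ifdef __thumb2__
#define PLC_l "l"	/* Thumb: restrict to the low registers r0-r7 */
#define PLC_r "r"	/* Thumb: any core register */
#else
#define PLC_l "I"	/* ARM mode: a data-processing immediate is fine */
#define PLC_r "I"
#endif

unsigned int plc_demo(void)
{
	unsigned int out;

	/* `mov` stands in for the `msr cpsr_c` in cpu_init(): Thumb-2 msr
	 * rejects immediates, so the constant must be materialized in a
	 * register, and "l" further limits which register may be chosen. */
	asm("mov %0, %1" : "=r" (out) : PLC_l (0x13));
	return out;
}

In ARM mode the constant is emitted as an immediate; in Thumb-2 the compiler loads it into a low register first, which is the behavior the patch pins down for the final SVC_MODE operand.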
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 4d32cb06ffd5..ec9922cba30a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -58,12 +58,16 @@ SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
 2:	.skip	5-(2b-1b), 0x90
 SYM_FUNC_END(__x86_indirect_alt_call_\reg)
 
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
 SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
 	ANNOTATE_RETPOLINE_SAFE
 1:	jmp	*%\reg
 2:	.skip	5-(2b-1b), 0x90
 SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
 
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
+
 .endm
 
 /*
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index e87699aa2dc8..03149422dce2 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
 DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
 {
 	/* This should never happen and there is no way to handle it. */
+	instrumentation_begin();
 	pr_err("Unknown trap in Xen PV mode.");
 	BUG();
+	instrumentation_end();
 }
 
 #ifdef CONFIG_X86_MCE
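Several hunks in this set follow one rule: inside a noinstr function, everything instrumentable (printk, WARN/BUG machinery, irqflag tracing — which is why instrumentation_end() has to move after local_irq_disable() in common.c) must sit between instrumentation_begin() and instrumentation_end(). A standalone pattern sketch with the kernel annotations replaced by stubs, since objtool checks the real ones statically:

#include <stdio.h>

/* Stand-ins for the kernel's objtool-checked annotations. */
#define instrumentation_begin()	do { } while (0)
#define instrumentation_end()	do { } while (0)
#define noinstr	/* kernel: places the function in non-instrumentable text */

static noinstr void fatal_trap_model(void)
{
	/* Mirrors exc_xen_unknown_trap() above: pr_err() and BUG() are both
	 * instrumentable, so the section opens before the first of them and
	 * closes after the last -- even though BUG() never returns, because
	 * the bracketing is validated statically, not at runtime. */
	instrumentation_begin();
	fprintf(stderr, "Unknown trap in Xen PV mode.\n");
	instrumentation_end();
}

int main(void)
{
	fatal_trap_model();
	return 0;
}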
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 016a6106151a..3f28eb4d17fe 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -165,6 +165,7 @@ struct meson_host {
 
 	unsigned int bounce_buf_size;
 	void *bounce_buf;
+	void __iomem *bounce_iomem_buf;
 	dma_addr_t bounce_dma_addr;
 	struct sd_emmc_desc *descs;
 	dma_addr_t descs_dma_addr;
@@ -745,6 +746,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
 	writel(start, host->regs + SD_EMMC_START);
 }
 
+/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
+				  size_t buflen, bool to_buffer)
+{
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	struct scatterlist *sgl = data->sg;
+	unsigned int nents = data->sg_len;
+	struct sg_mapping_iter miter;
+	unsigned int offset = 0;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	while ((offset < buflen) && sg_miter_next(&miter)) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+
+		/* When dram_access_quirk, the bounce buffer is an iomem mapping */
+		if (host->dram_access_quirk) {
+			if (to_buffer)
+				memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
+			else
+				memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+		} else {
+			if (to_buffer)
+				memcpy(host->bounce_buf + offset, miter.addr, len);
+			else
+				memcpy(miter.addr, host->bounce_buf + offset, len);
+		}
+
+		offset += len;
+	}
+
+	sg_miter_stop(&miter);
+}
+
 static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
 {
 	struct meson_host *host = mmc_priv(mmc);
@@ -788,8 +830,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
 		if (data->flags & MMC_DATA_WRITE) {
 			cmd_cfg |= CMD_CFG_DATA_WR;
 			WARN_ON(xfer_bytes > host->bounce_buf_size);
-			sg_copy_to_buffer(data->sg, data->sg_len,
-					  host->bounce_buf, xfer_bytes);
+			meson_mmc_copy_buffer(host, data, xfer_bytes, true);
 			dma_wmb();
 		}
 
@@ -958,8 +999,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
 	if (meson_mmc_bounce_buf_read(data)) {
 		xfer_bytes = data->blksz * data->blocks;
 		WARN_ON(xfer_bytes > host->bounce_buf_size);
-		sg_copy_from_buffer(data->sg, data->sg_len,
-				    host->bounce_buf, xfer_bytes);
+		meson_mmc_copy_buffer(host, data, xfer_bytes, false);
 	}
 
 	next_cmd = meson_mmc_get_next_command(cmd);
@@ -1179,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
 		 * instead of the DDR memory
 		 */
 		host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
-		host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+		host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
 		host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
 	} else {
 		/* data bounce buffer */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b717680377a9..8d4ebe095d0c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1900,11 +1900,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
 	int err;
 	int i, bars = 0;
 
-	if (atomic_inc_return(&dev->enable_cnt) > 1) {
-		pci_update_current_state(dev, dev->current_state);
-		return 0;		/* already enabled */
+	/*
+	 * Power state could be unknown at this point, either due to a fresh
+	 * boot or a device removal call. So get the current power state
+	 * so that things like MSI message writing will behave as expected
+	 * (e.g. if the device really is in D0 at enable time).
+	 */
+	if (dev->pm_cap) {
+		u16 pmcsr;
+		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 	}
 
+	if (atomic_inc_return(&dev->enable_cnt) > 1)
+		return 0;		/* already enabled */
+
 	bridge = pci_upstream_bridge(dev);
 	if (bridge)
 		pci_enable_bridge(bridge);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 6e6c2403944d..a66fa97046ee 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1124,12 +1124,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 		goto err_put_ctrl;
 	}
 
-	/* Clear potential interrupts */
-	reg = fspi_readl(f, f->iobase + FSPI_INTR);
-	if (reg)
-		fspi_writel(f, reg, f->iobase + FSPI_INTR);
-
 	/* find the resources - controller memory mapped space */
 	if (is_acpi_node(f->dev->fwnode))
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1167,6 +1161,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* Clear potential interrupts */
+	reg = fspi_readl(f, f->iobase + FSPI_INTR);
+	if (reg)
+		fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
 	/* find the irq */
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index f7c832fd4003..6a726c95ac7a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1118,6 +1118,11 @@ static int tegra_slink_probe(struct platform_device *pdev)
 		pm_runtime_put_noidle(&pdev->dev);
 		goto exit_pm_disable;
 	}
+
+	reset_control_assert(tspi->rst);
+	udelay(2);
+	reset_control_deassert(tspi->rst);
+
 	tspi->def_command_reg  = SLINK_M_S;
 	tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
 	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 2915f56ad421..edb5c186b0b7 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -27,8 +27,10 @@ extern int debug_locks_off(void);
 	int __ret = 0;						\
 								\
 	if (!oops_in_progress && unlikely(c)) {			\
+		instrumentation_begin();			\
 		if (debug_locks_off() && !debug_locks_silent)	\
 			WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c);	\
+		instrumentation_end();				\
 		__ret = 1;					\
 	}							\
 	__ret;							\
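The meson_mmc_copy_buffer() hunk above exists because the dram_access_quirk bounce buffer lives in SRAM behind an __iomem mapping, where plain memcpy() (which the generic sg_copy_to/from_buffer() helpers use) is not safe. A compilable userspace model of the same copy-selection logic; the segment type and byte-wise "io" copies are invented stand-ins for scatterlist and memcpy_toio()/memcpy_fromio():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for one scatterlist segment. */
struct sg_seg {
	void *addr;
	size_t len;
};

/* Byte-wise copies modeling memcpy_toio()/memcpy_fromio(): on real I/O
 * memory, plain memcpy() may issue accesses the bus cannot handle. */
static void copy_toio(volatile uint8_t *dst, const uint8_t *src, size_t n)
{
	while (n--)
		*dst++ = *src++;
}

static void copy_fromio(uint8_t *dst, const volatile uint8_t *src, size_t n)
{
	while (n--)
		*dst++ = *src++;
}

/* Same shape as meson_mmc_copy_buffer(): walk the segments, bound the
 * total by buflen, and pick the iomem-safe copy when required. */
void bounce_copy(struct sg_seg *sg, unsigned int nents, void *bounce,
		 size_t buflen, int to_buffer, int iomem)
{
	size_t offset = 0;

	for (unsigned int i = 0; i < nents && offset < buflen; i++) {
		size_t len = sg[i].len < buflen - offset ? sg[i].len
							 : buflen - offset;
		uint8_t *buf = (uint8_t *)bounce + offset;

		if (iomem) {
			if (to_buffer)
				copy_toio(buf, sg[i].addr, len);
			else
				copy_fromio(sg[i].addr, buf, len);
		} else {
			if (to_buffer)
				memcpy(buf, sg[i].addr, len);
			else
				memcpy(sg[i].addr, buf, len);
		}
		offset += len;
	}
}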
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 8ca7d505d61c..e50df8d8f87e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -335,6 +335,14 @@ void __init swiotlb_exit(void)
 }
 
 /*
+ * Return the offset into an iotlb slot required to keep the device happy.
+ */
+static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+{
+	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+}
+
+/*
  * Bounce: copy the swiotlb buffer from or back to the original dma location
  */
 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
@@ -346,10 +354,17 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	size_t alloc_size = mem->slots[index].alloc_size;
 	unsigned long pfn = PFN_DOWN(orig_addr);
 	unsigned char *vaddr = phys_to_virt(tlb_addr);
+	unsigned int tlb_offset;
 
 	if (orig_addr == INVALID_PHYS_ADDR)
 		return;
 
+	tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+		     swiotlb_align_offset(dev, orig_addr);
+
+	orig_addr += tlb_offset;
+	alloc_size -= tlb_offset;
+
 	if (size > alloc_size) {
 		dev_WARN_ONCE(dev, 1,
 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
@@ -391,14 +406,6 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 #define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
 
 /*
- * Return the offset into an iotlb slot required to keep the device happy.
- */
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
-{
-	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
-}
-
-/*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
  */
 static inline unsigned long get_max_slots(unsigned long boundary_mask)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7641bd407239..e32313072506 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -843,7 +843,7 @@ static int count_matching_names(struct lock_class *new_class)
 }
 
 /* used from NMI context -- must be lockless */
-static __always_inline struct lock_class *
+static noinstr struct lock_class *
 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
@@ -851,12 +851,14 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 	struct lock_class *class;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+		instrumentation_begin();
 		debug_locks_off();
 		printk(KERN_ERR
 			"BUG: looking up invalid subclass: %u\n", subclass);
 		printk(KERN_ERR
 			"turning off the locking correctness validator.\n");
 		dump_stack();
+		instrumentation_end();
 		return NULL;
 	}
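The swiotlb change hoists swiotlb_align_offset() so that swiotlb_bounce() can subtract the device-mandated alignment offset from the slot-relative offset and advance orig_addr/alloc_size by the difference before copying. A standalone worked example of that arithmetic; all addresses and the mask are hypothetical:

#include <assert.h>
#include <stdint.h>

#define IO_TLB_SIZE 2048u

/* Same expression as swiotlb_align_offset() above. */
static unsigned int align_offset(uint64_t min_align_mask, uint64_t addr)
{
	return addr & min_align_mask & (IO_TLB_SIZE - 1);
}

int main(void)
{
	/* Hypothetical: a device with a 4 KiB min-align mask maps a buffer
	 * whose original address ends in 0x234; assume the bounce data for
	 * it sits at a tlb address ending in 0x634. */
	uint64_t orig_addr = 0x40001234;
	uint64_t tlb_addr  = 0x80000634;

	unsigned int tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
				  align_offset(0xfff, orig_addr);

	/* The bounce therefore starts 0x400 bytes into the original buffer,
	 * and the allocation size check shrinks by the same amount. */
	assert(tlb_offset == 0x400);
	return 0;
}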
diff --git a/kernel/module.c b/kernel/module.c
index 7e78dfabca97..927d46cb8eb9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -266,9 +266,18 @@ static void module_assert_mutex_or_preempt(void)
 #endif
 }
 
+#ifdef CONFIG_MODULE_SIG
 static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
 module_param(sig_enforce, bool_enable_only, 0644);
 
+void set_module_sig_enforced(void)
+{
+	sig_enforce = true;
+}
+#else
+#define sig_enforce false
+#endif
+
 /*
  * Export sig_enforce kernel cmdline parameter to allow other subsystems rely
  * on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
@@ -279,11 +288,6 @@ bool is_module_sig_enforced(void)
 }
 EXPORT_SYMBOL(is_module_sig_enforced);
 
-void set_module_sig_enforced(void)
-{
-	sig_enforce = true;
-}
-
 /* Block module loading/unloading? */
 int modules_disabled = 0;
 core_param(nomodule, modules_disabled, bint, 0);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bfaa6e1f6067..23663318fb81 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3298,6 +3298,31 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
+ * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
+ * bottom-up, we only have to test whether the cfs_rq before us on the list
+ * is our child.
+ * If cfs_rq is not on the list, test whether a child needs to be added to
+ * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
+ */
+static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
+{
+	struct cfs_rq *prev_cfs_rq;
+	struct list_head *prev;
+
+	if (cfs_rq->on_list) {
+		prev = cfs_rq->leaf_cfs_rq_list.prev;
+	} else {
+		struct rq *rq = rq_of(cfs_rq);
+
+		prev = rq->tmp_alone_branch;
+	}
+
+	prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
+
+	return (prev_cfs_rq->tg->parent == cfs_rq->tg);
+}
+
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
@@ -3313,6 +3338,9 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 	if (cfs_rq->avg.runnable_sum)
 		return false;
 
+	if (child_cfs_rq_on_list(cfs_rq))
+		return false;
+
 	return true;
 }
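child_cfs_rq_on_list() above leans on an ordering invariant: list_add_leaf_cfs_rq() always inserts a child immediately before its parent, so a single container_of() on ->prev answers "is one of my children still on the list?". A compilable standalone model of the on-list case; all names and types here are invented for illustration:

#include <stdbool.h>
#include <stddef.h>

struct list_head {
	struct list_head *prev, *next;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct group {
	struct group *parent;
};

struct node {
	struct group *tg;
	struct list_head leaf;
};

/* Same test as child_cfs_rq_on_list(): because a child is always linked
 * immediately before its parent, one step back on the list suffices. */
static bool child_on_list(struct node *n)
{
	struct node *prev = container_of(n->leaf.prev, struct node, leaf);

	return prev->tg->parent == n->tg;
}

int main(void)
{
	struct group parent_g = { NULL };
	struct group child_g = { &parent_g };
	struct node parent_n = { &parent_g, { NULL, NULL } };
	struct node child_n = { &child_g, { NULL, NULL } };

	/* Link the child immediately before the parent, mirroring the
	 * invariant list_add_leaf_cfs_rq() maintains for cfs_rqs. */
	child_n.leaf.next = &parent_n.leaf;
	parent_n.leaf.prev = &child_n.leaf;

	return child_on_list(&parent_n) ? 0 : 1;	/* exits 0 */
}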
diff --git a/kernel/signal.c b/kernel/signal.c
index f7c6ffcbd044..f1ecd8f0c11d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -435,6 +435,12 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	 * Preallocation does not hold sighand::siglock so it can't
 	 * use the cache. The lockless caching requires that only
 	 * one consumer and only one producer run at a time.
+	 *
+	 * For the regular allocation case it is sufficient to
+	 * check @q for NULL because this code can only be called
+	 * if the target task @t has not been reaped yet; which
+	 * means this code can never observe the error pointer which is
+	 * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
 	 */
 	q = READ_ONCE(t->sigqueue_cache);
 	if (!q || sigqueue_flags)
@@ -463,13 +469,18 @@ void exit_task_sigqueue_cache(struct task_struct *tsk)
 	struct sigqueue *q = tsk->sigqueue_cache;
 
 	if (q) {
-		tsk->sigqueue_cache = NULL;
 		/*
 		 * Hand it back to the cache as the task might
 		 * be self reaping which would leak the object.
 		 */
 		kmem_cache_free(sigqueue_cachep, q);
 	}
+
+	/*
+	 * Set an error pointer to ensure that @tsk will not cache a
+	 * sigqueue when it is reaping its child tasks
+	 */
+	tsk->sigqueue_cache = ERR_PTR(-1);
 }
 
 static void sigqueue_cache_or_free(struct sigqueue *q)
@@ -481,6 +492,10 @@ static void sigqueue_cache_or_free(struct sigqueue *q)
 	 * is intentional when run without holding current->sighand->siglock,
 	 * which is fine as current obviously cannot run __sigqueue_free()
 	 * concurrently.
+	 *
+	 * The NULL check is safe even if current has been reaped already,
+	 * in which case exit_task_sigqueue_cache() wrote an error pointer
+	 * into current->sigqueue_cache.
 	 */
 	if (!READ_ONCE(current->sigqueue_cache))
 		WRITE_ONCE(current->sigqueue_cache, q);
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 06d3135bd184..a75ee30b77cb 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
 /*
  * Generic 'turn off all lock debugging' function:
  */
-noinstr int debug_locks_off(void)
+int debug_locks_off(void)
 {
 	if (debug_locks && __debug_locks_off()) {
 		if (!debug_locks_silent) {
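The signal.c change turns tsk->sigqueue_cache into a three-state slot: NULL (empty, may cache), a cached sigqueue, or ERR_PTR(-1) once the task exits, which disables caching permanently. A compilable userspace model of that sentinel protocol; the cache type and helper names are invented, and ERR_PTR is re-modeled minimally:

#include <stdlib.h>

/* Kernel-style error-pointer encoding, re-modeled minimally. */
#define ERR_PTR(err)	((void *)(long)(err))

struct one_slot_cache {
	void *slot;	/* NULL, a cached object, or ERR_PTR(-1) */
};

/* Mirrors sigqueue_cache_or_free(): only an empty (NULL) slot accepts
 * the object; a full slot or the exit sentinel makes us free it. */
static void cache_or_free(struct one_slot_cache *c, void *obj)
{
	if (!c->slot)
		c->slot = obj;
	else
		free(obj);
}

/* Mirrors exit_task_sigqueue_cache(): drain the slot, then poison it so
 * nothing can be cached after the owner has started exiting. */
static void cache_exit(struct one_slot_cache *c)
{
	if (c->slot && c->slot != ERR_PTR(-1))
		free(c->slot);
	c->slot = ERR_PTR(-1);
}

int main(void)
{
	struct one_slot_cache c = { NULL };

	cache_or_free(&c, malloc(16));	/* cached: slot was NULL */
	cache_exit(&c);			/* drained and poisoned */
	cache_or_free(&c, malloc(16));	/* freed: sentinel rejects it */
	return 0;
}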