86 files changed, 860 insertions(+), 313 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 9bffdfc648dc..85b022179104 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN route/max_size - INTEGER Maximum number of routes allowed in the kernel. Increase this when using large numbers of interfaces and/or routes. + From linux kernel 3.6 onwards, this is deprecated for ipv4 + as route cache is no longer used. neigh/default/gc_thresh1 - INTEGER Minimum number of entries to keep. Garbage collector will not diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt index fca24c931ec8..753e47cc2e20 100644 --- a/Documentation/thermal/cpu-cooling-api.txt +++ b/Documentation/thermal/cpu-cooling-api.txt @@ -3,7 +3,7 @@ CPU cooling APIs How To Written by Amit Daniel Kachhap <amit.kachhap@linaro.org> -Updated: 12 May 2012 +Updated: 6 Jan 2015 Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) @@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer. clip_cpus: cpumask of cpus where the frequency constraints will happen. -1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) +1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register( + struct device_node *np, const struct cpumask *clip_cpus) + + This interface function registers the cpufreq cooling device with + the name "thermal-cpufreq-%x" linking it with a device tree node, in + order to bind it via the thermal DT code. This api can support multiple + instances of cpufreq cooling devices. + + np: pointer to the cooling device device tree node + clip_cpus: cpumask of cpus where the frequency constraints will happen. + +1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) This interface function unregisters the "thermal-cpufreq-%x" cooling device. 
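The cpu-cooling documentation above describes the new DT-aware registration entry point but gives no usage example. A minimal sketch of how a platform thermal driver might call it; the "example_" names and error handling are illustrative, not from the patch:

    #include <linux/cpu_cooling.h>
    #include <linux/cpumask.h>
    #include <linux/err.h>
    #include <linux/of.h>

    /* Hypothetical probe-time helper, assuming "np" was already
     * resolved from the device tree. */
    static struct thermal_cooling_device *example_cdev;

    static int example_register_cooling(struct device_node *np)
    {
            /* Constrain all possible CPUs; a real driver would pass
             * the cpumask of its own cluster. */
            example_cdev = of_cpufreq_cooling_register(np, cpu_possible_mask);
            if (IS_ERR(example_cdev))
                    return PTR_ERR(example_cdev);
            return 0;
    }

    static void example_unregister_cooling(void)
    {
            /* Section 1.1.3 above: drops the "thermal-cpufreq-%x" device. */
            cpufreq_cooling_unregister(example_cdev);
    }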
diff --git a/MAINTAINERS b/MAINTAINERS index 8bed045c0c48..ab6610e6c610 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4749,7 +4749,7 @@ S: Supported F: drivers/scsi/ipr.* IBM Power Virtual Ethernet Device Driver -M: Santiago Leon <santil@linux.vnet.ibm.com> +M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com> L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/ibm/ibmveth.* @@ -9543,7 +9543,8 @@ F: drivers/platform/x86/thinkpad_acpi.c TI BANDGAP AND THERMAL DRIVER M: Eduardo Valentin <edubezval@gmail.com> L: linux-pm@vger.kernel.org -S: Supported +L: linux-omap@vger.kernel.org +S: Maintained F: drivers/thermal/ti-soc-thermal/ TI CLOCK DRIVER diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index 1e6e5cc1c14c..8c1febd7e3f2 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts @@ -159,13 +159,28 @@ pinctrl-0 = <&pinctrl_enet1>; phy-supply = <&reg_enet_3v3>; phy-mode = "rgmii"; + phy-handle = <&ethphy1>; status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy1: ethernet-phy@0 { + reg = <0>; + }; + + ethphy2: ethernet-phy@1 { + reg = <1>; + }; + }; }; &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; phy-mode = "rgmii"; + phy-handle = <&ethphy2>; status = "okay"; }; diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts index a0f762159cb2..f2b64b1b00fa 100644 --- a/arch/arm/boot/dts/vf610-twr.dts +++ b/arch/arm/boot/dts/vf610-twr.dts @@ -129,13 +129,28 @@ &fec0 { phy-mode = "rmii"; + phy-handle = <&ethphy0>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_fec0>; status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@0 { + reg = <0>; + }; + + ethphy1: ethernet-phy@1 { + reg = <1>; + }; + }; }; &fec1 { phy-mode = "rmii"; + phy-handle = <&ethphy1>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_fec1>; status = "okay"; diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 8127e45e2637..865a7e28ea2d 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; + if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) + vcpu->arch.hcr_el2 &= ~HCR_RW; } static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index fbe909fb0a1a..c3ca89c27c6b 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) * Instead, we invalidate Stage-2 for this IPA, and the * whole of Stage-1. Weep... 
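The imx6sx-sdb and vf610-twr hunks above move each PHY under an explicit mdio node and point the MACs at them with phy-handle. A sketch of how a MAC driver typically consumes that property; of_parse_phandle() and of_phy_connect() are the stock kernel helpers, while the surrounding driver shape is illustrative:

    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/of.h>
    #include <linux/of_mdio.h>
    #include <linux/phy.h>

    /* Hypothetical helper: resolve "phy-handle" from the MAC's DT node. */
    static int example_connect_phy(struct net_device *ndev,
                                   struct device_node *mac_np,
                                   void (*adjust_link)(struct net_device *))
    {
            struct device_node *phy_np;
            struct phy_device *phydev;

            phy_np = of_parse_phandle(mac_np, "phy-handle", 0);
            if (!phy_np)
                    return -ENODEV; /* older DTs lack phy-handle; real drivers fall back */

            phydev = of_phy_connect(ndev, phy_np, adjust_link, 0,
                                    PHY_INTERFACE_MODE_RGMII);
            of_node_put(phy_np);
            return phydev ? 0 : -ENODEV;
    }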
*/ + lsr x1, x1, #12 tlbi ipas2e1is, x1 /* * We have to ensure completion of the invalidation at Stage-2, diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 70a7816535cd..0b4326578985 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) if (!cpu_has_32bit_el1()) return -EINVAL; cpu_reset = &default_regs_reset32; - vcpu->arch.hcr_el2 &= ~HCR_RW; } else { cpu_reset = &default_regs_reset; } diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 75e75d7b1702..244e0dbe45db 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 355 +#define NR_syscalls 356 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 2c1bec9a14b6..61fb6cb9d2ae 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -360,5 +360,6 @@ #define __NR_getrandom 352 #define __NR_memfd_create 353 #define __NR_bpf 354 +#define __NR_execveat 355 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 2ca219e184cd..a0ec4303f2c8 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -375,4 +375,5 @@ ENTRY(sys_call_table) .long sys_getrandom .long sys_memfd_create .long sys_bpf + .long sys_execveat /* 355 */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index ebc4f165690a..0be6c681cab1 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -23,9 +23,9 @@ #define THREAD_SIZE (1 << THREAD_SHIFT) #ifdef CONFIG_PPC64 -#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT +#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT) #else -#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT +#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT) #endif #ifndef __ASSEMBLY__ @@ -71,12 +71,13 @@ struct thread_info { #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) /* how to get the thread information struct from C */ -register unsigned long __current_r1 asm("r1"); static inline struct thread_info *current_thread_info(void) { - /* gcc4, at least, is smart enough to turn this into a single - * rlwinm for ppc32 and clrrdi for ppc64 */ - return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1)); + unsigned long val; + + asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val)); + + return (struct thread_info *)val; } #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 54eca8b3b288..0509bca5e830 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ ld r12,opal_tracepoint_refcount@toc(r2); \ - std r12,32(r1); \ cmpdi r12,0; \ bne- LABEL; \ 1: diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index 32040ace00ea..afbe07907c10 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -231,7 +231,7 @@ failed: struct dbfs_d2fc_hdr { u64 len; /* Length of d2fc buffer without header */ u16 version; /* Version of header */ - char tod_ext[16]; /* TOD clock for d2fc */ + char 
tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */ u64 count; /* Number of VM guests in d2fc buffer */ char reserved[30]; } __attribute__ ((packed)); diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h index 37b9091ab8c0..16aa0c779e07 100644 --- a/arch/s390/include/asm/irqflags.h +++ b/arch/s390/include/asm/irqflags.h @@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags) static inline notrace unsigned long arch_local_save_flags(void) { - return __arch_local_irq_stosm(0x00); + return __arch_local_irq_stnsm(0xff); } static inline notrace unsigned long arch_local_irq_save(void) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 8beee1cceba4..98eb2a579223 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp) set_clock_comparator(S390_lowcore.clock_comparator); } -#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ +#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ +#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */ typedef unsigned long long cycles_t; -static inline void get_tod_clock_ext(char clk[16]) +static inline void get_tod_clock_ext(char *clk) { - typedef struct { char _[sizeof(clk)]; } addrtype; + typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype; asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc"); } static inline unsigned long long get_tod_clock(void) { - unsigned char clk[16]; + unsigned char clk[STORE_CLOCK_EXT_SIZE]; + get_tod_clock_ext(clk); return *((unsigned long long *)&clk[1]); } diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 2b446cf0cc65..67878af257a0 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -289,7 +289,8 @@ #define __NR_bpf 351 #define __NR_s390_pci_mmio_write 352 #define __NR_s390_pci_mmio_read 353 -#define NR_syscalls 354 +#define __NR_execveat 354 +#define NR_syscalls 355 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index a2987243bc76..939ec474b1dd 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) +SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat) diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index f6b3cd056ec2..cc7328080b60 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) return false; } +static int check_per_event(unsigned short cause, unsigned long control, + struct pt_regs *regs) +{ + if (!(regs->psw.mask & PSW_MASK_PER)) + return 0; + /* user space single step */ + if (control == 0) + return 1; + /* over indication for storage alteration */ + if ((control & 0x20200000) && (cause & 0x2000)) + return 1; + if (cause & 0x8000) { + /* all branches */ + if ((control & 0x80800000) == 0x80000000) + return 1; + /* branch into selected range */ + if (((control & 0x80800000) == 0x80800000) && + regs->psw.addr >= current->thread.per_user.start && + regs->psw.addr <= current->thread.per_user.end) + return 1; + } + return 
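The timex.h hunk above replaces sizeof(clk) with an explicit STORE_CLOCK_EXT_SIZE because an array-typed parameter decays to a pointer, so the old addrtype covered 8 bytes rather than the 16 that "stcke" stores. A standalone illustration of that pitfall in plain C, not taken from the patch:

    #include <stdio.h>

    static void takes_array(char clk[16])
    {
            /* "clk" is really a char *, so this prints sizeof(char *): 8 on 64-bit. */
            printf("inside callee: %zu\n", sizeof(clk));
    }

    int main(void)
    {
            char clk[16];

            printf("at caller: %zu\n", sizeof(clk)); /* 16 */
            takes_array(clk);                        /* 8  */
            return 0;
    }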
0; +} + int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { int fixup = probe_get_fixup_type(auprobe->insn); @@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) if (regs->psw.addr - utask->xol_vaddr == ilen) regs->psw.addr = utask->vaddr + ilen; } - /* If per tracing was active generate trap */ - if (regs->psw.mask & PSW_MASK_PER) - do_per_trap(regs); + if (check_per_event(current->thread.per_event.cause, + current->thread.per_user.control, regs)) { + /* fix per address */ + current->thread.per_event.address = utask->vaddr; + /* trigger per event */ + set_pt_regs_flag(regs, PIF_PER_TRAP); + } return 0; } @@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) clear_thread_flag(TIF_UPROBE_SINGLESTEP); regs->int_code = auprobe->saved_int_code; regs->psw.addr = current->utask->vaddr; + current->thread.per_event.address = current->utask->vaddr; } unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, @@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len) __rc; \ }) -#define emu_store_ril(ptr, input) \ +#define emu_store_ril(regs, ptr, input) \ ({ \ unsigned int mask = sizeof(*(ptr)) - 1; \ + __typeof__(ptr) __ptr = (ptr); \ int __rc = 0; \ \ if (!test_facility(34)) \ __rc = EMU_ILLEGAL_OP; \ - else if ((u64 __force)ptr & mask) \ + else if ((u64 __force)__ptr & mask) \ __rc = EMU_SPECIFICATION; \ - else if (put_user(*(input), ptr)) \ + else if (put_user(*(input), __ptr)) \ __rc = EMU_ADDRESSING; \ + if (__rc == 0) \ + sim_stor_event(regs, __ptr, mask + 1); \ __rc; \ }) @@ -198,6 +230,25 @@ union split_register { }; /* + * If user per registers are setup to trace storage alterations and an + * emulated store took place on a fitting address a user trap is generated. + */ +static void sim_stor_event(struct pt_regs *regs, void *addr, int len) +{ + if (!(regs->psw.mask & PSW_MASK_PER)) + return; + if (!(current->thread.per_user.control & PER_EVENT_STORE)) + return; + if ((void *)current->thread.per_user.start > (addr + len)) + return; + if ((void *)current->thread.per_user.end < addr) + return; + current->thread.per_event.address = regs->psw.addr; + current->thread.per_event.cause = PER_EVENT_STORE >> 16; + set_pt_regs_flag(regs, PIF_PER_TRAP); +} + +/* * pc relative instructions are emulated, since parameters may not be * accessible from the xol area due to range limitations. 
*/ @@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs) rc = emu_load_ril((u32 __user *)uptr, &rx->u64); break; case 0x07: /* sthrl */ - rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]); + rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]); break; case 0x0b: /* stgrl */ - rc = emu_store_ril((u64 __user *)uptr, &rx->u64); + rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64); break; case 0x0f: /* strl */ - rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]); + rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]); break; } break; diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 7f0089d9a4aa..e34122e539a1 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk) struct thread_info *ti = task_thread_info(tsk); u64 timer, system; - WARN_ON_ONCE(!irqs_disabled()); - timer = S390_lowcore.last_update_timer; S390_lowcore.last_update_timer = get_vtimer(); S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index be99357d238c..3cf8cc03fff6 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, static unsigned long __gmap_segment_gaddr(unsigned long *entry) { struct page *page; - unsigned long offset; + unsigned long offset, mask; offset = (unsigned long) entry / sizeof(unsigned long); offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE; - page = pmd_to_page((pmd_t *) entry); + mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); + page = virt_to_page((void *)((unsigned long) entry & mask)); return page->index + offset; } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index c52ac77408ca..524496d47ef5 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_DISP(0x88500000, K); break; case BPF_ALU | BPF_NEG: /* A = -A */ - /* lnr %r5,%r5 */ - EMIT2(0x1155); + /* lcr %r5,%r5 */ + EMIT2(0x1355); break; case BPF_JMP | BPF_JA: /* ip += K */ offset = addrs[i + K] + jit->start - jit->prg; @@ -502,8 +502,8 @@ branch: if (filter->jt == filter->jf) { xbranch: /* Emit compare if the branch targets are different */ if (filter->jt != filter->jf) { jit->seen |= SEEN_XREG; - /* cr %r5,%r12 */ - EMIT2(0x195c); + /* clr %r5,%r12 */ + EMIT2(0x155c); } goto branch; case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? 
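Two of the s390 BPF JIT fixes above hinge on instruction semantics: lnr loads the negative of the absolute value (a no-op when A is already negative), while lcr is a true two's-complement negation; likewise cr compares signed while clr compares unsigned, and classic BPF's K/X comparisons are defined as unsigned. A C rendering of the difference, with hypothetical helper names:

    #include <stdio.h>
    #include <stdlib.h>

    static int lnr_like(int a) { return -abs(a); } /* load negative: always <= 0 */
    static int lcr_like(int a) { return -a; }      /* load complement: true negate */

    int main(void)
    {
            int a = -5;
            unsigned int x = 0x80000000u, k = 1;

            printf("lnr: %d, lcr: %d\n", lnr_like(a), lcr_like(a)); /* -5 vs 5 */

            /* Signed vs unsigned: as a u32, 0x80000000 is large, not negative. */
            printf("signed: %d, unsigned: %d\n",
                   (int)x >= (int)k, x >= k); /* 0 vs 1 */
            return 0;
    }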
jt : jf */ diff --git a/block/blk-core.c b/block/blk-core.c index 30f6153a40c2..3ad405571dcc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q) } EXPORT_SYMBOL_GPL(blk_queue_bypass_end); +void blk_set_queue_dying(struct request_queue *q) +{ + queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + + if (q->mq_ops) + blk_mq_wake_waiters(q); + else { + struct request_list *rl; + + blk_queue_for_each_rl(rl, q) { + if (rl->rq_pool) { + wake_up(&rl->wait[BLK_RW_SYNC]); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + } +} +EXPORT_SYMBOL_GPL(blk_set_queue_dying); + /** * blk_cleanup_queue - shutdown a request queue * @q: request queue to shutdown @@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q) /* mark @q DYING, no new request or merges will be allowed afterwards */ mutex_lock(&q->sysfs_lock); - queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + blk_set_queue_dying(q); spin_lock_irq(lock); /* diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 32e8dbb9ad1c..60c9d4a93fe4 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) } /* - * Wakeup all potentially sleeping on normal (non-reserved) tags + * Wakeup all potentially sleeping on tags */ -static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) +void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) { struct blk_mq_bitmap_tags *bt; int i, wake_index; @@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) wake_index = bt_index_inc(wake_index); } + + if (include_reserve) { + bt = &tags->breserved_tags; + if (waitqueue_active(&bt->bs[0].wait)) + wake_up(&bt->bs[0].wait); + } } /* @@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) atomic_dec(&tags->active_queues); - blk_mq_tag_wakeup_all(tags); + blk_mq_tag_wakeup_all(tags, false); } /* @@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) * static and should never need resizing. 
*/ bt_update_count(&tags->bitmap_tags, tdepth); - blk_mq_tag_wakeup_all(tags); + blk_mq_tag_wakeup_all(tags, false); return 0; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 6206ed17ef76..a6fa0fc9d41a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); +extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index da1ab5641227..2f95747c287e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref) wake_up_all(&q->mq_freeze_wq); } -static void blk_mq_freeze_queue_start(struct request_queue *q) +void blk_mq_freeze_queue_start(struct request_queue *q) { bool freeze; @@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q) blk_mq_run_queues(q, false); } } +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); static void blk_mq_freeze_queue_wait(struct request_queue *q) { @@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q) blk_mq_freeze_queue_wait(q); } -static void blk_mq_unfreeze_queue(struct request_queue *q) +void blk_mq_unfreeze_queue(struct request_queue *q) { bool wake; @@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q) wake_up_all(&q->mq_freeze_wq); } } +EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); + +void blk_mq_wake_waiters(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_wakeup_all(hctx->tags, true); + + /* + * If we are called because the queue has now been marked as + * dying, we need to ensure that processes currently waiting on + * the queue are notified as well. 
+ */ + wake_up_all(&q->mq_freeze_wq); +} bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) { @@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, ctx = alloc_data.ctx; } blk_mq_put_ctx(ctx); - if (!rq) + if (!rq) { + blk_mq_queue_exit(q); return ERR_PTR(-EWOULDBLOCK); + } return rq; } EXPORT_SYMBOL(blk_mq_alloc_request); @@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq) } EXPORT_SYMBOL(blk_mq_complete_request); +int blk_mq_request_started(struct request *rq) +{ + return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); +} +EXPORT_SYMBOL_GPL(blk_mq_request_started); + void blk_mq_start_request(struct request *rq) { struct request_queue *q = rq->q; @@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) } EXPORT_SYMBOL(blk_mq_add_to_requeue_list); +void blk_mq_cancel_requeue_work(struct request_queue *q) +{ + cancel_work_sync(&q->requeue_work); +} +EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); + void blk_mq_kick_requeue_list(struct request_queue *q) { kblockd_schedule_work(&q->requeue_work); } EXPORT_SYMBOL(blk_mq_kick_requeue_list); +void blk_mq_abort_requeue_list(struct request_queue *q) +{ + unsigned long flags; + LIST_HEAD(rq_list); + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + while (!list_empty(&rq_list)) { + struct request *rq; + + rq = list_first_entry(&rq_list, struct request, queuelist); + list_del_init(&rq->queuelist); + rq->errors = -EIO; + blk_mq_end_request(rq, rq->errors); + } +} +EXPORT_SYMBOL(blk_mq_abort_requeue_list); + static inline bool is_flush_request(struct request *rq, struct blk_flush_queue *fq, unsigned int tag) { @@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved) break; } } - + static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { struct blk_mq_timeout_data *data = priv; - if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { + /* + * If a request wasn't started before the queue was + * marked dying, kill it here or it'll go unnoticed. 
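blk_mq_freeze_queue_start(), blk_mq_unfreeze_queue() and blk_mq_wake_waiters() are exported above so drivers can quiesce queues across a controller reset and unblock tag waiters on a dying queue. A sketch of the intended pattern, mirroring what the nvme changes later in this diff do; the reset step itself is a placeholder:

    #include <linux/blk-mq.h>

    /* Hypothetical per-queue reset path: freeze I/O, reset hardware, thaw. */
    static void example_reset_queue(struct request_queue *q)
    {
            blk_mq_freeze_queue_start(q);  /* gate new requests behind the freeze */
            blk_mq_stop_hw_queues(q);      /* stop dispatch while hardware is down */

            /* ... reinitialize the controller here ... */

            blk_mq_start_stopped_hw_queues(q, true);
            blk_mq_unfreeze_queue(q);
            blk_mq_kick_requeue_list(q);   /* rerun anything parked for requeue */
    }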
+ */ + if (unlikely(blk_queue_dying(rq->q))) { + rq->errors = -EIO; + blk_mq_complete_request(rq); + } + return; + } + if (rq->cmd_flags & REQ_NO_TIMEOUT) return; if (time_after_eq(jiffies, rq->deadline)) { @@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q, hctx->queue = q; hctx->queue_num = hctx_idx; hctx->flags = set->flags; - hctx->cmd_size = set->cmd_size; blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_hctx_notify, hctx); diff --git a/block/blk-mq.h b/block/blk-mq.h index 206230e64f79..4f4f943c22c3 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q); void blk_mq_clone_flush_request(struct request *flush_rq, struct request *orig_rq); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); +void blk_mq_wake_waiters(struct request_queue *q); /* * CPU hotplug helpers diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 56c025894cdf..246dfb16c3d9 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -190,6 +190,9 @@ void blk_add_timer(struct request *req) struct request_queue *q = req->q; unsigned long expiry; + if (req->cmd_flags & REQ_NO_TIMEOUT) + return; + /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ if (!q->mq_ops && !q->rq_timed_out_fn) return; diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ae9f615382f6..aa2224aa7caa 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -530,7 +530,7 @@ static int null_add_dev(void) goto out_cleanup_queues; nullb->q = blk_mq_init_queue(&nullb->tag_set); - if (!nullb->q) { + if (IS_ERR(nullb->q)) { rv = -ENOMEM; goto out_cleanup_tags; } diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b1d5d8797315..cb529e9a82dd 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx, cmd->fn = handler; cmd->ctx = ctx; cmd->aborted = 0; + blk_mq_start_request(blk_mq_rq_from_pdu(cmd)); } /* Special values must be less than 0x1000 */ @@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, if (unlikely(status)) { if (!(status & NVME_SC_DNR || blk_noretry_request(req)) && (jiffies - req->start_time) < req->timeout) { + unsigned long flags; + blk_mq_requeue_request(req); - blk_mq_kick_requeue_list(req->q); + spin_lock_irqsave(req->q->queue_lock, flags); + if (!blk_queue_stopped(req->q)) + blk_mq_kick_requeue_list(req->q); + spin_unlock_irqrestore(req->q->queue_lock, flags); return; } req->errors = nvme_error_status(status); @@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, } } - blk_mq_start_request(req); - nvme_set_info(cmd, iod, req_completion); spin_lock_irq(&nvmeq->q_lock); if (req->cmd_flags & REQ_DISCARD) @@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev) if (IS_ERR(req)) return PTR_ERR(req); + req->cmd_flags |= REQ_NO_TIMEOUT; cmd_info = blk_mq_rq_to_pdu(req); nvme_set_info(cmd_info, req, async_req_completion); @@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req) struct nvme_command cmd; if (!nvmeq->qid || cmd_rq->aborted) { + unsigned long flags; + + spin_lock_irqsave(&dev_list_lock, flags); if (work_busy(&dev->reset_work)) - return; + goto out; list_del_init(&dev->node); dev_warn(&dev->pci_dev->dev, "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); dev->reset_workfn = nvme_reset_failed_dev; queue_work(nvme_workq, &dev->reset_work); + out: + 
spin_unlock_irqrestore(&dev_list_lock, flags); return; } @@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx, void *ctx; nvme_completion_fn fn; struct nvme_cmd_info *cmd; - static struct nvme_completion cqe = { - .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), - }; + struct nvme_completion cqe; + + if (!blk_mq_request_started(req)) + return; cmd = blk_mq_rq_to_pdu(req); if (cmd->ctx == CMD_CTX_CANCELLED) return; + if (blk_queue_dying(req->q)) + cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); + else + cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); + + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); ctx = cancel_cmd_info(cmd, &fn); @@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); struct nvme_queue *nvmeq = cmd->nvmeq; - dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, - nvmeq->qid); - if (nvmeq->dev->initialized) - nvme_abort_req(req); - /* * The aborted req will be completed on receiving the abort req. * We enable the timer again. If hit twice, it'll cause a device reset, * as the device then is in a faulty state. */ - return BLK_EH_RESET_TIMER; + int ret = BLK_EH_RESET_TIMER; + + dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, + nvmeq->qid); + + spin_lock_irq(&nvmeq->q_lock); + if (!nvmeq->dev->initialized) { + /* + * Force cancelled command frees the request, which requires we + * return BLK_EH_NOT_HANDLED. + */ + nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); + ret = BLK_EH_NOT_HANDLED; + } else + nvme_abort_req(req); + spin_unlock_irq(&nvmeq->q_lock); + + return ret; } static void nvme_free_queue(struct nvme_queue *nvmeq) @@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) */ static int nvme_suspend_queue(struct nvme_queue *nvmeq) { - int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; + int vector; spin_lock_irq(&nvmeq->q_lock); + if (nvmeq->cq_vector == -1) { + spin_unlock_irq(&nvmeq->q_lock); + return 1; + } + vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; nvmeq->dev->online_queues--; + nvmeq->cq_vector = -1; spin_unlock_irq(&nvmeq->q_lock); irq_set_affinity_hint(vector, NULL); @@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) adapter_delete_sq(dev, qid); adapter_delete_cq(dev, qid); } + if (!qid && dev->admin_q) + blk_mq_freeze_queue_start(dev->admin_q); nvme_clear_queue(nvmeq); } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth, int vector) + int depth) { struct device *dmadev = &dev->pci_dev->dev; struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); @@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->cq_phase = 1; nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->q_depth = depth; - nvmeq->cq_vector = vector; nvmeq->qid = qid; dev->queue_count++; dev->queues[qid] = nvmeq; @@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) struct nvme_dev *dev = nvmeq->dev; int result; + nvmeq->cq_vector = qid - 1; result = adapter_alloc_cq(dev, qid, nvmeq); if (result < 0) return result; @@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = { .timeout = nvme_timeout, }; +static void nvme_dev_remove_admin(struct nvme_dev *dev) +{ + if (dev->admin_q && !blk_queue_dying(dev->admin_q)) { + blk_cleanup_queue(dev->admin_q); + 
blk_mq_free_tag_set(&dev->admin_tagset); + } +} + static int nvme_alloc_admin_tags(struct nvme_dev *dev) { if (!dev->admin_q) { @@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) return -ENOMEM; dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); - if (!dev->admin_q) { + if (IS_ERR(dev->admin_q)) { blk_mq_free_tag_set(&dev->admin_tagset); return -ENOMEM; } - } + if (!blk_get_queue(dev->admin_q)) { + nvme_dev_remove_admin(dev); + return -ENODEV; + } + } else + blk_mq_unfreeze_queue(dev->admin_q); return 0; } -static void nvme_free_admin_tags(struct nvme_dev *dev) -{ - if (dev->admin_q) - blk_mq_free_tag_set(&dev->admin_tagset); -} - static int nvme_configure_admin_queue(struct nvme_dev *dev) { int result; @@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) nvmeq = dev->queues[0]; if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); if (!nvmeq) return -ENOMEM; } @@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) if (result) goto free_nvmeq; - result = nvme_alloc_admin_tags(dev); - if (result) - goto free_nvmeq; - + nvmeq->cq_vector = 0; result = queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result) - goto free_tags; + goto free_nvmeq; return result; - free_tags: - nvme_free_admin_tags(dev); free_nvmeq: nvme_free_queues(dev, 0); return result; @@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev) unsigned i; for (i = dev->queue_count; i <= dev->max_qid; i++) - if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) + if (!nvme_alloc_queue(dev, i, dev->q_depth)) break; for (i = dev->online_queues; i <= dev->queue_count - 1; i++) @@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) break; if (!schedule_timeout(ADMIN_TIMEOUT) || fatal_signal_pending(current)) { + /* + * Disable the controller first since we can't trust it + * at this point, but leave the admin queue enabled + * until all queue deletion requests are flushed. + * FIXME: This may take a while if there are more h/w + * queues than admin tags. 
+ */ set_current_state(TASK_RUNNING); - nvme_disable_ctrl(dev, readq(&dev->bar->cap)); - nvme_disable_queue(dev, 0); - - send_sig(SIGKILL, dq->worker->task, 1); + nvme_clear_queue(dev->queues[0]); flush_kthread_worker(dq->worker); + nvme_disable_queue(dev, 0); return; } } @@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work) { struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, cmdinfo.work); - allow_signal(SIGKILL); if (nvme_delete_sq(nvmeq)) nvme_del_queue_end(nvmeq); } @@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev) kthread_stop(tmp); } +static void nvme_freeze_queues(struct nvme_dev *dev) +{ + struct nvme_ns *ns; + + list_for_each_entry(ns, &dev->namespaces, list) { + blk_mq_freeze_queue_start(ns->queue); + + spin_lock(ns->queue->queue_lock); + queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); + spin_unlock(ns->queue->queue_lock); + + blk_mq_cancel_requeue_work(ns->queue); + blk_mq_stop_hw_queues(ns->queue); + } +} + +static void nvme_unfreeze_queues(struct nvme_dev *dev) +{ + struct nvme_ns *ns; + + list_for_each_entry(ns, &dev->namespaces, list) { + queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); + blk_mq_unfreeze_queue(ns->queue); + blk_mq_start_stopped_hw_queues(ns->queue, true); + blk_mq_kick_requeue_list(ns->queue); + } +} + static void nvme_dev_shutdown(struct nvme_dev *dev) { int i; @@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) dev->initialized = 0; nvme_dev_list_remove(dev); - if (dev->bar) + if (dev->bar) { + nvme_freeze_queues(dev); csts = readl(&dev->bar->csts); + } if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { for (i = dev->queue_count - 1; i >= 0; i--) { struct nvme_queue *nvmeq = dev->queues[i]; @@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) nvme_dev_unmap(dev); } -static void nvme_dev_remove_admin(struct nvme_dev *dev) -{ - if (dev->admin_q && !blk_queue_dying(dev->admin_q)) - blk_cleanup_queue(dev->admin_q); -} - static void nvme_dev_remove(struct nvme_dev *dev) { struct nvme_ns *ns; @@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev) list_for_each_entry(ns, &dev->namespaces, list) { if (ns->disk->flags & GENHD_FL_UP) del_gendisk(ns->disk); - if (!blk_queue_dying(ns->queue)) + if (!blk_queue_dying(ns->queue)) { + blk_mq_abort_requeue_list(ns->queue); blk_cleanup_queue(ns->queue); + } } } @@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref) nvme_free_namespaces(dev); nvme_release_instance(dev); blk_mq_free_tag_set(&dev->tagset); + blk_put_queue(dev->admin_q); kfree(dev->queues); kfree(dev->entry); kfree(dev); @@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev) } nvme_init_queue(dev->queues[0], 0); + result = nvme_alloc_admin_tags(dev); + if (result) + goto disable; result = nvme_setup_io_queues(dev); if (result) - goto disable; + goto free_tags; nvme_set_irq_hints(dev); return result; + free_tags: + nvme_dev_remove_admin(dev); disable: nvme_disable_queue(dev, 0); nvme_dev_list_remove(dev); @@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev) dev->reset_workfn = nvme_remove_disks; queue_work(nvme_workq, &dev->reset_work); spin_unlock(&dev_list_lock); + } else { + nvme_unfreeze_queues(dev); + nvme_set_irq_hints(dev); } dev->initialized = 1; return 0; @@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); flush_work(&dev->reset_work); misc_deregister(&dev->miscdev); - nvme_dev_remove(dev); nvme_dev_shutdown(dev); + 
nvme_dev_remove(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); - nvme_free_admin_tags(dev); nvme_release_prp_pools(dev); kref_put(&dev->kref, nvme_free_dev); } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7ef7c098708f..cdfbd21e3597 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_put_disk; q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); - if (!q) { + if (IS_ERR(q)) { err = -ENOMEM; goto out_free_tags; } diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index a82e542ffc21..0b380603a578 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci) byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; - byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; + byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00"; byte force_mt_info = false; byte dir; dword d; diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 51fd6b524371..d1b55fe62817 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, return 0; } +static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct cxl_context *ctx = vma->vm_file->private_data; + unsigned long address = (unsigned long)vmf->virtual_address; + u64 area, offset; + + offset = vmf->pgoff << PAGE_SHIFT; + + pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", + __func__, ctx->pe, address, offset); + + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { + area = ctx->afu->psn_phys; + if (offset > ctx->afu->adapter->ps_size) + return VM_FAULT_SIGBUS; + } else { + area = ctx->psn_phys; + if (offset > ctx->psn_size) + return VM_FAULT_SIGBUS; + } + + mutex_lock(&ctx->status_mutex); + + if (ctx->status != STARTED) { + mutex_unlock(&ctx->status_mutex); + pr_devel("%s: Context not started, failing problem state access\n", __func__); + return VM_FAULT_SIGBUS; + } + + vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); + + mutex_unlock(&ctx->status_mutex); + + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct cxl_mmap_vmops = { + .fault = cxl_mmap_fault, +}; + /* * Map a per-context mmio space into the given vma. 
*/ @@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) u64 len = vma->vm_end - vma->vm_start; len = min(len, ctx->psn_size); - if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size); - } + if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { + /* make sure there is a valid per process space for this AFU */ + if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { + pr_devel("AFU doesn't support mmio space\n"); + return -EINVAL; + } - /* make sure there is a valid per process space for this AFU */ - if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { - pr_devel("AFU doesn't support mmio space\n"); - return -EINVAL; + /* Can't mmap until the AFU is enabled */ + if (!ctx->afu->enabled) + return -EBUSY; } - /* Can't mmap until the AFU is enabled */ - if (!ctx->afu->enabled) - return -EBUSY; - pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, ctx->psn_phys, ctx->pe , ctx->master); + vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - return vm_iomap_memory(vma, ctx->psn_phys, len); + vma->vm_ops = &cxl_mmap_vmops; + return 0; } /* @@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx) afu_release_irqs(ctx); flush_work(&ctx->fault_work); /* Only needed for dedicated process */ wake_up_all(&ctx->wq); - - /* Release Problem State Area mapping */ - mutex_lock(&ctx->mapping_lock); - if (ctx->mapping) - unmap_mapping_range(ctx->mapping, 0, 0, 1); - mutex_unlock(&ctx->mapping_lock); } /* @@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu) * created and torn down after the IDR removed */ __detach_context(ctx); + + /* + * We are force detaching - remove any active PSA mappings so + * userspace cannot interfere with the card if it comes back. + * Easiest way to exercise this is to unbind and rebind the + * driver via sysfs while it is in use. 
+ */ + mutex_lock(&ctx->mapping_lock); + if (ctx->mapping) + unmap_mapping_range(ctx->mapping, 0, 0, 1); + mutex_unlock(&ctx->mapping_lock); } mutex_unlock(&afu->contexts_lock); } diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index e9f2f10dbb37..b15d8113877c 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, pr_devel("%s: pe: %i\n", __func__, ctx->pe); - mutex_lock(&ctx->status_mutex); - if (ctx->status != OPENED) { - rc = -EIO; - goto out; - } - + /* Do this outside the status_mutex to avoid a circular dependency with + * the locking in cxl_mmap_fault() */ if (copy_from_user(&work, uwork, sizeof(struct cxl_ioctl_start_work))) { rc = -EFAULT; goto out; } + mutex_lock(&ctx->status_mutex); + if (ctx->status != OPENED) { + rc = -EIO; + goto out; + } + /* * if any of the reserved fields are set or any of the unused * flags are set it's invalid diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e398eda07298..c8af3ce3ea38 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx) schedule_work(&alx->reset_wk); } -static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) +static int alx_clean_rx_irq(struct alx_priv *alx, int budget) { struct alx_rx_queue *rxq = &alx->rxq; struct alx_rrd *rrd; struct alx_buffer *rxb; struct sk_buff *skb; u16 length, rfd_cleaned = 0; + int work = 0; - while (budget > 0) { + while (work < budget) { rrd = &rxq->rrd[rxq->rrd_read_idx]; if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) break; @@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) ALX_GET_FIELD(le32_to_cpu(rrd->word0), RRD_NOR) != 1) { alx_schedule_reset(alx); - return 0; + return work; } rxb = &rxq->bufs[rxq->read_idx]; @@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) } napi_gro_receive(&alx->napi, skb); - budget--; + work++; next_pkt: if (++rxq->read_idx == alx->rx_ringsz) @@ -258,21 +259,22 @@ next_pkt: if (rfd_cleaned) alx_refill_rx_ring(alx, GFP_ATOMIC); - return budget > 0; + return work; } static int alx_poll(struct napi_struct *napi, int budget) { struct alx_priv *alx = container_of(napi, struct alx_priv, napi); struct alx_hw *hw = &alx->hw; - bool complete = true; unsigned long flags; + bool tx_complete; + int work; - complete = alx_clean_tx_irq(alx) && - alx_clean_rx_irq(alx, budget); + tx_complete = alx_clean_tx_irq(alx); + work = alx_clean_rx_irq(alx, budget); - if (!complete) - return 1; + if (!tx_complete || work == budget) + return budget; napi_complete(&alx->napi); @@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget) alx_post_write(hw); - return 0; + return work; } static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 553dcd8a9df2..96bf01ba32dd 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp) } static void tg3_irq_quiesce(struct tg3 *tp) + __releases(tp->lock) + __acquires(tp->lock) { int i; @@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp) tp->irq_sync = 1; smp_mb(); + spin_unlock_bh(&tp->lock); + for (i = 0; i < tp->irq_cnt; i++) synchronize_irq(tp->napi[i].irq_vec); + + 
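The alx rework above restores the canonical NAPI contract: the poll callback returns the number of packets actually processed, calls napi_complete() only when it consumed less than the budget, and returns the full budget while work remains. Restated as a skeleton, with driver-specific helpers as placeholders:

    /* Skeleton NAPI poll; example_clean_rx()/example_clean_tx() are placeholders. */
    static int example_poll(struct napi_struct *napi, int budget)
    {
            struct example_priv *priv = container_of(napi, struct example_priv, napi);
            bool tx_done = example_clean_tx(priv);
            int work = example_clean_rx(priv, budget);

            /* More RX pending, or TX incomplete: keep polling, don't complete. */
            if (!tx_done || work == budget)
                    return budget;

            napi_complete(napi);
            example_reenable_irqs(priv);
            return work;
    }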
spin_lock_bh(&tp->lock); } /* Fully shutdown all tg3 driver activity elsewhere in the system. @@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp) /* tp->lock is held. */ static int tg3_chip_reset(struct tg3 *tp) + __releases(tp->lock) + __acquires(tp->lock) { u32 val; void (*write_op)(struct tg3 *, u32, u32); @@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp) } smp_mb(); + tg3_full_unlock(tp); + for (i = 0; i < tp->irq_cnt; i++) synchronize_irq(tp->napi[i].irq_vec); + tg3_full_lock(tp, 0); + if (tg3_asic_rev(tp) == ASIC_REV_57780) { val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); @@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque) { struct tg3 *tp = (struct tg3 *) __opaque; - if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) - goto restart_timer; - spin_lock(&tp->lock); + if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { + spin_unlock(&tp->lock); + goto restart_timer; + } + if (tg3_asic_rev(tp) == ASIC_REV_5717 || tg3_flag(tp, 57765_CLASS)) tg3_chk_missed_msi(tp); @@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work) struct tg3 *tp = container_of(work, struct tg3, reset_task); int err; + rtnl_lock(); tg3_full_lock(tp, 0); if (!netif_running(tp->dev)) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); + rtnl_unlock(); return; } @@ -11138,6 +11154,7 @@ out: tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); + rtnl_unlock(); } static int tg3_request_irq(struct tg3 *tp, int irq_num) diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 55eb7f2af2b4..7ef55f5fa664 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev) res = PTR_ERR(lp->pclk); goto err_free_dev; } - clk_enable(lp->pclk); + clk_prepare_enable(lp->pclk); lp->hclk = ERR_PTR(-ENOENT); lp->tx_clk = ERR_PTR(-ENOENT); @@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev) err_out_unregister_netdev: unregister_netdev(dev); err_disable_clock: - clk_disable(lp->pclk); + clk_disable_unprepare(lp->pclk); err_free_dev: free_netdev(dev); return res; @@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev) kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(dev); - clk_disable(lp->pclk); + clk_disable_unprepare(lp->pclk); free_netdev(dev); return 0; @@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) netif_stop_queue(net_dev); netif_device_detach(net_dev); - clk_disable(lp->pclk); + clk_disable_unprepare(lp->pclk); } return 0; } @@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev) struct macb *lp = netdev_priv(net_dev); if (netif_running(net_dev)) { - clk_enable(lp->pclk); + clk_prepare_enable(lp->pclk); netif_device_attach(net_dev); netif_start_queue(net_dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2215d432a059..a936ee8958c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2430,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter) */ n10g = 0; for_each_port(adapter, pidx) - n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); + n10g += is_x_10g_port(&adap2pinfo(adapter, 
pidx)->link_cfg); /* * We default to 1 queue per non-10G port and up to # of cores queues diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 21dc9a20308c..60426cf890a7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -323,6 +323,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx) return v; v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); + pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? + FW_PORT_CMD_MDIOADDR_G(v) : -1; pi->port_type = FW_PORT_CMD_PTYPE_G(v); pi->mod_type = FW_PORT_MOD_TYPE_NA; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index a379c3e4b57f..13d00a38a5bd 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget) * break out of while loop if there are no more * packets waiting */ - if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { - napi_complete(napi); - int_enable = dnet_readl(bp, INTR_ENB); - int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; - dnet_writel(bp, int_enable, INTR_ENB); - return 0; - } + if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) + break; cmd_word = dnet_readl(bp, RX_LEN_FIFO); pkt_len = cmd_word & 0xFFFF; @@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget) "size %u.\n", dev->name, pkt_len); } - budget -= npackets; - if (npackets < budget) { /* We processed all packets available. Tell NAPI it can - * stop polling then re-enable rx interrupts */ + * stop polling then re-enable rx interrupts. + */ napi_complete(napi); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); - return 0; } - /* There are still packets waiting */ - return 1; + return npackets; } static irqreturn_t dnet_interrupt(int irq, void *dev_id) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 469691ad4a1e..40132929daf7 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -424,6 +424,8 @@ struct bufdesc_ex { * (40ns * 6). */ #define FEC_QUIRK_BUG_CAPTURE (1 << 10) +/* Controller has only one MDIO bus */ +#define FEC_QUIRK_SINGLE_MDIO (1 << 11) struct fec_enet_priv_tx_q { int index; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5ebdf8dc8a31..bba87775419d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx28-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | + FEC_QUIRK_SINGLE_MDIO, }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | @@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) int err = -ENXIO, i; /* - * The dual fec interfaces are not equivalent with enet-mac. + * The i.MX28 dual fec interfaces are not equal. * Here are the differences: * * - fec0 supports MII & RMII modes while fec1 only supports RMII @@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) * mdio interface in board design, and need to be configured by * fec0 mii_bus. 
*/ - if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { + if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ if (mii_cnt && fec0_mii_bus) { fep->mii_bus = fec0_mii_bus; @@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) mii_cnt++; /* save fec0 mii_bus */ - if (fep->quirks & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) fec0_mii_bus = fep->mii_bus; return 0; @@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev) pdev->id_entry = of_id->data; fep->quirks = pdev->id_entry->driver_data; + fep->netdev = ndev; fep->num_rx_queues = num_rx_qs; fep->num_tx_queues = num_tx_qs; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 5b8300a32bf5..4d61ef50b465 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -281,6 +281,17 @@ config I40E_DCB If unsure, say N. +config I40E_FCOE + bool "Fibre Channel over Ethernet (FCoE)" + default n + depends on I40E && DCB && FCOE + ---help--- + Say Y here if you want to use Fibre Channel over Ethernet (FCoE) + in the driver. This will create new netdev for exclusive FCoE + use with XL710 FCoE offloads enabled. + + If unsure, say N. + config I40EVF tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" depends on PCI_MSI diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 4b94ddb29c24..c40581999121 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \ i40e_virtchnl_pf.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o -i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o +i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 045b5c4b98b3..ad802dd0f67a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -78,7 +78,7 @@ do { \ } while (0) typedef enum i40e_status_code i40e_status; -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifdef CONFIG_I40E_FCOE #define I40E_FCOE -#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ +#endif #endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 04b441460bbd..cecb340898fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } +#define WB_STRIDE 0x3 + /** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean @@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + /* check to see if there are any non-cache aligned descriptors + * waiting to be written back, and kick the hardware to force + * them to be written back in case of napi polling + */ + if (budget && + !((i & WB_STRIDE) == WB_STRIDE) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + else + tx_ring->arm_wb = false; + if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" @@ 
-777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); dev_info(tx_ring->dev, - "tx hang detected on queue %d, resetting adapter\n", + "tx hang detected on queue %d, reset requested\n", tx_ring->queue_index); - tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); + /* do not fire the reset immediately, wait for the stack to + * decide we are truly stuck, also prevents every queue from + * simultaneously requesting a reset + */ - /* the adapter is about to reset, no point in enabling stuff */ - return true; + /* the adapter is about to reset, no point in enabling polling */ + budget = 1; } netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, @@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } } - return budget > 0; + return !!budget; +} + +/** + * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK + /* allow 00 to be written to the index */; + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), + val); } /** @@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * so the total length of IPv4 header is IHL*4 bytes * The UDP_0 bit *may* bet set if the *inner* header is UDP */ - if (ipv4_tunnel && - (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && - !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { + if (ipv4_tunnel) { skb->transport_header = skb->mac_header + sizeof(struct ethhdr) + (ip_hdr(skb)->ihl * 4); @@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, skb->protocol == htons(ETH_P_8021AD)) ? VLAN_HLEN : 0; - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic( - iph->saddr, iph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && + (udp_hdr(skb)->check != 0)) { + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic( + iph->saddr, iph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); - if (udp_hdr(skb)->check != csum) - goto checksum_fail; + if (udp_hdr(skb)->check != csum) + goto checksum_fail; + + } /* else its GRE and so no outer UDP header */ } skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) struct i40e_vsi *vsi = q_vector->vsi; struct i40e_ring *ring; bool clean_complete = true; + bool arm_wb = false; int budget_per_ring; if (test_bit(__I40E_DOWN, &vsi->state)) { @@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - i40e_for_each_ring(ring, q_vector->tx) + i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + arm_wb |= ring->arm_wb; + } /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. 
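The WB_STRIDE logic above arms a software-triggered write-back whenever the last cleaned descriptor does not land on a 4-descriptor boundary, since the hardware batches descriptor write-backs in cache-line strides. The alignment test in isolation, as a hedged illustration rather than driver code:

    #include <stdbool.h>
    #include <stdio.h>

    #define WB_STRIDE 0x3 /* write-backs batch on 4-descriptor boundaries */

    /* True when descriptor index "i" is not the last slot of a 4-wide stride. */
    static bool needs_wb_kick(unsigned int i)
    {
            return (i & WB_STRIDE) != WB_STRIDE;
    }

    int main(void)
    {
            for (unsigned int i = 0; i < 8; i++)
                    printf("%u -> %s\n", i, needs_wb_kick(i) ? "kick" : "aligned");
            return 0; /* indices 3 and 7 are aligned; the rest need a kick */
    }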
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); /* If work not completed, return budget and polling will return */ - if (!clean_complete) + if (!clean_complete) { + if (arm_wb) + i40e_force_wb(vsi, q_vector); return budget; + } /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete(napi); @@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - if (protocol == htons(ETH_P_IP)) { - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); - } else if (skb_is_gso_v6(skb)) { - - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) - : ipv6_hdr(skb); + } else if (ipv6h->version == 6) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); ipv6h->payload_len = 0; tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, @@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } } else if (tx_flags & I40E_TX_FLAGS_IPV6) { - if (tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } } /* Now set the ctx descriptor fields */ @@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; - + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); @@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Place RS bit on last descriptor of any packet that spans across the * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. 
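The i40e_tso() hunk above stops dispatching on skb_is_gso_v6() and instead reads the version nibble of the (possibly inner) IP header, which distinguishes v4 from v6 for both plain and encapsulated frames. A standalone sketch of why that works; ip_version() is a hypothetical helper, not a kernel API:

    #include <stdint.h>
    #include <stdio.h>

    /* The first nibble of any IP header is the version field:
     * 4 for IPv4, 6 for IPv6, regardless of outer ethertype. */
    static unsigned int ip_version(const uint8_t *hdr)
    {
        return hdr[0] >> 4;
    }

    int main(void)
    {
        uint8_t v4_first_byte = 0x45; /* version 4, IHL 5 */
        uint8_t v6_first_byte = 0x60; /* version 6 */

        printf("%u %u\n", ip_version(&v4_first_byte),
               ip_version(&v6_first_byte));
        return 0;
    }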
*/ -#define WB_STRIDE 0x3 if (((i & WB_STRIDE) != WB_STRIDE) && (first <= &tx_ring->tx_bi[i]) && (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index e60d3accb2e2..18b00231d2f1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -241,6 +241,7 @@ struct i40e_ring { unsigned long last_rx_timestamp; bool ring_active; /* is ring online or not */ + bool arm_wb; /* do something to arm write back */ /* stats structs */ struct i40e_queue_stats stats; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c29ba80ae02b..37583a9d8853 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = { .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x00000f0f, .apr = 1, .mpr = 1, @@ -495,6 +496,7 @@ static struct sh_eth_cpu_data r8a779x_data = { .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x00000f0f, .apr = 1, .mpr = 1, @@ -536,6 +538,8 @@ static struct sh_eth_cpu_data sh7724_data = { EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .trscer_err_mask = DESC_I_RINT8, + .apr = 1, .mpr = 1, .tpauser = 1, @@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) if (!cd->eesr_err_check) cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; + + if (!cd->trscer_err_mask) + cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; } static int sh_eth_check_reset(struct net_device *ndev) @@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) /* Frame recv control (enable multiple-packets per rx irq) */ sh_eth_write(ndev, RMCR_RNC, RMCR); - sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); + sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); if (mdp->cd->bculr) sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 22301bf9c21d..71f5de1171bd 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -369,6 +369,8 @@ enum DESC_I_BIT { DESC_I_RINT1 = 0x0001, }; +#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) + /* RPADIR */ enum RPADIR_BIT { RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, @@ -470,6 +472,9 @@ struct sh_eth_cpu_data { unsigned long tx_check; unsigned long eesr_err_check; + /* Error mask */ + unsigned long trscer_err_mask; + /* hardware features */ unsigned long irq_flags; /* IRQ configuration flags */ unsigned no_psr:1; /* EtherC DO NOT have PSR */ diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e61ee8351272..64d1cef4cda1 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << - priv->host_port); + priv->host_port, -1); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { struct 
cpsw_priv *priv = netdev_priv(ndev); + int vid; + + if (priv->data.dual_emac) + vid = priv->slaves[priv->emac_port].port_vlan; + else + vid = priv->data.default_vlan; if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ @@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, + vid); if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 097ebe7077ac..5246b3a18ff8 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } -int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int ret, idx; @@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) continue; + /* if vid passed is -1 then remove all multicast entry from + * the table irrespective of vlan id, if a valid vlan id is + * passed then remove only multicast added to that vlan id. + * if vlan id doesn't match then move on to next entry. + */ + if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid) + continue; + if (cpsw_ale_get_mcast(ale_entry)) { u8 addr[6]; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index c0d4127aa549..af1e7ecd87c6 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale); int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); -int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags, u16 vid); int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 93e224217e24..f7ff493f1e73 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind) static void team_notify_peers_work(struct work_struct *work) { struct team *team; + int val; team = container_of(work, struct team, notify_peers.dw.work); @@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work) schedule_delayed_work(&team->notify_peers.dw, 0); return; } + val = atomic_dec_if_positive(&team->notify_peers.count_pending); + if (val < 0) { + rtnl_unlock(); + return; + } call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); rtnl_unlock(); - if (!atomic_dec_and_test(&team->notify_peers.count_pending)) + if (val) schedule_delayed_work(&team->notify_peers.dw, msecs_to_jiffies(team->notify_peers.interval)); } @@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team) static void team_mcast_rejoin_work(struct work_struct *work) { struct team *team; + int val; team = container_of(work, struct team, mcast_rejoin.dw.work); @@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct 
work_struct *work) schedule_delayed_work(&team->mcast_rejoin.dw, 0); return; } + val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); + if (val < 0) { + rtnl_unlock(); + return; + } call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); rtnl_unlock(); - if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) + if (val) schedule_delayed_work(&team->mcast_rejoin.dw, msecs_to_jiffies(team->mcast_rejoin.interval)); } diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index dcb6d33141e0..1e9cdca37014 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length) awd.done = 0; urb->context = &awd; - status = usb_submit_urb(urb, GFP_NOIO); + status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { // something went wrong usb_free_urb(urb); diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index e5be2d21868f..a5f9198d5747 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,8 +69,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 10 -#define IWL3160_UCODE_API_MAX 10 +#define IWL7260_UCODE_API_MAX 12 +#define IWL3160_UCODE_API_MAX 12 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 10 @@ -105,7 +105,7 @@ #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" #define IWL7265D_FW_PRE "iwlwifi-7265D-" -#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" +#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_7000 0 diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index bf0a95cb7153..3668fc57e770 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 10 +#define IWL8000_UCODE_API_MAX 12 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 10 diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index f2a047f6bb3e..1bbe4fc47b97 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. + * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, + * regardless of the band or the number of the probes. FW will calculate + * the actual dwell time. 
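The two team work-handler hunks above replace atomic_dec_and_test() with atomic_dec_if_positive(), so a counter that has already drained makes the handler unlock and return early instead of underflowing count_pending and rescheduling forever. A userspace model of that primitive using C11 atomics; dec_if_positive() here is an illustration of the semantics, not the kernel implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Decrement only while the counter is still positive and return the
     * new value; return a negative value, without touching the counter,
     * when nothing was pending. */
    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old > 0) {
            if (atomic_compare_exchange_weak(v, &old, old - 1))
                return old - 1;
        }
        return old - 1; /* <= -1: caller should do nothing */
    }

    int main(void)
    {
        atomic_int pending = 2;

        printf("%d\n", dec_if_positive(&pending)); /* 1: reschedule */
        printf("%d\n", dec_if_positive(&pending)); /* 0: last one done */
        printf("%d\n", dec_if_positive(&pending)); /* -1: bail out */
        return 0;
    }

As in the patch, a positive return means more notifications are pending and the delayed work is rescheduled; a negative return means another path already consumed the count.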
*/ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), @@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), + IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 1f2acf47bfb2..201846de94e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -672,6 +672,7 @@ struct iwl_scan_channel_opt { * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report * and DS parameter set IEs into probe requests. + * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches */ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), @@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), + IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), }; enum iwl_scan_priority { diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index e5294d01181e..ec9a8e7bae1d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid, * already included in the probe template, so we need to set only * req->n_ssids - 1 bits in addition to the first bit. */ -static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) +static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, + enum ieee80211_band band, int n_ssids) { + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + return 10; if (band == IEEE80211_BAND_2GHZ) return 20 + 3 * (n_ssids + 1); return 10 + 2 * (n_ssids + 1); } -static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) +static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, + enum ieee80211_band band) { + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + return 110; return band == IEEE80211_BAND_2GHZ ? 
100 + 20 : 100 + 10; } @@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { u32 passive_dwell = - iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); + iwl_mvm_get_passive_dwell(mvm, + IEEE80211_BAND_2GHZ); params->max_out_time = passive_dwell; } else { params->passive_fragmented = true; @@ -348,8 +355,8 @@ not_bound: params->dwell[band].passive = frag_passive_dwell; else params->dwell[band].passive = - iwl_mvm_get_passive_dwell(band); - params->dwell[band].active = iwl_mvm_get_active_dwell(band, + iwl_mvm_get_passive_dwell(mvm, band); + params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band, n_ssids); } } @@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, if (iwl_mvm_scan_pass_all(mvm, req)) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; + else + flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4f15d9decc81..4333306ccdee 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_flags &= ~TX_CMD_FLG_SEQ_CTL; } - /* tid_tspec will default to 0 = BE when QOS isn't enabled */ - ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; + /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ + if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) + ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; + else + ac = tid_to_mac80211_ac[0]; + tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << TX_CMD_FLG_BT_PRIO_POS; diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index e56e77ef5d2e..917431e30f74 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) if (num_of_ant(mvm->fw->valid_rx_ant) == 1) return false; - if (!mvm->cfg->rx_with_siso_diversity) + if (mvm->cfg->rx_with_siso_diversity) return false; ieee80211_iterate_active_interfaces_atomic( diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2f0c4b170344..d5aadb00dd9e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -527,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) else if (cfg == &iwl7265_n_cfg) cfg_7265d = &iwl7265d_n_cfg; if (cfg_7265d && - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { cfg = cfg_7265d; + iwl_trans->cfg = cfg_7265d; + } #endif pci_set_drvdata(pdev, iwl_trans); diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 846a2e6e34d8..c70efb9a6e78 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -666,7 +666,8 @@ tx_status_ok: } static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, - u8 *entry, int rxring_idx, int desc_idx) + struct sk_buff *new_skb, u8 *entry, + int rxring_idx, int desc_idx) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, u8 tmp_one = 1; struct sk_buff *skb; + if (likely(new_skb)) { + skb = new_skb; + 
goto remap; + } skb = dev_alloc_skb(rtlpci->rxbuffersize); if (!skb) return 0; - rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; +remap: /* just set skb->cb to mapping addr for pci_unmap_single use */ *((dma_addr_t *)skb->cb) = pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), @@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, bufferaddress = *((dma_addr_t *)skb->cb); if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) return 0; + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; if (rtlpriv->use_new_trx_flow) { rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RX_PREPARE, @@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) /*rx pkt */ struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ rtlpci->rx_ring[rxring_idx].idx]; + struct sk_buff *new_skb; if (rtlpriv->use_new_trx_flow) { rx_remained_cnt = @@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); + /* get a new skb - if fail, old one will be reused */ + new_skb = dev_alloc_skb(rtlpci->rxbuffersize); + if (unlikely(!new_skb)) { + pr_err("Allocation of new skb failed in %s\n", + __func__); + goto no_new; + } if (rtlpriv->use_new_trx_flow) { buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc @@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) schedule_work(&rtlpriv->works.lps_change_work); } end: + skb = new_skb; +no_new: if (rtlpriv->use_new_trx_flow) { - _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, rxring_idx, - rtlpci->rx_ring[rxring_idx].idx); + rtlpci->rx_ring[rxring_idx].idx); } else { - _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx, + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, + rxring_idx, rtlpci->rx_ring[rxring_idx].idx); - if (rtlpci->rx_ring[rxring_idx].idx == rtlpci->rxringcount - 1) rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, @@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, rxring_idx, i)) return -ENOMEM; } @@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) for (i = 0; i < rtlpci->rxringcount; i++) { entry = &rtlpci->rx_ring[rxring_idx].desc[i]; - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, rxring_idx, i)) return -ENOMEM; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 22bcb4e12e2a..d8c10764f130 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -88,10 +88,8 @@ struct netfront_cb { #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) struct netfront_stats { - u64 rx_packets; - u64 tx_packets; - u64 rx_bytes; - u64 tx_bytes; + u64 packets; + u64 bytes; struct u64_stats_sync syncp; }; @@ -160,7 +158,8 @@ struct netfront_info { struct netfront_queue *queues; /* Statistics */ - struct netfront_stats __percpu *stats; + struct netfront_stats __percpu *rx_stats; + struct netfront_stats __percpu *tx_stats; atomic_t rx_gso_checksum_fixup; }; @@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); - 
struct netfront_stats *stats = this_cpu_ptr(np->stats); + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); struct xen_netif_tx_request *tx; char *data = skb->data; RING_IDX i; @@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (notify) notify_remote_via_irq(queue->tx_irq); - u64_stats_update_begin(&stats->syncp); - stats->tx_bytes += skb->len; - stats->tx_packets++; - u64_stats_update_end(&stats->syncp); + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->bytes += skb->len; + tx_stats->packets++; + u64_stats_update_end(&tx_stats->syncp); /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ xennet_tx_buf_gc(queue); @@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb) static int handle_incoming_queue(struct netfront_queue *queue, struct sk_buff_head *rxq) { - struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); + struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); int packets_dropped = 0; struct sk_buff *skb; @@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue, continue; } - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->packets++; + rx_stats->bytes += skb->len; + u64_stats_update_end(&rx_stats->syncp); /* Pass it up. */ napi_gro_receive(&queue->napi, skb); @@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, int cpu; for_each_possible_cpu(cpu) { - struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); + struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); + struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + tx_packets = tx_stats->packets; + tx_bytes = tx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); - rx_packets = stats->rx_packets; - tx_packets = stats->tx_packets; - rx_bytes = stats->rx_bytes; - tx_bytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + rx_packets = rx_stats->packets; + rx_bytes = rx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; @@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = { #endif }; +static void xennet_free_netdev(struct net_device *netdev) +{ + struct netfront_info *np = netdev_priv(netdev); + + free_percpu(np->rx_stats); + free_percpu(np->tx_stats); + free_netdev(netdev); +} + static struct net_device *xennet_create_dev(struct xenbus_device *dev) { int err; @@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) np->queues = NULL; err = -ENOMEM; - np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); - if (np->stats == NULL) + np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); + if (np->rx_stats == NULL) + goto exit; + np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); + if (np->tx_stats == NULL) goto exit; netdev->netdev_ops = &xennet_netdev_ops; @@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) return netdev; exit: - free_netdev(netdev); 
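The xen-netfront hunks above split the single per-cpu counter block into independent rx and tx structures, presumably because the two directions update statistics from different contexts and should not share one u64_stats_sync; xennet_get_stats64() then snapshots each direction with its own retry loop. A rough single-threaded emulation of the shape of that seqcount-style snapshot; struct stats and snapshot() are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct stats {
        uint64_t packets;
        uint64_t bytes;
        unsigned int seq; /* even = stable, odd = writer in progress */
    };

    /* Retry the read until a stable, even sequence is observed on both
     * sides of the copy, mirroring u64_stats_fetch_begin/retry. */
    static void snapshot(const struct stats *s, uint64_t *packets,
                         uint64_t *bytes)
    {
        unsigned int start;

        do {
            start = s->seq;
            *packets = s->packets;
            *bytes = s->bytes;
        } while ((start & 1) || start != s->seq);
    }

    int main(void)
    {
        struct stats tx = { .packets = 10, .bytes = 1500, .seq = 2 };
        uint64_t p, b;

        snapshot(&tx, &p, &b);
        printf("%llu %llu\n", (unsigned long long)p,
               (unsigned long long)b);
        return 0;
    }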
+ xennet_free_netdev(netdev); return ERR_PTR(err); } @@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev, return 0; fail: - free_netdev(netdev); + xennet_free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } @@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev) info->queues = NULL; } - free_percpu(info->stats); - - free_netdev(info->netdev); + xennet_free_netdev(info->netdev); return 0; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 91e97ec01418..4d41bf75c233 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id) */ static inline int ap_test_config_domain(unsigned int domain) { - if (!ap_configuration) - return 1; - return ap_test_config(ap_configuration->aqm, domain); + if (!ap_configuration) /* QCI not supported */ + if (domain < 16) + return 1; /* then domains 0...15 are configured */ + else + return 0; + else + return ap_test_config(ap_configuration->aqm, domain); } /** diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c1188ac053c9..2ccbc0788353 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -608,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev) regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); data->mode = THERMAL_DEVICE_DISABLED; + clk_disable_unprepare(data->thermal_clk); return 0; } @@ -617,6 +618,7 @@ static int imx_thermal_resume(struct device *dev) struct imx_thermal_data *data = dev_get_drvdata(dev); struct regmap *map = data->tempmon; + clk_prepare_enable(data->thermal_clk); /* Enabled thermal sensor after resume */ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index e145b66df444..d717f3dab6f1 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid); * * Return: pointer to trip points table, NULL otherwise */ -const struct thermal_trip * const +const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *tz) { struct __thermal_zone *data = tz->devdata; diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 8803e693fe68..2580a4872f90 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -63,7 +63,7 @@ struct rcar_thermal_priv { struct mutex lock; struct list_head list; int id; - int ctemp; + u32 ctemp; }; #define rcar_thermal_for_each_priv(pos, common) \ @@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) { struct device *dev = rcar_priv_to_dev(priv); int i; - int ctemp, old, new; + u32 ctemp, old, new; int ret = -EINVAL; mutex_lock(&priv->lock); @@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) int i; int ret = -ENODEV; int idle = IDLE_INTERVAL; + u32 enr_bits = 0; common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); if (!common) @@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) /* * platform has IRQ support. 
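The ap_bus hunk above changes the no-QCI fallback in ap_test_config_domain(): instead of reporting every domain as configured, only AP domains 0..15 are assumed present when no configuration mask exists. A hypothetical model of the resulting logic; the MSB-first bit test is an assumption about how ap_test_config() indexes its mask:

    #include <stdio.h>

    static int domain_configured(const unsigned int *aqm,
                                 unsigned int domain)
    {
        if (!aqm) /* QCI not supported */
            return domain < 16;
        return (aqm[domain / 32] >> (31 - domain % 32)) & 1;
    }

    int main(void)
    {
        printf("%d %d\n", domain_configured(NULL, 7),
               domain_configured(NULL, 42)); /* 1 0 */
        return 0;
    }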
- * Then, drier use common register + * Then, driver uses common registers */ ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, @@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev) if (IS_ERR(common->base)) return PTR_ERR(common->base); - /* enable temperature comparation */ - rcar_thermal_common_write(common, ENR, 0x00030303); - idle = 0; /* polling delay is not needed */ } @@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev) rcar_thermal_irq_enable(priv); list_move_tail(&priv->list, &common->head); + + /* update ENR bits */ + enr_bits |= 3 << (i * 8); } + /* enable temperature comparation */ + if (irq) + rcar_thermal_common_write(common, ENR, enr_bits); + platform_set_drvdata(pdev, common); dev_info(dev, "%d sensor probed\n", i); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 9083e7520623..0531c752fbbb 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -91,7 +91,7 @@ int of_parse_thermal_zones(void); void of_thermal_destroy_zones(void); int of_thermal_get_ntrips(struct thermal_zone_device *); bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); -const struct thermal_trip * const +const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *); #else static inline int of_parse_thermal_zones(void) { return 0; } @@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, { return 0; } -static inline const struct thermal_trip * const +static inline const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *tz) { return NULL; diff --git a/fs/locks.c b/fs/locks.c index 735b8d3fa78c..59e2f905e4ff 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp) break; } trace_generic_delete_lease(inode, fl); - if (fl) + if (fl && IS_LEASE(fl)) error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); spin_unlock(&inode->i_lock); locks_dispose_list(&dispose); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8aded9ab2e4e..5735e7130d63 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -34,7 +34,6 @@ struct blk_mq_hw_ctx { unsigned long flags; /* BLK_MQ_F_* flags */ struct request_queue *queue; - unsigned int queue_num; struct blk_flush_queue *fq; void *driver_data; @@ -54,7 +53,7 @@ struct blk_mq_hw_ctx { unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; unsigned int numa_node; - unsigned int cmd_size; /* per-request extra data */ + unsigned int queue_num; atomic_t nr_active; @@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); +int blk_mq_request_started(struct request *rq); void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, int error); void __blk_mq_end_request(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); +void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); +void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -212,6 +214,8 @@ void 
blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, void *priv); +void blk_mq_unfreeze_queue(struct request_queue *q); +void blk_mq_freeze_queue_start(struct request_queue *q); /* * Driver command data is immediately after the request. So subtract request diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 445d59231bc4..c294e3e25e37 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -190,6 +190,7 @@ enum rq_flag_bits { __REQ_PM, /* runtime pm request */ __REQ_HASHED, /* on IO scheduler merge hash */ __REQ_MQ_INFLIGHT, /* track inflight for MQ */ + __REQ_NO_TIMEOUT, /* requests may never expire */ __REQ_NR_BITS, /* stops here */ }; @@ -243,5 +244,6 @@ enum rq_flag_bits { #define REQ_PM (1ULL << __REQ_PM) #define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) +#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 679e6e90aa4c..52fd8e8694cf 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); * If device support VLAN filtering this function is called when a * VLAN id is registered. * - * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); + * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); * If device support VLAN filtering this function is called when a * VLAN id is unregistered. * @@ -2085,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */ list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_in_bond_rcu(bond, slave) \ for_each_netdev_rcu(&init_net, slave) \ - if (netdev_master_upper_dev_get_rcu(slave) == bond) + if (netdev_master_upper_dev_get_rcu(slave) == (bond)) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) static inline struct net_device *next_net_device(struct net_device *dev) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 3a6dcaa359b7..f714e8633352 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -174,6 +174,10 @@ enum ovs_packet_attr { OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* attributes. */ + OVS_PACKET_ATTR_UNUSED1, + OVS_PACKET_ATTR_UNUSED2, + OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe, + error logging should be suppressed. 
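The OVS_PACKET_ATTR_UNUSED1/UNUSED2 placeholders above illustrate a netlink uAPI rule: attribute numbers are ABI, so a new entry such as OVS_PACKET_ATTR_PROBE must land at a fixed value rather than renumbering existing attributes. A sketch of the numbering effect with invented enum names:

    #include <stdio.h>

    /* Placeholders pin later attributes to stable values: removing the
     * UNUSED slots would silently shift ATTR_PROBE and break existing
     * userspace that encodes attributes by number. */
    enum packet_attr_sketch {
        ATTR_UNSPEC,
        ATTR_PACKET,
        ATTR_KEY,
        ATTR_ACTIONS,
        ATTR_USERDATA,
        ATTR_EGRESS_TUN_KEY,
        ATTR_UNUSED1,
        ATTR_UNUSED2,
        ATTR_PROBE, /* stays at 8 */
        __ATTR_MAX
    };

    int main(void)
    {
        printf("ATTR_PROBE = %d\n", ATTR_PROBE);
        return 0;
    }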
*/ __OVS_PACKET_ATTR_MAX }; diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 1f1de715197c..e2aa7be3a847 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb) dst = NULL; if (is_broadcast_ether_addr(dest)) { - if (p->flags & BR_PROXYARP && + if (IS_ENABLED(CONFIG_INET) && + p->flags & BR_PROXYARP && skb->protocol == htons(ETH_P_ARP)) br_do_proxy_arp(skb, br, vid); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8e38f17288d3..8d614c93f86a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) case NDTPA_BASE_REACHABLE_TIME: NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, nla_get_msecs(tbp[i])); + /* update reachable_time as well, otherwise, the change will + * only be effective after the next time neigh_periodic_work + * decides to recompute it (can be multiple minutes) + */ + p->reachable_time = + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); break; case NDTPA_GC_STALETIME: NEIGH_VAR_SET(p, GC_STALETIME, @@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, return ret; } +static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + struct neigh_parms *p = ctl->extra2; + int ret; + + if (strcmp(ctl->procname, "base_reachable_time") == 0) + ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); + else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) + ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); + else + ret = -1; + + if (write && ret == 0) { + /* update reachable_time as well, otherwise, the change will + * only be effective after the next time neigh_periodic_work + * decides to recompute it + */ + p->reachable_time = + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); + } + return ret; +} + #define NEIGH_PARMS_DATA_OFFSET(index) \ (&((struct neigh_parms *) 0)->data[index]) @@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; /* ReachableTime (in milliseconds) */ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; + } else { + /* Those handlers will update p->reachable_time after + * base_reachable_time(_ms) is set to ensure the new timer starts being + * applied after the next neighbour update instead of waiting for + * neigh_periodic_work to update its value (can be multiple minutes) + * So any handler that replaces them should do this as well + */ + /* ReachableTime */ + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = + neigh_proc_base_reachable_time; + /* ReachableTime (in milliseconds) */ + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = + neigh_proc_base_reachable_time; } /* Don't export sysctls to unprivileged users */ diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c index ff2d23d8c87a..6ecfce63201a 100644 --- a/net/ipv4/netfilter/nft_redir_ipv4.c +++ b/net/ipv4/netfilter/nft_redir_ipv4.c @@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr, memset(&mr, 0, sizeof(mr)); if (priv->sreg_proto_min) { - mr.range[0].min.all = (__force __be16) - data[priv->sreg_proto_min].data[0]; - mr.range[0].max.all = (__force __be16) - data[priv->sreg_proto_max].data[0]; + mr.range[0].min.all = + *(__be16 
*)&data[priv->sreg_proto_min].data[0]; + mr.range[0].max.all = + *(__be16 *)&data[priv->sreg_proto_max].data[0]; mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c index 2433a6bfb191..11820b6b3613 100644 --- a/net/ipv6/netfilter/nft_redir_ipv6.c +++ b/net/ipv6/netfilter/nft_redir_ipv6.c @@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr, memset(&range, 0, sizeof(range)); if (priv->sreg_proto_min) { - range.min_proto.all = (__force __be16) - data[priv->sreg_proto_min].data[0]; - range.max_proto.all = (__force __be16) - data[priv->sreg_proto_max].data[0]; + range.min_proto.all = + *(__be16 *)&data[priv->sreg_proto_min].data[0]; + range.max_proto.all = + *(__be16 *)&data[priv->sreg_proto_max].data[0]; range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 1d5341f3761d..5d3daae98bf0 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, struct nf_conn *ct; struct net *net; + *diff = 0; + #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, * so turn this into a no-op for IPv6 packets @@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, return 1; #endif - *diff = 0; - /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; @@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, struct ip_vs_conn *n_cp; struct net *net; + /* no diff required for incoming packets */ + *diff = 0; + #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, * so turn this into a no-op for IPv6 packets @@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, return 1; #endif - /* no diff required for incoming packets */ - *diff = 0; - /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index a11674806707..46d1b26a468e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb) */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); pr_debug("Confirming conntrack %p\n", ct); - /* We have to check the DYING flag inside the lock to prevent - a race against nf_ct_get_next_corpse() possibly called from - user context, else we insert an already 'dead' hash, blocking - further use of that particular connection -JM */ + /* We have to check the DYING flag after unlink to prevent + * a race against nf_ct_get_next_corpse() possibly called from + * user context, else we insert an already 'dead' hash, blocking + * further use of that particular connection -JM. 
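The two ip_vs_ftp hunks above hoist *diff = 0 ahead of the CONFIG_IP_VS_IPV6 early return, so the output parameter is initialized on every path out of the helper. A tiny sketch of that bug class; helper() and its arguments are invented:

    #include <stdio.h>

    /* An output parameter must be set before any early-return path,
     * otherwise callers on that path read an uninitialized value. */
    static int helper(int is_ipv6, int *diff)
    {
        *diff = 0; /* moved ahead of the early return */

        if (is_ipv6)
            return 1; /* no-op for IPv6, *diff already valid */

        /* ... a real payload size delta would be computed here ... */
        return 1;
    }

    int main(void)
    {
        int diff = 12345;

        helper(1, &diff);
        printf("%d\n", diff); /* 0, not a stale value */
        return 0;
    }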
+ */ + nf_ct_del_from_dying_or_unconfirmed_list(ct); - if (unlikely(nf_ct_is_dying(ct))) { - nf_conntrack_double_unlock(hash, reply_hash); - local_bh_enable(); - return NF_ACCEPT; - } + if (unlikely(nf_ct_is_dying(ct))) + goto out; /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're @@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; - nf_ct_del_from_dying_or_unconfirmed_list(ct); - /* Timer relative to confirmation time, not original setting time, otherwise we'd get timer wrap in weird delay cases. */ @@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) return NF_ACCEPT; out: + nf_ct_add_to_dying_list(ct); nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert_failed); local_bh_enable(); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 129a8daa4abf..3b3ddb4fb9ee 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx) struct nft_chain *chain, *nc; struct nft_set *set, *ns; - list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + list_for_each_entry(chain, &ctx->table->chains, list) { ctx->chain = chain; err = nft_delrule_by_chain(ctx); if (err < 0) goto out; - - err = nft_delchain(ctx); - if (err < 0) - goto out; } list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { @@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx) goto out; } + list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + ctx->chain = chain; + + err = nft_delchain(ctx); + if (err < 0) + goto out; + } + err = nft_deltable(ctx); out: return err; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index cde4a6702fa3..c421d94c4652 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -321,7 +321,8 @@ replay: nlh = nlmsg_hdr(skb); err = 0; - if (nlh->nlmsg_len < NLMSG_HDRLEN) { + if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || + skb->len < nlh->nlmsg_len) { err = -EINVAL; goto ack; } @@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group) int type; if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) - return -EINVAL; + return 0; type = nfnl_group2type[group]; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index afe2b0b45ec4..aff54fb1c8a0 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr, } if (priv->sreg_proto_min) { - range.min_proto.all = (__force __be16) - data[priv->sreg_proto_min].data[0]; - range.max_proto.all = (__force __be16) - data[priv->sreg_proto_max].data[0]; + range.min_proto.all = + *(__be16 *)&data[priv->sreg_proto_min].data[0]; + range.max_proto.all = + *(__be16 *)&data[priv->sreg_proto_max].data[0]; range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 4e9a5f035cbc..b07349e82d78 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct vport *input_vport; int len; int err; - bool log = !a[OVS_FLOW_ATTR_PROBE]; + bool log = !a[OVS_PACKET_ATTR_PROBE]; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || @@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { 
[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, + [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG }, }; static const struct genl_ops dp_packet_genl_ops[] = { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 6880f34a529a..9cfe2e1dd8b5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); - if (unlikely(offset) < 0) + if (unlikely(offset < 0)) goto out_free; } else { if (ll_header_truncated(dev, len)) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 96ceefeb9daf..a9e174fc0f91 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to) struct sk_buff *skb; skb_queue_walk(&bcl->outqueue, skb) { - if (more(buf_seqno(skb), after)) + if (more(buf_seqno(skb), after)) { + tipc_link_retransmit(bcl, skb, mod(to - after)); break; + } } - tipc_link_retransmit(bcl, skb, mod(to - after)); } /**
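The closing tipc hunk moves the retransmit call inside skb_queue_walk(): when the walk completes without a match, the cursor no longer points at a valid queue element, so acting on it after the loop dereferences list-head state rather than a packet. The same rule shown with a plain singly linked list; node and find_after() are invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    struct node { int seqno; struct node *next; };

    /* Consume the matching entry at the point of the match; after the
     * loop there is no guarantee the cursor is a valid element. */
    static struct node *find_after(struct node *head, int after)
    {
        for (struct node *n = head; n; n = n->next) {
            if (n->seqno > after)
                return n; /* act here, not after the loop */
        }
        return NULL;
    }

    int main(void)
    {
        struct node c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
        struct node *hit = find_after(&a, 15);

        printf("%d\n", hit ? hit->seqno : -1); /* 20 */
        return 0;
    }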