-rw-r--r--.mailmap3
-rw-r--r--Documentation/ABI/testing/sysfs-bus-bcma2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fcoe2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-proximity2
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu2
-rw-r--r--Documentation/ABI/testing/sysfs-platform-chipidea-usb26
-rw-r--r--Documentation/ABI/testing/sysfs-power2
-rw-r--r--Documentation/RCU/checklist.rst17
-rw-r--r--Documentation/RCU/lockdep.rst2
-rw-r--r--Documentation/RCU/rcu_dereference.rst14
-rw-r--r--Documentation/RCU/whatisRCU.rst47
-rw-r--r--Documentation/admin-guide/README.rst91
-rw-r--r--Documentation/admin-guide/hw-vuln/spectre.rst1
-rw-r--r--Documentation/admin-guide/kdump/vmcoreinfo.rst2
-rw-r--r--Documentation/admin-guide/mm/hugetlbpage.rst2
-rw-r--r--Documentation/bpf/instruction-set.rst2
-rw-r--r--Documentation/bpf/map_cgroup_storage.rst4
-rw-r--r--Documentation/conf.py42
-rw-r--r--Documentation/core-api/asm-annotations.rst (renamed from Documentation/asm-annotations.rst)7
-rw-r--r--Documentation/core-api/cpu_hotplug.rst2
-rw-r--r--Documentation/core-api/index.rst4
-rw-r--r--Documentation/core-api/irq/irq-domain.rst2
-rw-r--r--Documentation/core-api/wrappers/atomic_bitops.rst18
-rw-r--r--Documentation/core-api/wrappers/atomic_t.rst19
-rw-r--r--Documentation/core-api/wrappers/memory-barriers.rst18
-rw-r--r--Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml7
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml2
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml1
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml1
-rw-r--r--Documentation/doc-guide/sphinx.rst57
-rw-r--r--Documentation/driver-api/driver-model/devres.rst7
-rw-r--r--Documentation/driver-api/isa.rst2
-rw-r--r--Documentation/fb/udlfb.rst23
-rw-r--r--Documentation/filesystems/caching/backend-api.rst2
-rw-r--r--Documentation/filesystems/ext4/super.rst6
-rw-r--r--Documentation/filesystems/f2fs.rst5
-rw-r--r--Documentation/filesystems/idmappings.rst2
-rw-r--r--Documentation/filesystems/qnx6.rst2
-rw-r--r--Documentation/filesystems/spufs/spufs.rst2
-rw-r--r--Documentation/filesystems/xfs-delayed-logging-design.rst18
-rw-r--r--Documentation/i2c/dev-interface.rst2
-rw-r--r--Documentation/i2c/slave-interface.rst6
-rw-r--r--Documentation/i2c/writing-clients.rst4
-rw-r--r--Documentation/index.rst156
-rw-r--r--Documentation/kbuild/gcc-plugins.rst19
-rw-r--r--Documentation/locking/seqlock.rst2
-rw-r--r--Documentation/memory-barriers.txt177
-rw-r--r--Documentation/mm/unevictable-lru.rst6
-rw-r--r--Documentation/networking/mptcp-sysctl.rst1
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.rst9
-rw-r--r--Documentation/process/5.Posting.rst6
-rw-r--r--Documentation/process/code-of-conduct-interpretation.rst26
-rw-r--r--Documentation/process/coding-style.rst62
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/process/maintainer-pgp-guide.rst286
-rw-r--r--Documentation/process/stable-kernel-rules.rst6
-rw-r--r--Documentation/process/submitting-patches.rst4
-rw-r--r--Documentation/scheduler/sched-design-CFS.rst2
-rw-r--r--Documentation/staging/index.rst42
-rw-r--r--Documentation/subsystem-apis.rst58
-rw-r--r--Documentation/trace/histogram.rst2
-rw-r--r--Documentation/trace/kprobes.rst4
-rw-r--r--Documentation/trace/timerlat-tracer.rst2
-rw-r--r--Documentation/translations/zh_CN/IRQ.txt39
-rw-r--r--Documentation/translations/zh_CN/PCI/acpi-info.rst139
-rw-r--r--Documentation/translations/zh_CN/PCI/index.rst13
-rw-r--r--Documentation/translations/zh_CN/admin-guide/README.rst101
-rw-r--r--Documentation/translations/zh_CN/admin-guide/bootconfig.rst293
-rw-r--r--Documentation/translations/zh_CN/admin-guide/index.rst2
-rw-r--r--Documentation/translations/zh_CN/core-api/circular-buffers.rst210
-rw-r--r--Documentation/translations/zh_CN/core-api/generic-radix-tree.rst23
-rw-r--r--Documentation/translations/zh_CN/core-api/idr.rst80
-rw-r--r--Documentation/translations/zh_CN/core-api/index.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/packing.rst160
-rw-r--r--Documentation/translations/zh_CN/devicetree/changesets.rst37
-rw-r--r--Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst31
-rw-r--r--Documentation/translations/zh_CN/devicetree/index.rst13
-rw-r--r--Documentation/translations/zh_CN/devicetree/kernel-api.rst58
-rw-r--r--Documentation/translations/zh_CN/devicetree/overlay-notes.rst140
-rw-r--r--Documentation/translations/zh_CN/driver-api/gpio/index.rst69
-rw-r--r--Documentation/translations/zh_CN/driver-api/gpio/legacy.rst (renamed from Documentation/translations/zh_CN/gpio.txt)184
-rw-r--r--Documentation/translations/zh_CN/driver-api/index.rst132
-rw-r--r--Documentation/translations/zh_CN/driver-api/io_ordering.rst60
-rw-r--r--Documentation/translations/zh_CN/index.rst4
-rw-r--r--Documentation/translations/zh_CN/io_ordering.txt67
-rw-r--r--Documentation/translations/zh_CN/oops-tracing.txt212
-rw-r--r--Documentation/translations/zh_CN/process/coding-style.rst274
-rw-r--r--Documentation/translations/zh_CN/process/email-clients.rst265
-rw-r--r--Documentation/translations/zh_CN/process/submit-checklist.rst84
-rw-r--r--Documentation/translations/zh_CN/process/submitting-patches.rst714
-rw-r--r--Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst2
-rw-r--r--Documentation/translations/zh_TW/oops-tracing.txt212
-rw-r--r--Documentation/virt/kvm/x86/mmu.rst2
-rw-r--r--Documentation/w1/masters/ds2490.rst2
-rw-r--r--Documentation/w1/w1-generic.rst2
-rw-r--r--Documentation/x86/entry_64.rst4
-rw-r--r--MAINTAINERS29
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/compressed/misc.c2
-rw-r--r--arch/arm/boot/dts/am33xx-l4.dtsi3
-rw-r--r--arch/arm/boot/dts/am5748.dtsi4
-rw-r--r--arch/arm/boot/dts/integratorap-im-pd1.dts1
-rw-r--r--arch/arm/boot/dts/integratorap.dts9
-rw-r--r--arch/arm/boot/dts/lan966x.dtsi4
-rw-r--r--arch/arm/boot/dts/moxart-uc7112lx.dts2
-rw-r--r--arch/arm/boot/dts/moxart.dtsi4
-rw-r--r--arch/arm/mach-sunplus/Kconfig4
-rw-r--r--arch/arm/mm/dump.c2
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8ulp.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi24
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi9
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts2
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/kernel/topology.c2
-rw-r--r--arch/arm64/kvm/arm.c2
-rw-r--r--arch/arm64/mm/mmu.c32
-rw-r--r--arch/loongarch/include/asm/loongson.h2
-rw-r--r--arch/loongarch/kernel/head.S2
-rw-r--r--arch/loongarch/kernel/traps.c15
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/configs/amiga_defconfig4
-rw-r--r--arch/m68k/configs/apollo_defconfig4
-rw-r--r--arch/m68k/configs/atari_defconfig4
-rw-r--r--arch/m68k/configs/bvme6000_defconfig4
-rw-r--r--arch/m68k/configs/hp300_defconfig4
-rw-r--r--arch/m68k/configs/mac_defconfig4
-rw-r--r--arch/m68k/configs/multi_defconfig4
-rw-r--r--arch/m68k/configs/mvme147_defconfig4
-rw-r--r--arch/m68k/configs/mvme16x_defconfig4
-rw-r--r--arch/m68k/configs/q40_defconfig4
-rw-r--r--arch/m68k/configs/sun3_defconfig4
-rw-r--r--arch/m68k/configs/sun3x_defconfig4
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-virt.h9
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo.h7
-rw-r--r--arch/m68k/kernel/setup_mm.c19
-rw-r--r--arch/m68k/virt/config.c11
-rw-r--r--arch/mips/bcm47xx/prom.c4
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268.dtsi18
-rw-r--r--arch/mips/boot/dts/lantiq/Makefile2
-rw-r--r--arch/mips/boot/dts/lantiq/danube_easy50712.dts (renamed from arch/mips/boot/dts/lantiq/easy50712.dts)0
-rw-r--r--arch/mips/cavium-octeon/oct_ilm.c17
-rw-r--r--arch/mips/cavium-octeon/setup.c2
-rw-r--r--arch/mips/configs/ar7_defconfig4
-rw-r--r--arch/mips/configs/ath25_defconfig4
-rw-r--r--arch/mips/configs/ath79_defconfig10
-rw-r--r--arch/mips/configs/bcm63xx_defconfig3
-rw-r--r--arch/mips/configs/bigsur_defconfig9
-rw-r--r--arch/mips/configs/bmips_be_defconfig3
-rw-r--r--arch/mips/configs/bmips_stb_defconfig23
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig1
-rw-r--r--arch/mips/configs/db1xxx_defconfig1
-rw-r--r--arch/mips/configs/decstation_64_defconfig10
-rw-r--r--arch/mips/configs/decstation_defconfig10
-rw-r--r--arch/mips/configs/decstation_r4k_defconfig10
-rw-r--r--arch/mips/configs/fuloong2e_defconfig9
-rw-r--r--arch/mips/configs/generic/board-ocelot.config1
-rw-r--r--arch/mips/configs/gpr_defconfig8
-rw-r--r--arch/mips/configs/ip22_defconfig10
-rw-r--r--arch/mips/configs/ip27_defconfig19
-rw-r--r--arch/mips/configs/ip28_defconfig3
-rw-r--r--arch/mips/configs/ip32_defconfig2
-rw-r--r--arch/mips/configs/jazz_defconfig1
-rw-r--r--arch/mips/configs/lemote2f_defconfig9
-rw-r--r--arch/mips/configs/loongson1b_defconfig4
-rw-r--r--arch/mips/configs/loongson1c_defconfig4
-rw-r--r--arch/mips/configs/loongson2k_defconfig3
-rw-r--r--arch/mips/configs/loongson3_defconfig2
-rw-r--r--arch/mips/configs/malta_defconfig5
-rw-r--r--arch/mips/configs/malta_kvm_defconfig5
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig3
-rw-r--r--arch/mips/configs/maltaaprp_defconfig3
-rw-r--r--arch/mips/configs/maltasmvp_defconfig3
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig3
-rw-r--r--arch/mips/configs/maltaup_defconfig3
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig5
-rw-r--r--arch/mips/configs/mtx1_defconfig10
-rw-r--r--arch/mips/configs/omega2p_defconfig3
-rw-r--r--arch/mips/configs/pic32mzda_defconfig1
-rw-r--r--arch/mips/configs/rb532_defconfig4
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig7
-rw-r--r--arch/mips/configs/rm200_defconfig7
-rw-r--r--arch/mips/configs/rt305x_defconfig4
-rw-r--r--arch/mips/configs/sb1250_swarm_defconfig2
-rw-r--r--arch/mips/configs/vocore2_defconfig3
-rw-r--r--arch/mips/configs/xway_defconfig4
-rw-r--r--arch/mips/include/asm/irq.h4
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa.h20
-rw-r--r--arch/mips/include/asm/octeon/octeon.h1
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h2
-rw-r--r--arch/mips/include/asm/sibyte/sb1250.h1
-rw-r--r--arch/mips/include/asm/sni.h3
-rw-r--r--arch/mips/kernel/prom.c6
-rw-r--r--arch/mips/kernel/relocate.c2
-rw-r--r--arch/mips/kernel/segment.c15
-rw-r--r--arch/mips/kernel/setup.c21
-rw-r--r--arch/mips/lantiq/clk.c1
-rw-r--r--arch/mips/lantiq/prom.c26
-rw-r--r--arch/mips/lantiq/xway/vmmc.c24
-rw-r--r--arch/mips/lib/bswapdi.c14
-rw-r--r--arch/mips/lib/bswapsi.c10
-rw-r--r--arch/mips/loongson2ef/common/pci.c2
-rw-r--r--arch/mips/loongson32/common/platform.c16
-rw-r--r--arch/mips/math-emu/cp1emu.c2
-rw-r--r--arch/mips/pci/pci-ar2315.c2
-rw-r--r--arch/mips/pci/pci-lantiq.c28
-rw-r--r--arch/mips/pic32/pic32mzda/init.c2
-rw-r--r--arch/mips/ralink/bootrom.c15
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c70
-rw-r--r--arch/mips/sgi-ip30/ip30-xtalk.c70
-rw-r--r--arch/mips/sibyte/sb1250/irq.c6
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c9
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/Kconfig.erratas4
-rw-r--r--arch/riscv/errata/thead/errata.c1
-rw-r--r--arch/riscv/include/asm/cacheflush.h5
-rw-r--r--arch/riscv/kernel/setup.c2
-rw-r--r--arch/riscv/kernel/signal.c2
-rw-r--r--arch/riscv/mm/dma-noncoherent.c23
-rw-r--r--arch/s390/kvm/gaccess.c16
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/kvm/pci.c20
-rw-r--r--arch/s390/kvm/pci.h6
-rw-r--r--arch/um/Makefile8
-rw-r--r--arch/um/kernel/sysrq.c3
-rw-r--r--arch/um/kernel/um_arch.c2
-rw-r--r--arch/x86/events/intel/core.c40
-rw-r--r--arch/x86/events/intel/ds.c9
-rw-r--r--arch/x86/events/perf_event.h2
-rw-r--r--arch/x86/include/asm/intel-family.h3
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/smp.h25
-rw-r--r--arch/x86/kernel/alternative.c45
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c5
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c15
-rw-r--r--arch/x86/kvm/cpuid.c13
-rw-r--r--arch/x86/kvm/emulate.c3
-rw-r--r--arch/x86/kvm/mmu/mmu.c2
-rw-r--r--arch/x86/kvm/x86.c10
-rw-r--r--arch/x86/lib/usercopy.c2
-rw-r--r--arch/x86/mm/Makefile3
-rw-r--r--arch/x86/um/shared/sysdep/syscalls_32.h5
-rw-r--r--arch/x86/um/tls_32.c6
-rw-r--r--arch/x86/um/vdso/Makefile2
-rw-r--r--block/genhd.c3
-rw-r--r--certs/Kconfig2
-rw-r--r--drivers/acpi/processor_idle.c23
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/ata/libata-sata.c24
-rw-r--r--drivers/ata/libata-scsi.c10
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/block/virtio_blk.c11
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c12
-rw-r--r--drivers/clk/imx/clk-imx6sx.c4
-rw-r--r--drivers/clk/imx/clk-imx93.c2
-rw-r--r--drivers/clk/ingenic/tcu.c15
-rw-r--r--drivers/clk/microchip/clk-mpfs.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c8
-rw-r--r--drivers/counter/104-quad-8.c6
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c4
-rw-r--r--drivers/dax/hmem/device.c1
-rw-r--r--drivers/dma/ti/k3-udma-private.c6
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c21
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c2
-rw-r--r--drivers/firmware/arm_scmi/clock.c6
-rw-r--r--drivers/firmware/arm_scmi/optee.c1
-rw-r--r--drivers/firmware/arm_scmi/reset.c10
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c46
-rw-r--r--drivers/firmware/arm_scmi/sensors.c25
-rw-r--r--drivers/firmware/efi/efibc.c3
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c8
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c7
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c8
-rw-r--r--drivers/gpio/gpio-ftgpio010.c22
-rw-r--r--drivers/gpio/gpio-mockup.c6
-rw-r--r--drivers/gpio/gpio-mvebu.c15
-rw-r--r--drivers/gpio/gpio-tqmx86.c4
-rw-r--r--drivers/gpio/gpiolib-cdev.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c264
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c12
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h220
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c420
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h4
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c44
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c142
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c13
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c24
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c6
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c3
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c68
-rw-r--r--drivers/i2c/i2c-mux.c5
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c3
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c2
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c2
-rw-r--r--drivers/iommu/intel/dmar.c7
-rw-r--r--drivers/iommu/intel/iommu.c29
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c14
-rw-r--r--drivers/irqchip/irq-stm32-exti.c2
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c9
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c26
-rw-r--r--drivers/mmc/core/sd.c3
-rw-r--r--drivers/mmc/host/mmc_hsq.c2
-rw-r--r--drivers/mmc/host/moxart-mmc.c17
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/net/bonding/bond_3ad.c5
-rw-r--r--drivers/net/bonding/bond_main.c72
-rw-r--r--drivers/net/can/c_can/c_can.h17
-rw-r--r--drivers/net/can/c_can/c_can_main.c11
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c10
-rw-r--r--drivers/net/can/usb/gs_usb.c21
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c4
-rw-r--r--drivers/net/dsa/mt7530.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c28
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c53
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c32
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c23
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c17
-rw-r--r--drivers/net/ethernet/freescale/fec.h11
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c59
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c57
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c20
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c9
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c42
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c163
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h7
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c10
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c10
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c7
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c23
-rw-r--r--drivers/net/ethernet/sun/sunhme.c4
-rw-r--r--drivers/net/hippi/rrunner.c1
-rw-r--r--drivers/net/ipa/ipa_qmi.c8
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c8
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h37
-rw-r--r--drivers/net/ipa/ipa_table.c2
-rw-r--r--drivers/net/ipa/ipa_table.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/mdio/of_mdio.c1
-rw-r--r--drivers/net/netdevsim/hwstats.c6
-rw-r--r--drivers/net/phy/aquantia_main.c53
-rw-r--r--drivers/net/phy/micrel.c18
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/team/team.c24
-rw-r--r--drivers/net/tun.c9
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/usbnet.c7
-rw-r--r--drivers/net/wireguard/netlink.c13
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c6
-rw-r--r--drivers/nvme/host/core.c6
-rw-r--r--drivers/nvme/host/pci.c4
-rw-r--r--drivers/opp/core.c2
-rw-r--r--drivers/perf/arm-cmn.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c87
-rw-r--r--drivers/reset/reset-imx7.c1
-rw-r--r--drivers/reset/reset-microchip-sparx5.c22
-rw-r--r--drivers/reset/reset-npcm.c2
-rw-r--r--drivers/s390/block/dasd_alias.c9
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c30
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c1
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c23
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c4
-rw-r--r--drivers/thunderbolt/icm.c1
-rw-r--r--drivers/thunderbolt/nhi.h1
-rw-r--r--drivers/thunderbolt/switch.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c9
-rw-r--r--drivers/tty/serial/serial-tegra.c5
-rw-r--r--drivers/tty/serial/sifive.c2
-rw-r--r--drivers/tty/serial/tegra-tcu.c2
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/dwc3/core.c13
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/storage/unusual_uas.h21
-rw-r--r--drivers/usb/typec/Kconfig1
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c4
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c17
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c9
-rw-r--r--drivers/xen/xenbus/xenbus_client.c9
-rw-r--r--fs/btrfs/disk-io.c42
-rw-r--r--fs/btrfs/zoned.c40
-rw-r--r--fs/coredump.c38
-rw-r--r--fs/dax.c3
-rw-r--r--fs/exec.c7
-rw-r--r--fs/exfat/fatent.c3
-rw-r--r--fs/ext4/ext4.h10
-rw-r--r--fs/ext4/extents.c4
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/mballoc.c317
-rw-r--r--fs/ext4/mballoc.h1
-rw-r--r--fs/internal.h3
-rw-r--r--fs/ntfs/super.c3
-rw-r--r--fs/open.c2
-rw-r--r--fs/pstore/platform.c63
-rw-r--r--fs/read_write.c22
-rw-r--r--fs/xfs/xfs_notify_failure.c6
-rw-r--r--include/asm-generic/vmlinux.lds.h3
-rw-r--r--include/linux/cpumask.h5
-rw-r--r--include/linux/dmar.h4
-rw-r--r--include/linux/libata.h4
-rw-r--r--include/linux/memcontrol.h45
-rw-r--r--include/linux/memremap.h5
-rw-r--r--include/linux/rcupdate.h42
-rw-r--r--include/linux/rcutiny.h50
-rw-r--r--include/linux/rcutree.h40
-rw-r--r--include/linux/scmi_protocol.h4
-rw-r--r--include/linux/serial_core.h17
-rw-r--r--include/linux/srcutiny.h10
-rw-r--r--include/net/bluetooth/hci_sock.h2
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/ieee802154_netdev.h37
-rw-r--r--include/trace/events/scmi.h30
-rw-r--r--include/uapi/linux/if_tun.h2
-rw-r--r--io_uring/io_uring.c10
-rw-r--r--io_uring/poll.c2
-rw-r--r--kernel/cgroup/cgroup.c5
-rw-r--r--kernel/events/core.c9
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/nsproxy.c3
-rw-r--r--kernel/rcu/rcutorture.c290
-rw-r--r--kernel/rcu/srcutiny.c14
-rw-r--r--kernel/rcu/tasks.h5
-rw-r--r--kernel/rcu/tiny.c27
-rw-r--r--kernel/rcu/tree.c330
-rw-r--r--kernel/rcu/tree_exp.h57
-rw-r--r--kernel/rcu/tree_nocb.h10
-rw-r--r--kernel/rcu/tree_plugin.h26
-rw-r--r--kernel/rcu/tree_stall.h5
-rw-r--r--kernel/sched/core.c14
-rw-r--r--kernel/smp.c3
-rw-r--r--kernel/workqueue.c6
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--mm/damon/dbgfs.c19
-rw-r--r--mm/damon/sysfs.c2
-rw-r--r--mm/frontswap.c3
-rw-r--r--mm/gup.c34
-rw-r--r--mm/huge_memory.c6
-rw-r--r--mm/hugetlb.c14
-rw-r--r--mm/khugepaged.c10
-rw-r--r--mm/madvise.c7
-rw-r--r--mm/memory-failure.c27
-rw-r--r--mm/memory.c14
-rw-r--r--mm/migrate_device.c16
-rw-r--r--mm/page_alloc.c65
-rw-r--r--mm/page_isolation.c25
-rw-r--r--mm/secretmem.c2
-rw-r--r--mm/slab_common.c5
-rw-r--r--mm/slub.c18
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/util.c4
-rw-r--r--mm/vmscan.c4
-rw-r--r--net/batman-adv/hard-interface.c4
-rw-r--r--net/bridge/netfilter/ebtables.c4
-rw-r--r--net/compat.c1
-rw-r--r--net/core/flow_dissector.c5
-rw-r--r--net/core/net_namespace.c7
-rw-r--r--net/ieee802154/socket.c42
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/tcp.c29
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/ip6mr.c5
-rw-r--r--net/mac80211/mlme.c9
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c6
-rw-r--r--net/mac80211/status.c2
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/mac80211/util.c4
-rw-r--r--net/mptcp/protocol.c24
-rw-r--r--net/mptcp/protocol.h2
-rw-r--r--net/mptcp/subflow.c33
-rw-r--r--net/netfilter/nf_conntrack_ftp.c6
-rw-r--r--net/netfilter/nf_conntrack_irc.c34
-rw-r--r--net/netfilter/nf_conntrack_sip.c4
-rw-r--r--net/netfilter/nf_tables_api.c8
-rw-r--r--net/netfilter/nfnetlink_osf.c4
-rw-r--r--net/sched/act_ct.c5
-rw-r--r--net/sched/cls_api.c1
-rw-r--r--net/sched/sch_taprio.c18
-rw-r--r--net/smc/smc_core.c5
-rw-r--r--net/wireless/util.c4
-rw-r--r--scripts/Makefile.debug21
-rwxr-xr-xscripts/checkpatch.pl8
-rwxr-xr-xscripts/clang-tools/run-clang-tools.py1
-rw-r--r--scripts/kconfig/lkc.h1
-rw-r--r--scripts/kconfig/menu.c5
-rw-r--r--sound/core/init.c10
-rw-r--r--sound/hda/intel-dsp-config.c10
-rw-r--r--sound/pci/hda/hda_bind.c4
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c25
-rw-r--r--sound/pci/hda/patch_realtek.c33
-rw-r--r--sound/soc/codecs/nau8824.c17
-rw-r--r--sound/soc/codecs/nau8824.h1
-rw-r--r--sound/soc/codecs/rt5640.c64
-rw-r--r--sound/soc/codecs/rt5640.h14
-rw-r--r--sound/soc/codecs/tas2770.c3
-rw-r--r--sound/soc/fsl/imx-card.c4
-rw-r--r--sound/soc/intel/boards/sof_sdw.c10
-rw-r--r--sound/usb/endpoint.c23
-rw-r--r--sound/usb/endpoint.h6
-rw-r--r--sound/usb/pcm.c14
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h5
-rw-r--r--tools/include/linux/gfp.h21
-rw-r--r--tools/include/linux/gfp_types.h1
-rw-r--r--tools/include/nolibc/arch-riscv.h2
-rw-r--r--tools/include/nolibc/sys.h4
-rw-r--r--tools/lib/perf/evlist.c5
-rw-r--r--tools/memory-model/Documentation/litmus-tests.txt37
-rw-r--r--tools/perf/builtin-record.c2
-rw-r--r--tools/perf/tests/mmap-basic.c3
-rw-r--r--tools/perf/tests/perf-record.c2
-rwxr-xr-xtools/perf/tests/shell/record.sh2
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters_cgrp.sh83
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh3
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c5
-rw-r--r--tools/perf/tests/wp.c10
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/arm-spe.c2
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c10
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.bpf.c13
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c18
-rw-r--r--tools/perf/util/genelf.c14
-rw-r--r--tools/perf/util/genelf.h4
-rw-r--r--tools/perf/util/parse-events-hybrid.c21
-rw-r--r--tools/perf/util/parse-events.c39
-rw-r--r--tools/perf/util/parse-events.h1
-rw-r--r--tools/perf/util/print-events.c39
-rw-r--r--tools/perf/util/scripting-engines/Build2
-rw-r--r--tools/perf/util/symbol-elf.c7
-rw-r--r--tools/perf/util/synthetic-events.c17
-rw-r--r--tools/testing/nvdimm/test/ndtest.c77
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/drivers/net/bonding/Makefile6
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh49
-rw-r--r--tools/testing/selftests/drivers/net/bonding/config1
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh109
-rw-r--r--tools/testing/selftests/drivers/net/bonding/lag_lib.sh61
-rw-r--r--tools/testing/selftests/drivers/net/team/Makefile6
-rw-r--r--tools/testing/selftests/drivers/net/team/config3
-rwxr-xr-xtools/testing/selftests/drivers/net/team/dev_addr_lists.sh51
-rw-r--r--tools/testing/selftests/kvm/Makefile11
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c25
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h1
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c20
-rw-r--r--tools/testing/selftests/kvm/lib/string_override.c39
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c20
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c34
-rw-r--r--tools/testing/selftests/landlock/Makefile19
-rw-r--r--tools/testing/selftests/lib.mk4
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multicast.sh92
-rwxr-xr-xtools/testing/selftests/net/forwarding/sch_red.sh1
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_concat_range.sh65
-rw-r--r--tools/testing/selftests/nolibc/.gitignore4
-rw-r--r--tools/testing/selftests/nolibc/Makefile135
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c757
-rw-r--r--tools/testing/selftests/timens/Makefile2
-rw-r--r--tools/testing/selftests/timens/vfork_exec.c90
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile2
-rw-r--r--tools/virtio/linux/virtio.h3
-rw-r--r--tools/virtio/linux/virtio_config.h5
678 files changed, 9278 insertions, 5487 deletions
diff --git a/.mailmap b/.mailmap
index d175777af078..191778125ef1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -71,6 +71,9 @@ Ben M Cahill <[email protected]>
Björn Steinbrink <[email protected]>
diff --git a/Documentation/ABI/testing/sysfs-bus-bcma b/Documentation/ABI/testing/sysfs-bus-bcma
index 721b4aea3020..e93d3ddca844 100644
--- a/Documentation/ABI/testing/sysfs-bus-bcma
+++ b/Documentation/ABI/testing/sysfs-bus-bcma
@@ -3,7 +3,7 @@ Date: May 2011
KernelVersion: 3.0
Contact: Rafał Miłecki <[email protected]>
Description:
- Each BCMA core has it's manufacturer id. See
+ Each BCMA core has its manufacturer id. See
include/linux/bcma/bcma.h for possible values.
What: /sys/bus/bcma/devices/.../id
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
index 8fe787cc4ab7..5a4f2091ac37 100644
--- a/Documentation/ABI/testing/sysfs-bus-fcoe
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -31,7 +31,7 @@ Description: 'FCoE Controller' instances on the fcoe bus.
1) Write interface name to ctlr_create 2) Configure the FCoE
Controller (ctlr_X) 3) Enable the FCoE Controller to begin
discovery and login. The FCoE Controller is destroyed by
- writing it's name, i.e. ctlr_X to the ctlr_delete file.
+ writing its name, i.e. ctlr_X to the ctlr_delete file.
Attributes:
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity b/Documentation/ABI/testing/sysfs-bus-iio-proximity
index 3aac6dab8775..9b9d1cc9b703 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity
@@ -18,7 +18,7 @@ Description:
on the signal from which time of flight measurements are
taken.
The appropriate values to take is dependent on both the
- sensor and it's operating environment:
+ sensor and its operating environment:
* as3935 (0-31 range)
18 = indoors (default)
14 = outdoors
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 760c889b6cd1..f54867cadb0f 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -296,7 +296,7 @@ Description: Processor frequency boosting control
This switch controls the boost setting for the whole system.
Boosting allows the CPU and the firmware to run at a frequency
- beyond it's nominal limit.
+ beyond its nominal limit.
More details can be found in
Documentation/admin-guide/pm/cpufreq.rst
diff --git a/Documentation/ABI/testing/sysfs-platform-chipidea-usb2 b/Documentation/ABI/testing/sysfs-platform-chipidea-usb2
index b0f4684a83fe..b9f7d924f28a 100644
--- a/Documentation/ABI/testing/sysfs-platform-chipidea-usb2
+++ b/Documentation/ABI/testing/sysfs-platform-chipidea-usb2
@@ -2,8 +2,8 @@ What: /sys/bus/platform/devices/ci_hdrc.0/role
Date: Mar 2017
Contact: Peter Chen <[email protected]>
Description:
- It returns string "gadget" or "host" when read it, it indicates
- current controller role.
+ When read, it returns string "gadget" or "host", indicating
+ the current controller role.
- It will do role switch when write "gadget" or "host" to it.
+ It will do role switch when "gadget" or "host" is written to it.
Only controller at dual-role configuration supports writing.
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 90ec4987074b..f99d433ff311 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -152,7 +152,7 @@ Description:
case further investigation is required to determine which
device is causing the problem. Note that genuine RTC clock
values (such as when pm_trace has not been used), can still
- match a device and output it's name here.
+ match a device and output its name here.
What: /sys/power/pm_async
Date: January 2009
diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst
index 42cc5d891bd2..048c5bc1f813 100644
--- a/Documentation/RCU/checklist.rst
+++ b/Documentation/RCU/checklist.rst
@@ -66,8 +66,13 @@ over a rather long period of time, but improvements are always welcome!
As a rough rule of thumb, any dereference of an RCU-protected
pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(),
rcu_read_lock_sched(), or by the appropriate update-side lock.
- Disabling of preemption can serve as rcu_read_lock_sched(), but
- is less readable and prevents lockdep from detecting locking issues.
+ Explicit disabling of preemption (preempt_disable(), for example)
+ can serve as rcu_read_lock_sched(), but is less readable and
+ prevents lockdep from detecting locking issues.
+
+ Please note that you *cannot* rely on code known to be built
+ only in non-preemptible kernels. Such code can and will break,
+ especially in kernels built with CONFIG_PREEMPT_COUNT=y.
Letting RCU-protected pointers "leak" out of an RCU read-side
critical section is every bit as bad as letting them leak out
@@ -185,6 +190,9 @@ over a rather long period of time, but improvements are always welcome!
5. If call_rcu() or call_srcu() is used, the callback function will
be called from softirq context. In particular, it cannot block.
+ If you need the callback to block, run that code in a workqueue
+ handler scheduled from the callback. The queue_rcu_work()
+ function does this for you in the case of call_rcu().
6. Since synchronize_rcu() can block, it cannot be called
from any sort of irq context. The same rule applies
@@ -297,7 +305,8 @@ over a rather long period of time, but improvements are always welcome!
the machine.
d. Periodically invoke synchronize_rcu(), permitting a limited
- number of updates per grace period.
+ number of updates per grace period. Better yet, periodically
+ invoke rcu_barrier() to wait for all outstanding callbacks.
The same cautions apply to call_srcu() and kfree_rcu().
@@ -477,6 +486,6 @@ over a rather long period of time, but improvements are always welcome!
So if you need to wait for both an RCU grace period and for
all pre-existing call_rcu() callbacks, you will need to execute
both rcu_barrier() and synchronize_rcu(), if necessary, using
- something like workqueues to to execute them concurrently.
+ something like workqueues to execute them concurrently.
See rcubarrier.rst for more information.
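
The new checklist text above suggests queue_rcu_work() when an RCU callback needs to block. A minimal sketch of that pattern, assuming an illustrative struct foo and reclaim helper that are not part of this patch::

  #include <linux/workqueue.h>
  #include <linux/slab.h>

  struct foo {
          struct rcu_work rwork;
          /* ... payload ... */
  };

  /* Runs from a workqueue after a grace period, so it may block. */
  static void foo_reclaim_workfn(struct work_struct *work)
  {
          struct foo *fp = container_of(to_rcu_work(work), struct foo, rwork);

          kfree(fp);
  }

  static void foo_release(struct foo *fp)
  {
          INIT_RCU_WORK(&fp->rwork, foo_reclaim_workfn);
          queue_rcu_work(system_wq, &fp->rwork);
  }

queue_rcu_work() waits for a grace period and then runs the handler in process context, which is what permits blocking.
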
diff --git a/Documentation/RCU/lockdep.rst b/Documentation/RCU/lockdep.rst
index cc860a0c296b..a94f55991a71 100644
--- a/Documentation/RCU/lockdep.rst
+++ b/Documentation/RCU/lockdep.rst
@@ -61,7 +61,7 @@ checking of rcu_dereference() primitives:
rcu_access_pointer(p):
Return the value of the pointer and omit all barriers,
but retain the compiler constraints that prevent duplicating
- or coalescsing. This is useful when when testing the
+ or coalescsing. This is useful when testing the
value of the pointer itself, for example, against NULL.
The rcu_dereference_check() check expression can be any boolean
diff --git a/Documentation/RCU/rcu_dereference.rst b/Documentation/RCU/rcu_dereference.rst
index 0b418a5b243c..81e828c8313b 100644
--- a/Documentation/RCU/rcu_dereference.rst
+++ b/Documentation/RCU/rcu_dereference.rst
@@ -128,10 +128,16 @@ Follow these rules to keep your RCU code working properly:
This sort of comparison occurs frequently when scanning
RCU-protected circular linked lists.
- Note that if checks for being within an RCU read-side
- critical section are not required and the pointer is never
- dereferenced, rcu_access_pointer() should be used in place
- of rcu_dereference().
+ Note that if the pointer comparison is done outside
+ of an RCU read-side critical section, and the pointer
+ is never dereferenced, rcu_access_pointer() should be
+ used in place of rcu_dereference(). In most cases,
+ it is best to avoid accidental dereferences by testing
+ the rcu_access_pointer() return value directly, without
+ assigning it to a variable.
+
+ Within an RCU read-side critical section, there is little
+ reason to use rcu_access_pointer().
- The comparison is against a pointer that references memory
that was initialized "a long time ago." The reason
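
The revised rcu_dereference.rst wording above distinguishes rcu_access_pointer() from rcu_dereference(). A hedged sketch of the recommended usage, with gp standing in for some global __rcu pointer (illustrative only)::

  #include <linux/rcupdate.h>

  struct foo;
  static struct foo __rcu *gp;

  /*
   * Called outside any read-side critical section: the pointer is only
   * compared against NULL, never dereferenced, so rcu_access_pointer()
   * suffices and its return value is tested directly rather than being
   * assigned to a local variable.
   */
  static bool foo_is_registered(void)
  {
          return rcu_access_pointer(gp) != NULL;
  }

Testing the return value directly, rather than storing it, avoids the accidental dereference the text warns about.
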
diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst
index 77ea260efd12..1c747ac3f2c8 100644
--- a/Documentation/RCU/whatisRCU.rst
+++ b/Documentation/RCU/whatisRCU.rst
@@ -6,13 +6,15 @@ What is RCU? -- "Read, Copy, Update"
Please note that the "What is RCU?" LWN series is an excellent place
to start learning about RCU:
-| 1. What is RCU, Fundamentally? http://lwn.net/Articles/262464/
-| 2. What is RCU? Part 2: Usage http://lwn.net/Articles/263130/
-| 3. RCU part 3: the RCU API http://lwn.net/Articles/264090/
-| 4. The RCU API, 2010 Edition http://lwn.net/Articles/418853/
-| 2010 Big API Table http://lwn.net/Articles/419086/
-| 5. The RCU API, 2014 Edition http://lwn.net/Articles/609904/
-| 2014 Big API Table http://lwn.net/Articles/609973/
+| 1. What is RCU, Fundamentally? https://lwn.net/Articles/262464/
+| 2. What is RCU? Part 2: Usage https://lwn.net/Articles/263130/
+| 3. RCU part 3: the RCU API https://lwn.net/Articles/264090/
+| 4. The RCU API, 2010 Edition https://lwn.net/Articles/418853/
+| 2010 Big API Table https://lwn.net/Articles/419086/
+| 5. The RCU API, 2014 Edition https://lwn.net/Articles/609904/
+| 2014 Big API Table https://lwn.net/Articles/609973/
+| 6. The RCU API, 2019 Edition https://lwn.net/Articles/777036/
+| 2019 Big API Table https://lwn.net/Articles/777165/
What is RCU?
@@ -915,13 +917,18 @@ which an RCU reference is held include:
The understanding that RCU provides a reference that only prevents a
change of type is particularly visible with objects allocated from a
slab cache marked ``SLAB_TYPESAFE_BY_RCU``. RCU operations may yield a
-reference to an object from such a cache that has been concurrently
-freed and the memory reallocated to a completely different object,
-though of the same type. In this case RCU doesn't even protect the
-identity of the object from changing, only its type. So the object
-found may not be the one expected, but it will be one where it is safe
-to take a reference or spinlock and then confirm that the identity
-matches the expectations.
+reference to an object from such a cache that has been concurrently freed
+and the memory reallocated to a completely different object, though of
+the same type. In this case RCU doesn't even protect the identity of the
+object from changing, only its type. So the object found may not be the
+one expected, but it will be one where it is safe to take a reference
+(and then potentially acquiring a spinlock), allowing subsequent code
+to check whether the identity matches expectations. It is tempting
+to simply acquire the spinlock without first taking the reference, but
+unfortunately any spinlock in a ``SLAB_TYPESAFE_BY_RCU`` object must be
+initialized after each and every call to kmem_cache_alloc(), which renders
+reference-free spinlock acquisition completely unsafe. Therefore, when
+using ``SLAB_TYPESAFE_BY_RCU``, make proper use of a reference counter.
With traditional reference counting -- such as that implemented by the
kref library in Linux -- there is typically code that runs when the last
@@ -1057,14 +1064,20 @@ SRCU: Initialization/cleanup::
init_srcu_struct
cleanup_srcu_struct
-All: lockdep-checked RCU-protected pointer access::
+All: lockdep-checked RCU utility APIs::
- rcu_access_pointer
- rcu_dereference_raw
RCU_LOCKDEP_WARN
rcu_sleep_check
RCU_NONIDLE
+All: Unchecked RCU-protected pointer access::
+
+ rcu_dereference_raw
+
+All: Unchecked RCU-protected pointer access with dereferencing prohibited::
+
+ rcu_access_pointer
+
See the comment headers in the source code (or the docbook generated
from them) for more information.
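
A sketch of the take-a-reference-then-recheck pattern that the updated ``SLAB_TYPESAFE_BY_RCU`` paragraph describes; the object layout, list head, and obj_put() helper are assumptions made for illustration::

  #include <linux/rculist.h>
  #include <linux/refcount.h>

  struct obj {
          refcount_t ref;
          int key;
          struct hlist_node node;
  };

  void obj_put(struct obj *p);    /* assumed helper: drops ref, frees at zero */

  static struct obj *obj_lookup(struct hlist_head *head, int key)
  {
          struct obj *p, *found = NULL;

          rcu_read_lock();
          hlist_for_each_entry_rcu(p, head, node) {
                  if (p->key != key)
                          continue;
                  /* Take the reference before trusting anything else. */
                  if (!refcount_inc_not_zero(&p->ref))
                          break;          /* object is being freed right now */
                  if (p->key == key)      /* recheck: slot may have been recycled */
                          found = p;
                  else
                          obj_put(p);
                  break;
          }
          rcu_read_unlock();
          return found;
  }

Because the reference is taken before the identity check, acquiring any per-object spinlock without it would be unsafe, as the text above explains.
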
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 9eb6b9042f75..9a969c0157f1 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -262,8 +262,6 @@ Compiling the kernel
- Make sure you have at least gcc 5.1 available.
For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
- Please note that you can still run a.out user programs with this kernel.
-
- Do a ``make`` to create a compressed kernel image. It is also
possible to do ``make install`` if you have lilo installed to suit the
kernel makefiles, but you may want to check your particular lilo setup first.
@@ -332,85 +330,10 @@ Compiling the kernel
If something goes wrong
-----------------------
- - If you have problems that seem to be due to kernel bugs, please check
- the file MAINTAINERS to see if there is a particular person associated
- with the part of the kernel that you are having trouble with. If there
- isn't anyone listed there, then the second best thing is to mail
- them to me ([email protected]), and possibly to any other
- relevant mailing-list or to the newsgroup.
-
- - In all bug-reports, *please* tell what kernel you are talking about,
- how to duplicate the problem, and what your setup is (use your common
- sense). If the problem is new, tell me so, and if the problem is
- old, please try to tell me when you first noticed it.
-
- - If the bug results in a message like::
-
- unable to handle kernel paging request at address C0000010
- Oops: 0002
- EIP: 0010:XXXXXXXX
- eax: xxxxxxxx ebx: xxxxxxxx ecx: xxxxxxxx edx: xxxxxxxx
- esi: xxxxxxxx edi: xxxxxxxx ebp: xxxxxxxx
- ds: xxxx es: xxxx fs: xxxx gs: xxxx
- Pid: xx, process nr: xx
- xx xx xx xx xx xx xx xx xx xx
-
- or similar kernel debugging information on your screen or in your
- system log, please duplicate it *exactly*. The dump may look
- incomprehensible to you, but it does contain information that may
- help debugging the problem. The text above the dump is also
- important: it tells something about why the kernel dumped code (in
- the above example, it's due to a bad kernel pointer). More information
- on making sense of the dump is in Documentation/admin-guide/bug-hunting.rst
-
- - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
- as is, otherwise you will have to use the ``ksymoops`` program to make
- sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred).
- This utility can be downloaded from
- https://www.kernel.org/pub/linux/utils/kernel/ksymoops/ .
- Alternatively, you can do the dump lookup by hand:
-
- - In debugging dumps like the above, it helps enormously if you can
- look up what the EIP value means. The hex value as such doesn't help
- me or anybody else very much: it will depend on your particular
- kernel setup. What you should do is take the hex value from the EIP
- line (ignore the ``0010:``), and look it up in the kernel namelist to
- see which kernel function contains the offending address.
-
- To find out the kernel function name, you'll need to find the system
- binary associated with the kernel that exhibited the symptom. This is
- the file 'linux/vmlinux'. To extract the namelist and match it against
- the EIP from the kernel crash, do::
-
- nm vmlinux | sort | less
-
- This will give you a list of kernel addresses sorted in ascending
- order, from which it is simple to find the function that contains the
- offending address. Note that the address given by the kernel
- debugging messages will not necessarily match exactly with the
- function addresses (in fact, that is very unlikely), so you can't
- just 'grep' the list: the list will, however, give you the starting
- point of each kernel function, so by looking for the function that
- has a starting address lower than the one you are searching for but
- is followed by a function with a higher address you will find the one
- you want. In fact, it may be a good idea to include a bit of
- "context" in your problem report, giving a few lines around the
- interesting one.
-
- If you for some reason cannot do the above (you have a pre-compiled
- kernel image or similar), telling me as much about your setup as
- possible will help. Please read
- 'Documentation/admin-guide/reporting-issues.rst' for details.
-
- - Alternatively, you can use gdb on a running kernel. (read-only; i.e. you
- cannot change values or set break points.) To do this, first compile the
- kernel with -g; edit arch/x86/Makefile appropriately, then do a ``make
- clean``. You'll also need to enable CONFIG_PROC_FS (via ``make config``).
-
- After you've rebooted with the new kernel, do ``gdb vmlinux /proc/kcore``.
- You can now use all the usual gdb commands. The command to look up the
- point where your system crashed is ``l *0xXXXXXXXX``. (Replace the XXXes
- with the EIP value.)
-
- gdb'ing a non-running kernel currently fails because ``gdb`` (wrongly)
- disregards the starting offset for which the kernel is compiled.
+If you have problems that seem to be due to kernel bugs, please follow the
+instructions at 'Documentation/admin-guide/reporting-issues.rst'.
+
+Hints on understanding kernel bug reports are in
+'Documentation/admin-guide/bug-hunting.rst'. More on debugging the kernel
+with gdb is in 'Documentation/dev-tools/gdb-kernel-debugging.rst' and
+'Documentation/dev-tools/kgdb.rst'.
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 2ce2a38cdd55..c4dcdb3d0d45 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -613,6 +613,7 @@ kernel command line.
eibrs enhanced IBRS
eibrs,retpoline enhanced IBRS + Retpolines
eibrs,lfence enhanced IBRS + LFENCE
+ ibrs use IBRS to protect kernel
Not specifying this option is equivalent to
spectre_v2=auto.
diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
index 8419019b6a88..6726f439958c 100644
--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
+++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
@@ -200,7 +200,7 @@ prb
A pointer to the printk ringbuffer (struct printk_ringbuffer). This
may be pointing to the static boot ringbuffer or the dynamically
-allocated ringbuffer, depending on when the the core dump occurred.
+allocated ringbuffer, depending on when the core dump occurred.
Used by user-space tools to read the active kernel log buffer.
printk_rb_static
diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst
index 8e2727dc18d4..19f27c0d92e0 100644
--- a/Documentation/admin-guide/mm/hugetlbpage.rst
+++ b/Documentation/admin-guide/mm/hugetlbpage.rst
@@ -65,7 +65,7 @@ HugePages_Surp
may be temporarily larger than the maximum number of surplus huge
pages when the system is under memory pressure.
Hugepagesize
- is the default hugepage size (in Kb).
+ is the default hugepage size (in kB).
Hugetlb
is the total amount of memory (in kB), consumed by huge
pages of all sizes.
diff --git a/Documentation/bpf/instruction-set.rst b/Documentation/bpf/instruction-set.rst
index 1b0e6711dec9..0ac7ae40be37 100644
--- a/Documentation/bpf/instruction-set.rst
+++ b/Documentation/bpf/instruction-set.rst
@@ -133,7 +133,7 @@ code field of ``BPF_END``.
The byte swap instructions operate on the destination register
only and do not use a separate source register or immediate value.
-The 1-bit source operand field in the opcode is used to to select what byte
+The 1-bit source operand field in the opcode is used to select what byte
order the operation convert from or to:
========= ===== =================================================
diff --git a/Documentation/bpf/map_cgroup_storage.rst b/Documentation/bpf/map_cgroup_storage.rst
index cab9543017bf..8e5fe532c07e 100644
--- a/Documentation/bpf/map_cgroup_storage.rst
+++ b/Documentation/bpf/map_cgroup_storage.rst
@@ -31,7 +31,7 @@ The map uses key of type of either ``__u64 cgroup_inode_id`` or
};
``cgroup_inode_id`` is the inode id of the cgroup directory.
-``attach_type`` is the the program's attach type.
+``attach_type`` is the program's attach type.
Linux 5.9 added support for type ``__u64 cgroup_inode_id`` as the key type.
When this key type is used, then all attach types of the particular cgroup and
@@ -155,7 +155,7 @@ However, the BPF program can still only associate with one map of each type
``BPF_MAP_TYPE_CGROUP_STORAGE`` or more than one
``BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE``.
-In all versions, userspace may use the the attach parameters of cgroup and
+In all versions, userspace may use the attach parameters of cgroup and
attach type pair in ``struct bpf_cgroup_storage_key`` as the key to the BPF map
APIs to read or update the storage for a given attachment. For Linux 5.9
attach type shared storages, only the first value in the struct, cgroup inode
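
For context, the key the map_cgroup_storage.rst text refers to pairs a cgroup inode id with an attach type. A hedged user-space lookup sketch, assuming a map_fd obtained elsewhere, an egress attach type, and a single-u64 value::

  #include <linux/bpf.h>
  #include <bpf/bpf.h>

  static int read_cgroup_storage(int map_fd, __u64 cgrp_inode_id)
  {
          struct bpf_cgroup_storage_key key = {
                  .cgroup_inode_id = cgrp_inode_id,
                  .attach_type     = BPF_CGROUP_INET_EGRESS,
          };
          __u64 value;

          /* 0 on success; "value" receives the storage for this pair. */
          return bpf_map_lookup_elem(map_fd, &key, &value);
  }
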
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 255384d094bf..b50c85083149 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -15,6 +15,18 @@
import sys
import os
import sphinx
+import shutil
+
+# helper
+# ------
+
+def have_command(cmd):
+ """Search ``cmd`` in the ``PATH`` environment.
+
+ If found, return True.
+ If not found, return False.
+ """
+ return shutil.which(cmd) is not None
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
@@ -107,7 +119,32 @@ else:
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
-extensions.append("sphinx.ext.imgmath")
+# Load math renderer:
+# For html builder, load imgmath only when its dependencies are met.
+# mathjax is the default math renderer since Sphinx 1.8.
+have_latex = have_command('latex')
+have_dvipng = have_command('dvipng')
+load_imgmath = have_latex and have_dvipng
+
+# Respect SPHINX_IMGMATH (for html docs only)
+if 'SPHINX_IMGMATH' in os.environ:
+ env_sphinx_imgmath = os.environ['SPHINX_IMGMATH']
+ if 'yes' in env_sphinx_imgmath:
+ load_imgmath = True
+ elif 'no' in env_sphinx_imgmath:
+ load_imgmath = False
+ else:
+ sys.stderr.write("Unknown env SPHINX_IMGMATH=%s ignored.\n" % env_sphinx_imgmath)
+
+# Always load imgmath for Sphinx <1.8 or for epub docs
+load_imgmath = (load_imgmath or (major == 1 and minor < 8)
+ or 'epub' in sys.argv)
+
+if load_imgmath:
+ extensions.append("sphinx.ext.imgmath")
+ math_renderer = 'imgmath'
+else:
+ math_renderer = 'mathjax'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -333,7 +370,8 @@ html_static_path = ['sphinx-static']
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# Note that the RTD theme ignores this.
+html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
diff --git a/Documentation/asm-annotations.rst b/Documentation/core-api/asm-annotations.rst
index a64f2ca469d4..bc514ed59887 100644
--- a/Documentation/asm-annotations.rst
+++ b/Documentation/core-api/asm-annotations.rst
@@ -43,10 +43,11 @@ annotated objects like this, tools can be run on them to generate more useful
information. In particular, on properly annotated objects, ``objtool`` can be
run to check and fix the object if needed. Currently, ``objtool`` can report
missing frame pointer setup/destruction in functions. It can also
-automatically generate annotations for :doc:`ORC unwinder <x86/orc-unwinder>`
+automatically generate annotations for the ORC unwinder
+(Documentation/x86/orc-unwinder.rst)
for most code. Both of these are especially important to support reliable
-stack traces which are in turn necessary for :doc:`Kernel live patching
-<livepatch/livepatch>`.
+stack traces which are in turn necessary for kernel live patching
+(Documentation/livepatch/livepatch.rst).
Caveat and Discussion
---------------------
diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst
index c6f4ba2fb32d..f75778d37488 100644
--- a/Documentation/core-api/cpu_hotplug.rst
+++ b/Documentation/core-api/cpu_hotplug.rst
@@ -560,7 +560,7 @@ available:
* cpuhp_state_remove_instance(state, node)
* cpuhp_state_remove_instance_nocalls(state, node)
-The arguments are the same as for the the cpuhp_state_add_instance*()
+The arguments are the same as for the cpuhp_state_add_instance*()
variants above.
The functions differ in the way how the installed callbacks are treated:
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index dc95df462eea..b0e7b4771fff 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -23,6 +23,7 @@ it.
printk-formats
printk-index
symbol-namespaces
+ asm-annotations
Data structures and low-level utilities
=======================================
@@ -44,6 +45,8 @@ Library functionality that is used throughout the kernel.
this_cpu_ops
timekeeping
errseq
+ wrappers/atomic_t
+ wrappers/atomic_bitops
Low level entry and exit
========================
@@ -67,6 +70,7 @@ Documentation/locking/index.rst for more related documentation.
local_ops
padata
../RCU/index
+ wrappers/memory-barriers.rst
Low-level hardware management
=============================
diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst
index d30b4d0a9769..f88a6ee67a35 100644
--- a/Documentation/core-api/irq/irq-domain.rst
+++ b/Documentation/core-api/irq/irq-domain.rst
@@ -71,7 +71,7 @@ variety of methods:
Note that irq domain lookups must happen in contexts that are
compatible with a RCU read-side critical section.
-The irq_create_mapping() function must be called *atleast once*
+The irq_create_mapping() function must be called *at least once*
before any call to irq_find_mapping(), lest the descriptor will not
be allocated.
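+
+A minimal sketch of the required ordering (the ``domain`` pointer and the
+``hwirq`` number are assumed to come from the driver)::
+
+    unsigned int virq;
+
+    /* creates the mapping and allocates the irq descriptor */
+    virq = irq_create_mapping(domain, hwirq);
+    if (!virq)
+        return -ENOMEM;
+
+    /* only now is a lookup guaranteed to find the descriptor */
+    WARN_ON(irq_find_mapping(domain, hwirq) != virq);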
diff --git a/Documentation/core-api/wrappers/atomic_bitops.rst b/Documentation/core-api/wrappers/atomic_bitops.rst
new file mode 100644
index 000000000000..bf24e4081a8f
--- /dev/null
+++ b/Documentation/core-api/wrappers/atomic_bitops.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+ This is a simple wrapper to bring atomic_bitops.txt into the RST world
+ until such a time as that file can be converted directly.
+
+=============
+Atomic bitops
+=============
+
+.. raw:: latex
+
+ \footnotesize
+
+.. include:: ../../atomic_bitops.txt
+ :literal:
+
+.. raw:: latex
+
+ \normalsize
diff --git a/Documentation/core-api/wrappers/atomic_t.rst b/Documentation/core-api/wrappers/atomic_t.rst
new file mode 100644
index 000000000000..ed109a964c77
--- /dev/null
+++ b/Documentation/core-api/wrappers/atomic_t.rst
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+ This is a simple wrapper to bring atomic_t.txt into the RST world
+ until such a time as that file can be converted directly.
+
+============
+Atomic types
+============
+
+.. raw:: latex
+
+ \footnotesize
+
+.. include:: ../../atomic_t.txt
+ :literal:
+
+.. raw:: latex
+
+ \normalsize
+
diff --git a/Documentation/core-api/wrappers/memory-barriers.rst b/Documentation/core-api/wrappers/memory-barriers.rst
new file mode 100644
index 000000000000..532460b5e3eb
--- /dev/null
+++ b/Documentation/core-api/wrappers/memory-barriers.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+ This is a simple wrapper to bring memory-barriers.txt into the RST world
+ until such a time as that file can be converted directly.
+
+============================
+Linux kernel memory barriers
+============================
+
+.. raw:: latex
+
+ \footnotesize
+
+.. include:: ../../memory-barriers.txt
+ :literal:
+
+.. raw:: latex
+
+ \normalsize
diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
index 8a9f3559335b..7e14e26676ec 100644
--- a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -34,8 +34,8 @@ Example:
Use specific request line passing from dma
For example, MMC request line is 5
- sdhci: sdhci@98e00000 {
- compatible = "moxa,moxart-sdhci";
+ mmc: mmc@98e00000 {
+ compatible = "moxa,moxart-mmc";
reg = <0x98e00000 0x5C>;
interrupts = <5 0>;
clocks = <&clk_apb>;
diff --git a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
index a41588763786..bf396e9466aa 100644
--- a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
@@ -57,6 +57,11 @@ properties:
- description: interrupt ID for I2C event
- description: interrupt ID for I2C error
+  interrupt-names:
+    items:
+      - const: event
+      - const: error
+
resets:
maxItems: 1
@@ -92,6 +97,8 @@ properties:
- description: register offset within syscfg
- description: register bitmask for FMP bit
+ wakeup-source: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml b/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
index 445e46feda69..2b39fce5f650 100644
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: i.MX8M DDR Controller
maintainers:
- - Leonard Crestez <[email protected]>
+ - Peng Fan <[email protected]>
description:
The DDRC block is integrated in i.MX8M for interfacing with DDR based
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
index 59663e897dae..a202b6c6561d 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
@@ -40,6 +40,7 @@ properties:
patternProperties:
'^opp-?[0-9]+$':
type: object
+ additionalProperties: false
properties:
opp-hz: true
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml b/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
index 14a7a689ad6d..df8442fb11f0 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
@@ -19,6 +19,7 @@ properties:
patternProperties:
'^opp-?[0-9]+$':
type: object
+ additionalProperties: false
properties:
opp-level: true
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 1228b85f6f77..c708cec889af 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -48,10 +48,6 @@ or ``virtualenv``, depending on how your distribution packaged Python 3.
on the Sphinx version, it should be installed separately,
with ``pip install sphinx_rtd_theme``.
- #) Some ReST pages contain math expressions. Due to the way Sphinx works,
- those expressions are written using LaTeX notation. It needs texlive
- installed with amsfonts and amsmath in order to evaluate them.
-
In summary, if you want to install Sphinx version 2.4.4, you should do::
$ virtualenv sphinx_2.4.4
@@ -86,6 +82,27 @@ Depending on the distribution, you may also need to install a series of
``texlive`` packages that provide the minimal set of functionalities
required for ``XeLaTeX`` to work.
+Math Expressions in HTML
+------------------------
+
+Some ReST pages contain math expressions. Due to the way Sphinx works,
+those expressions are written using LaTeX notation.
+There are two options for Sphinx to render math expressions in html output.
+One is an extension called `imgmath`_ which converts math expressions into
+images and embeds them in html pages.
+The other is an extension called `mathjax`_ which delegates math rendering
+to JavaScript capable web browsers.
+The former was the only option for pre-6.1 kernel documentation and it
+requires quite a few texlive packages, including amsfonts and amsmath,
+among others.
+
+Since kernel release 6.1, html pages with math expressions can be built
+without installing any texlive packages. See `Choice of Math Renderer`_ for
+further info.
+
+.. _imgmath: https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-sphinx.ext.imgmath
+.. _mathjax: https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-sphinx.ext.mathjax
+
.. _sphinx-pre-install:
Checking for Sphinx dependencies
@@ -164,6 +181,38 @@ To remove the generated documentation, run ``make cleandocs``.
as well would improve the quality of images embedded in PDF
documents, especially for kernel releases 5.18 and later.
+Choice of Math Renderer
+-----------------------
+
+Since kernel release 6.1, mathjax works as a fallback math renderer for
+html output.\ [#sph1_8]_
+
+The math renderer is chosen depending on the available commands, as shown
+in the table below:
+
+.. table:: Math Renderer Choices for HTML
+
+    ============= ================= ============
+    Math renderer Required commands Image format
+    ============= ================= ============
+    imgmath       latex, dvipng     PNG (raster)
+    mathjax
+    ============= ================= ============
+
+The choice can be overridden by setting an environment variable
+``SPHINX_IMGMATH`` as shown below:
+
+.. table:: Effect of Setting ``SPHINX_IMGMATH``
+
+    ====================== ========
+    Setting                Renderer
+    ====================== ========
+    ``SPHINX_IMGMATH=yes`` imgmath
+    ``SPHINX_IMGMATH=no``  mathjax
+    ====================== ========
+
+.. [#sph1_8] Math renderer fallback requires Sphinx >= 1.8.
+
+
Writing Documentation
=====================
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 55272942e721..dc1b2353cea3 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -301,6 +301,7 @@ IO region
devm_release_region()
devm_release_resource()
devm_request_mem_region()
+ devm_request_free_mem_region()
devm_request_region()
devm_request_resource()
@@ -334,7 +335,7 @@ IRQ
devm_irq_alloc_descs_from()
devm_irq_alloc_generic_chip()
devm_irq_setup_generic_chip()
- devm_irq_sim_init()
+ devm_irq_domain_create_sim()
LED
devm_led_classdev_register()
@@ -392,7 +393,9 @@ PHY
PINCTRL
devm_pinctrl_get()
devm_pinctrl_put()
+ devm_pinctrl_get_select()
devm_pinctrl_register()
+ devm_pinctrl_register_and_init()
devm_pinctrl_unregister()
POWER
@@ -427,6 +430,8 @@ SLAVE DMA ENGINE
devm_acpi_dma_controller_register()
SPI
+ devm_spi_alloc_master()
+ devm_spi_alloc_slave()
devm_spi_register_master()
WATCHDOG
diff --git a/Documentation/driver-api/isa.rst b/Documentation/driver-api/isa.rst
index def4a7b690b5..3df1b1696524 100644
--- a/Documentation/driver-api/isa.rst
+++ b/Documentation/driver-api/isa.rst
@@ -100,7 +100,7 @@ I believe platform_data is available for this, but if rather not, moving
the isa_driver pointer to the private struct isa_dev is ofcourse fine as
well.
-Then, if the the driver did not provide a .match, it matches. If it did,
+Then, if the driver did not provide a .match, it matches. If it did,
the driver match() method is called to determine a match.
If it did **not** match, dev->platform_data is reset to indicate this to
diff --git a/Documentation/fb/udlfb.rst b/Documentation/fb/udlfb.rst
index 732b37db3504..99cfbb7a1922 100644
--- a/Documentation/fb/udlfb.rst
+++ b/Documentation/fb/udlfb.rst
@@ -86,17 +86,24 @@ Module Options
Special configuration for udlfb is usually unnecessary. There are a few
options, however.
-From the command line, pass options to modprobe
-modprobe udlfb fb_defio=0 console=1 shadow=1
+From the command line, pass options to modprobe::
-Or modify options on the fly at /sys/module/udlfb/parameters directory via
-sudo nano fb_defio
-change the parameter in place, and save the file.
+ modprobe udlfb fb_defio=0 console=1 shadow=1
-Unplug/replug USB device to apply with new settings
+Or change options on the fly by editing
+/sys/module/udlfb/parameters/PARAMETER_NAME ::
-Or for permanent option, create file like /etc/modprobe.d/udlfb.conf with text
-options udlfb fb_defio=0 console=1 shadow=1
+ cd /sys/module/udlfb/parameters
+ ls # to see a list of parameter names
+ sudo nano PARAMETER_NAME
+ # change the parameter in place, and save the file.
+
+Unplug/replug the USB device to apply the new settings.
+
+Or to apply options permanently, create a modprobe configuration file
+like /etc/modprobe.d/udlfb.conf with text::
+
+ options udlfb fb_defio=0 console=1 shadow=1
Accepted boolean options:
diff --git a/Documentation/filesystems/caching/backend-api.rst b/Documentation/filesystems/caching/backend-api.rst
index d7507becf674..3a199fc50828 100644
--- a/Documentation/filesystems/caching/backend-api.rst
+++ b/Documentation/filesystems/caching/backend-api.rst
@@ -122,7 +122,7 @@ volumes, calling::
to tell fscache that a volume has been withdrawn. This waits for all
outstanding accesses on the volume to complete before returning.
-When the the cache is completely withdrawn, fscache should be notified by
+When the cache is completely withdrawn, fscache should be notified by
calling::
void fscache_relinquish_cache(struct fscache_cache *cache);
diff --git a/Documentation/filesystems/ext4/super.rst b/Documentation/filesystems/ext4/super.rst
index 268888522e35..0152888cac29 100644
--- a/Documentation/filesystems/ext4/super.rst
+++ b/Documentation/filesystems/ext4/super.rst
@@ -456,15 +456,15 @@ The ext4 superblock is laid out as follows in
* - 0x277
- __u8
- s_lastcheck_hi
- - Upper 8 bits of the s_lastcheck_hi field.
+ - Upper 8 bits of the s_lastcheck field.
* - 0x278
- __u8
- s_first_error_time_hi
- - Upper 8 bits of the s_first_error_time_hi field.
+ - Upper 8 bits of the s_first_error_time field.
* - 0x279
- __u8
- s_last_error_time_hi
- - Upper 8 bits of the s_last_error_time_hi field.
+ - Upper 8 bits of the s_last_error_time field.
* - 0x27A
- __u8
- s_pad[2]
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index d0c09663dae8..17df9a02ccff 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -286,9 +286,8 @@ compress_algorithm=%s:%d Control compress algorithm and its compress level, now,
algorithm level range
lz4 3 - 16
zstd 1 - 22
-compress_log_size=%u Support configuring compress cluster size, the size will
- be 4KB * (1 << %u), 16KB is minimum size, also it's
- default size.
+compress_log_size=%u Support configuring compress cluster size. The size will
+ be 4KB * (1 << %u). The default and minimum sizes are 16KB.
compress_extension=%s Support adding specified extension, so that f2fs can enable
compression on those corresponding files, e.g. if all files
with '.ext' has high compression rate, we can set the '.ext'
diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst
index c1db8748389c..b9b31066aef2 100644
--- a/Documentation/filesystems/idmappings.rst
+++ b/Documentation/filesystems/idmappings.rst
@@ -661,7 +661,7 @@ idmappings::
mount idmapping: u0:k10000:r10000
Assume a file owned by ``u1000`` is read from disk. The filesystem maps this id
-to ``k21000`` according to it's idmapping. This is what is stored in the
+to ``k21000`` according to its idmapping. This is what is stored in the
inode's ``i_uid`` and ``i_gid`` fields.
When the caller queries the ownership of this file via ``stat()`` the kernel
diff --git a/Documentation/filesystems/qnx6.rst b/Documentation/filesystems/qnx6.rst
index fd13433d362c..523b798f04e7 100644
--- a/Documentation/filesystems/qnx6.rst
+++ b/Documentation/filesystems/qnx6.rst
@@ -176,7 +176,7 @@ Then userspace.
The requirement for a static, fixed preallocated system area comes from how
qnx6fs deals with writes.
-Each superblock got it's own half of the system area. So superblock #1
+Each superblock got its own half of the system area. So superblock #1
always uses blocks from the lower half while superblock #2 just writes to
blocks represented by the upper half bitmap system area bits.
diff --git a/Documentation/filesystems/spufs/spufs.rst b/Documentation/filesystems/spufs/spufs.rst
index 8a42859bb100..ca0441cbe37e 100644
--- a/Documentation/filesystems/spufs/spufs.rst
+++ b/Documentation/filesystems/spufs/spufs.rst
@@ -227,7 +227,7 @@ Files
from the data buffer, updating the value of the specified signal
notification register. The signal notification register will
either be replaced with the input data or will be updated to the
- bitwise OR or the old value and the input data, depending on the
+ bitwise OR of the old value and the input data, depending on the
contents of the signal1_type, or signal2_type respectively,
file.
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.rst b/Documentation/filesystems/xfs-delayed-logging-design.rst
index 4ef419f54663..6402ab8e370c 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.rst
+++ b/Documentation/filesystems/xfs-delayed-logging-design.rst
@@ -100,7 +100,7 @@ transactions together::
ntp = xfs_trans_dup(tp);
xfs_trans_commit(tp);
- xfs_log_reserve(ntp);
+ xfs_trans_reserve(ntp);
This results in a series of "rolling transactions" where the inode is locked
across the entire chain of transactions. Hence while this series of rolling
@@ -191,7 +191,7 @@ transaction rolling mechanism to re-reserve space on every transaction roll. We
know from the implementation of the permanent transactions how many transaction
rolls are likely for the common modifications that need to be made.
-For example, and inode allocation is typically two transactions - one to
+For example, an inode allocation is typically two transactions - one to
physically allocate a free inode chunk on disk, and another to allocate an inode
from an inode chunk that has free inodes in it. Hence for an inode allocation
transaction, we might set the reservation log count to a value of 2 to indicate
@@ -200,7 +200,7 @@ chain. Each time a permanent transaction rolls, it consumes an entire unit
reservation.
Hence when the permanent transaction is first allocated, the log space
-reservation is increases from a single unit reservation to multiple unit
+reservation is increased from a single unit reservation to multiple unit
reservations. That multiple is defined by the reservation log count, and this
means we can roll the transaction multiple times before we have to re-reserve
log space when we roll the transaction. This ensures that the common
@@ -259,7 +259,7 @@ the next transaction in the sequeunce, but we have none remaining. We cannot
sleep during the transaction commit process waiting for new log space to become
available, as we may end up on the end of the FIFO queue and the items we have
locked while we sleep could end up pinning the tail of the log before there is
-enough free space in the log to fulfil all of the pending reservations and
+enough free space in the log to fulfill all of the pending reservations and
then wake up transaction commit in progress.
To take a new reservation without sleeping requires us to be able to take a
@@ -551,14 +551,14 @@ Essentially, this shows that an item that is in the AIL can still be modified
and relogged, so any tracking must be separate to the AIL infrastructure. As
such, we cannot reuse the AIL list pointers for tracking committed items, nor
can we store state in any field that is protected by the AIL lock. Hence the
-committed item tracking needs it's own locks, lists and state fields in the log
+committed item tracking needs its own locks, lists and state fields in the log
item.
Similar to the AIL, tracking of committed items is done through a new list
called the Committed Item List (CIL). The list tracks log items that have been
committed and have formatted memory buffers attached to them. It tracks objects
in transaction commit order, so when an object is relogged it is removed from
-it's place in the list and re-inserted at the tail. This is entirely arbitrary
+its place in the list and re-inserted at the tail. This is entirely arbitrary
and done to make it easy for debugging - the last items in the list are the
ones that are most recently modified. Ordering of the CIL is not necessary for
transactional integrity (as discussed in the next section) so the ordering is
@@ -615,7 +615,7 @@ those changes into the current checkpoint context. We then initialise a new
context and attach that to the CIL for aggregation of new transactions.
This allows us to unlock the CIL immediately after transfer of all the
-committed items and effectively allow new transactions to be issued while we
+committed items and effectively allows new transactions to be issued while we
are formatting the checkpoint into the log. It also allows concurrent
checkpoints to be written into the log buffers in the case of log force heavy
workloads, just like the existing transaction commit code does. This, however,
@@ -884,9 +884,9 @@ pin the object the first time it is inserted into the CIL - if it is already in
the CIL during a transaction commit, then we do not pin it again. Because there
can be multiple outstanding checkpoint contexts, we can still see elevated pin
counts, but as each checkpoint completes the pin count will retain the correct
-value according to it's context.
+value according to its context.
-Just to make matters more slightly more complex, this checkpoint level context
+Just to make matters slightly more complex, this checkpoint level context
for the pin count means that the pinning of an item must take place under the
CIL commit/flush lock. If we pin the object outside this lock, we cannot
guarantee which context the pin count is associated with. This is because of
diff --git a/Documentation/i2c/dev-interface.rst b/Documentation/i2c/dev-interface.rst
index 73ad34849f99..c277a8e1202b 100644
--- a/Documentation/i2c/dev-interface.rst
+++ b/Documentation/i2c/dev-interface.rst
@@ -148,7 +148,7 @@ You can do plain I2C transactions by using read(2) and write(2) calls.
You do not need to pass the address byte; instead, set it through
ioctl I2C_SLAVE before you try to access the device.
-You can do SMBus level transactions (see documentation file smbus-protocol
+You can do SMBus level transactions (see documentation file smbus-protocol.rst
for details) through the following functions::
__s32 i2c_smbus_write_quick(int file, __u8 value);
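+
+For example, a one-byte register read with these helpers might look like the
+following sketch (assuming <linux/i2c-dev.h> and the libi2c <i2c/smbus.h>
+header from i2c-tools are included; the bus number, the chip address 0x50 and
+the register 0x10 are only placeholders)::
+
+    int file;
+    __s32 res;
+
+    file = open("/dev/i2c-1", O_RDWR);
+    if (file < 0 || ioctl(file, I2C_SLAVE, 0x50) < 0)
+        exit(1);
+
+    /* SMBus "read byte data" from register 0x10 */
+    res = i2c_smbus_read_byte_data(file, 0x10);
+    if (res < 0)
+        exit(1);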
diff --git a/Documentation/i2c/slave-interface.rst b/Documentation/i2c/slave-interface.rst
index 82ea3e1d6fe4..58fb143baee4 100644
--- a/Documentation/i2c/slave-interface.rst
+++ b/Documentation/i2c/slave-interface.rst
@@ -32,9 +32,9 @@ User manual
===========
I2C slave backends behave like standard I2C clients. So, you can instantiate
-them as described in the document 'instantiating-devices'. The only difference
-is that i2c slave backends have their own address space. So, you have to add
-0x1000 to the address you would originally request. An example for
+them as described in the document instantiating-devices.rst. The only
+difference is that i2c slave backends have their own address space. So, you
+have to add 0x1000 to the address you would originally request. An example for
instantiating the slave-eeprom driver from userspace at the 7 bit address 0x64
on bus 1::
diff --git a/Documentation/i2c/writing-clients.rst b/Documentation/i2c/writing-clients.rst
index e3b126cf4a3b..47f7cbf4ed1a 100644
--- a/Documentation/i2c/writing-clients.rst
+++ b/Documentation/i2c/writing-clients.rst
@@ -364,7 +364,7 @@ stop condition is issued between transaction. The i2c_msg structure
contains for each message the client address, the number of bytes of the
message and the message data itself.
-You can read the file ``i2c-protocol`` for more information about the
+You can read the file i2c-protocol.rst for more information about the
actual I2C protocol.
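+
+For illustration, a repeated-start write-then-read could be set up as in this
+minimal sketch (``client`` is assumed to be the driver's i2c_client; the
+register number 0x10 is arbitrary)::
+
+    struct i2c_msg msgs[2];
+    u8 reg = 0x10, val;
+
+    /* message 0: write the register address */
+    msgs[0].addr  = client->addr;
+    msgs[0].flags = 0;
+    msgs[0].len   = 1;
+    msgs[0].buf   = &reg;
+
+    /* message 1: read one byte back without an intervening stop */
+    msgs[1].addr  = client->addr;
+    msgs[1].flags = I2C_M_RD;
+    msgs[1].len   = 1;
+    msgs[1].buf   = &val;
+
+    if (i2c_transfer(client->adapter, msgs, 2) != 2)
+        return -EIO;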
@@ -414,7 +414,7 @@ transactions return 0 on success; the 'read' transactions return the read
value, except for block transactions, which return the number of values
read. The block buffers need not be longer than 32 bytes.
-You can read the file ``smbus-protocol`` for more information about the
+You can read the file smbus-protocol.rst for more information about the
actual SMBus protocol.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 4737c18c97ff..85eab6e990ab 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -1,11 +1,5 @@
.. SPDX-License-Identifier: GPL-2.0
-
-.. The Linux Kernel documentation master file, created by
- sphinx-quickstart on Fri Feb 12 13:51:46 2016.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
.. _linux_doc:
The Linux Kernel documentation
@@ -18,133 +12,84 @@ documents into a coherent whole. Please note that improvements to the
documentation are welcome; join the linux-doc list at vger.kernel.org if
you want to help out.
-Licensing documentation
------------------------
+Working with the development community
+--------------------------------------
-The following describes the license of the Linux kernel source code
-(GPLv2), how to properly mark the license of individual files in the source
-tree, as well as links to the full license text.
-
-* :ref:`kernel_licensing`
-
-User-oriented documentation
----------------------------
-
-The following manuals are written for *users* of the kernel — those who are
-trying to get it to work optimally on a given system.
+The essential guides for interacting with the kernel's development
+community and getting your work upstream.
.. toctree::
- :maxdepth: 2
-
- admin-guide/index
- kbuild/index
-
-Firmware-related documentation
-------------------------------
-The following holds information on the kernel's expectations regarding the
-platform firmwares.
+ :maxdepth: 1
-.. toctree::
- :maxdepth: 2
+ process/development-process
+ process/submitting-patches
+ Code of conduct <process/code-of-conduct>
+ maintainer/index
+ All development-process docs <process/index>
- firmware-guide/index
- devicetree/index
-Application-developer documentation
------------------------------------
+Internal API manuals
+--------------------
-The user-space API manual gathers together documents describing aspects of
-the kernel interface as seen by application developers.
+Manuals for use by developers working to interface with the rest of the
+kernel.
.. toctree::
- :maxdepth: 2
-
- userspace-api/index
+ :maxdepth: 1
+ core-api/index
+ driver-api/index
+ subsystem-apis
+ Locking in the kernel <locking/index>
-Introduction to kernel development
-----------------------------------
+Development tools and processes
+-------------------------------
-These manuals contain overall information about how to develop the kernel.
-The kernel community is quite large, with thousands of developers
-contributing over the course of a year. As with any large community,
-knowing how things are done will make the process of getting your changes
-merged much easier.
+Various other manuals with useful information for all kernel developers.
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- process/index
- dev-tools/index
+ process/license-rules
doc-guide/index
+ dev-tools/index
+ dev-tools/testing-overview
kernel-hacking/index
trace/index
- maintainer/index
fault-injection/index
livepatch/index
-Kernel API documentation
-------------------------
+User-oriented documentation
+---------------------------
-These books get into the details of how specific kernel subsystems work
-from the point of view of a kernel developer. Much of the information here
-is taken directly from the kernel source, with supplemental material added
-as needed (or at least as we managed to add it — probably *not* all that is
-needed).
+The following manuals are written for *users* of the kernel — those who are
+trying to get it to work optimally on a given system and application
+developers seeking information on the kernel's user-space APIs.
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- driver-api/index
- core-api/index
- locking/index
- accounting/index
- block/index
- cdrom/index
- cpu-freq/index
- fb/index
- fpga/index
- hid/index
- i2c/index
- iio/index
- isdn/index
- infiniband/index
- leds/index
- netlabel/index
- networking/index
- pcmcia/index
- power/index
- target/index
- timers/index
- spi/index
- w1/index
- watchdog/index
- virt/index
- input/index
- hwmon/index
- gpu/index
- security/index
- sound/index
- crypto/index
- filesystems/index
- mm/index
- bpf/index
- usb/index
- PCI/index
- scsi/index
- misc-devices/index
- scheduler/index
- mhi/index
- peci/index
-
-Architecture-agnostic documentation
------------------------------------
+ admin-guide/index
+ The kernel build system <kbuild/index>
+ admin-guide/reporting-issues.rst
+ User-space tools <tools/index>
+ userspace-api/index
+
+See also: the `Linux man pages <https://www.kernel.org/doc/man-pages/>`_,
+which are kept separately from the kernel's own documentation.
+
+Firmware-related documentation
+------------------------------
+The following holds information on the kernel's expectations regarding the
+platform firmwares.
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
+
+ firmware-guide/index
+ devicetree/index
- asm-annotations
Architecture-specific documentation
-----------------------------------
@@ -163,9 +108,8 @@ of the documentation body, or may require some adjustments and/or conversion
to ReStructured Text format, or are simply too old.
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- tools/index
staging/index
diff --git a/Documentation/kbuild/gcc-plugins.rst b/Documentation/kbuild/gcc-plugins.rst
index 0ba76719f1b9..c578c6ba3eb6 100644
--- a/Documentation/kbuild/gcc-plugins.rst
+++ b/Documentation/kbuild/gcc-plugins.rst
@@ -90,7 +90,11 @@ e.g., on Ubuntu for gcc-10::
Or on Fedora::
- dnf install gcc-plugin-devel
+ dnf install gcc-plugin-devel libmpc-devel
+
+Or on Fedora when using cross-compilers that include plugins::
+
+ dnf install libmpc-devel
Enable the GCC plugin infrastructure and some plugin(s) you want to use
in the kernel config::
@@ -99,6 +103,19 @@ in the kernel config::
CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y
...
+Run gcc (native or cross-compiler) to ensure plugin headers are detected::
+
+ gcc -print-file-name=plugin
+ CROSS_COMPILE=arm-linux-gnu- ${CROSS_COMPILE}gcc -print-file-name=plugin
+
+The word "plugin" means they are not detected::
+
+ plugin
+
+A full path means they are detected::
+
+ /usr/lib/gcc/x86_64-redhat-linux/12/plugin
+
To compile the minimum tool set including the plugin(s)::
make scripts
diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst
index 64405e5da63e..bfda1a5fecad 100644
--- a/Documentation/locking/seqlock.rst
+++ b/Documentation/locking/seqlock.rst
@@ -39,7 +39,7 @@ as the writer can invalidate a pointer that the reader is following.
Sequence counters (``seqcount_t``)
==================================
-This is the the raw counting mechanism, which does not protect against
+This is the raw counting mechanism, which does not protect against
multiple writers. Write side critical sections must thus be serialized
by an external lock.
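+
+One possible shape of that pattern, as a minimal sketch (``foo_lock``,
+``foo_seq`` and ``foo_data`` are made-up names)::
+
+    /* writer, serialized by the external lock */
+    spin_lock(&foo_lock);
+    write_seqcount_begin(&foo_seq);
+    foo_data = new_value;
+    write_seqcount_end(&foo_seq);
+    spin_unlock(&foo_lock);
+
+    /* reader, retried if it overlapped a write side critical section */
+    do {
+        seq = read_seqcount_begin(&foo_seq);
+        val = foo_data;
+    } while (read_seqcount_retry(&foo_seq, seq));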
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 832b5d36e279..06f80e3785c5 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -52,7 +52,7 @@ CONTENTS
- Varieties of memory barrier.
- What may not be assumed about memory barriers?
- - Data dependency barriers (historical).
+ - Address-dependency barriers (historical).
- Control dependencies.
- SMP barrier pairing.
- Examples of memory barrier sequences.
@@ -187,9 +187,9 @@ As a further example, consider this sequence of events:
B = 4; Q = P;
P = &B; D = *Q;
-There is an obvious data dependency here, as the value loaded into D depends on
-the address retrieved from P by CPU 2. At the end of the sequence, any of the
-following results are possible:
+There is an obvious address dependency here, as the value loaded into D depends
+on the address retrieved from P by CPU 2. At the end of the sequence, any of
+the following results are possible:
(Q == &A) and (D == 1)
(Q == &B) and (D == 2)
@@ -391,58 +391,62 @@ Memory barriers come in four basic varieties:
memory system as time progresses. All stores _before_ a write barrier
will occur _before_ all the stores after the write barrier.
- [!] Note that write barriers should normally be paired with read or data
- dependency barriers; see the "SMP barrier pairing" subsection.
+ [!] Note that write barriers should normally be paired with read or
+ address-dependency barriers; see the "SMP barrier pairing" subsection.
- (2) Data dependency barriers.
+ (2) Address-dependency barriers (historical).
- A data dependency barrier is a weaker form of read barrier. In the case
- where two loads are performed such that the second depends on the result
- of the first (eg: the first load retrieves the address to which the second
- load will be directed), a data dependency barrier would be required to
- make sure that the target of the second load is updated after the address
- obtained by the first load is accessed.
+ An address-dependency barrier is a weaker form of read barrier. In the
+ case where two loads are performed such that the second depends on the
+ result of the first (eg: the first load retrieves the address to which
+ the second load will be directed), an address-dependency barrier would
+ be required to make sure that the target of the second load is updated
+ after the address obtained by the first load is accessed.
- A data dependency barrier is a partial ordering on interdependent loads
- only; it is not required to have any effect on stores, independent loads
- or overlapping loads.
+ An address-dependency barrier is a partial ordering on interdependent
+ loads only; it is not required to have any effect on stores, independent
+ loads or overlapping loads.
As mentioned in (1), the other CPUs in the system can be viewed as
committing sequences of stores to the memory system that the CPU being
- considered can then perceive. A data dependency barrier issued by the CPU
- under consideration guarantees that for any load preceding it, if that
- load touches one of a sequence of stores from another CPU, then by the
- time the barrier completes, the effects of all the stores prior to that
- touched by the load will be perceptible to any loads issued after the data
- dependency barrier.
+ considered can then perceive. An address-dependency barrier issued by
+ the CPU under consideration guarantees that for any load preceding it,
+ if that load touches one of a sequence of stores from another CPU, then
+ by the time the barrier completes, the effects of all the stores prior to
+ that touched by the load will be perceptible to any loads issued after
+ the address-dependency barrier.
See the "Examples of memory barrier sequences" subsection for diagrams
showing the ordering constraints.
- [!] Note that the first load really has to have a _data_ dependency and
+ [!] Note that the first load really has to have an _address_ dependency and
not a control dependency. If the address for the second load is dependent
on the first load, but the dependency is through a conditional rather than
actually loading the address itself, then it's a _control_ dependency and
a full read barrier or better is required. See the "Control dependencies"
subsection for more information.
- [!] Note that data dependency barriers should normally be paired with
+ [!] Note that address-dependency barriers should normally be paired with
write barriers; see the "SMP barrier pairing" subsection.
+ [!] Kernel release v5.9 removed kernel APIs for explicit address-
+ dependency barriers. Nowadays, APIs for marking loads from shared
+ variables such as READ_ONCE() and rcu_dereference() provide implicit
+ address-dependency barriers.
(3) Read (or load) memory barriers.
- A read barrier is a data dependency barrier plus a guarantee that all the
- LOAD operations specified before the barrier will appear to happen before
- all the LOAD operations specified after the barrier with respect to the
- other components of the system.
+ A read barrier is an address-dependency barrier plus a guarantee that all
+ the LOAD operations specified before the barrier will appear to happen
+ before all the LOAD operations specified after the barrier with respect to
+ the other components of the system.
A read barrier is a partial ordering on loads only; it is not required to
have any effect on stores.
- Read memory barriers imply data dependency barriers, and so can substitute
- for them.
+ Read memory barriers imply address-dependency barriers, and so can
+ substitute for them.
[!] Note that read barriers should normally be paired with write barriers;
see the "SMP barrier pairing" subsection.
@@ -550,17 +554,21 @@ There are certain things that the Linux kernel memory barriers do not guarantee:
Documentation/core-api/dma-api.rst
-DATA DEPENDENCY BARRIERS (HISTORICAL)
--------------------------------------
+ADDRESS-DEPENDENCY BARRIERS (HISTORICAL)
+----------------------------------------
As of v4.15 of the Linux kernel, an smp_mb() was added to READ_ONCE() for
DEC Alpha, which means that about the only people who need to pay attention
to this section are those working on DEC Alpha architecture-specific code
and those working on READ_ONCE() itself. For those who need it, and for
those who are interested in the history, here is the story of
-data-dependency barriers.
+address-dependency barriers.
+
+[!] While address dependencies are observed in both load-to-load and
+load-to-store relations, address-dependency barriers are not necessary
+for load-to-store situations.
-The usage requirements of data dependency barriers are a little subtle, and
+The requirement of address-dependency barriers is a little subtle, and
it's not always obvious that they're needed. To illustrate, consider the
following sequence of events:
@@ -570,11 +578,14 @@ following sequence of events:
B = 4;
<write barrier>
WRITE_ONCE(P, &B);
- Q = READ_ONCE(P);
+ Q = READ_ONCE_OLD(P);
D = *Q;
-There's a clear data dependency here, and it would seem that by the end of the
-sequence, Q must be either &A or &B, and that:
+[!] READ_ONCE_OLD() corresponds to READ_ONCE() of the pre-4.15 kernel, which
+doesn't imply an address-dependency barrier.
+
+There's a clear address dependency here, and it would seem that by the end of
+the sequence, Q must be either &A or &B, and that:
(Q == &A) implies (D == 1)
(Q == &B) implies (D == 4)
@@ -588,8 +599,8 @@ While this may seem like a failure of coherency or causality maintenance, it
isn't, and this behaviour can be observed on certain real CPUs (such as the DEC
Alpha).
-To deal with this, a data dependency barrier or better must be inserted
-between the address load and the data load:
+To deal with this, READ_ONCE() provides an implicit address-dependency barrier
+since kernel release v4.15:
CPU 1 CPU 2
=============== ===============
@@ -598,7 +609,7 @@ between the address load and the data load:
<write barrier>
WRITE_ONCE(P, &B);
Q = READ_ONCE(P);
- <data dependency barrier>
+ <implicit address-dependency barrier>
D = *Q;
This enforces the occurrence of one of the two implications, and prevents the
@@ -615,13 +626,13 @@ odd-numbered bank is idle, one can see the new value of the pointer P (&B),
but the old value of the variable B (2).
-A data-dependency barrier is not required to order dependent writes
-because the CPUs that the Linux kernel supports don't do writes
-until they are certain (1) that the write will actually happen, (2)
-of the location of the write, and (3) of the value to be written.
+An address-dependency barrier is not required to order dependent writes
+because the CPUs that the Linux kernel supports don't do writes until they
+are certain (1) that the write will actually happen, (2) of the location of
+the write, and (3) of the value to be written.
But please carefully read the "CONTROL DEPENDENCIES" section and the
-Documentation/RCU/rcu_dereference.rst file: The compiler can and does
-break dependencies in a great many highly creative ways.
+Documentation/RCU/rcu_dereference.rst file: The compiler can and does break
+dependencies in a great many highly creative ways.
CPU 1 CPU 2
=============== ===============
@@ -629,12 +640,12 @@ break dependencies in a great many highly creative ways.
B = 4;
<write barrier>
WRITE_ONCE(P, &B);
- Q = READ_ONCE(P);
+ Q = READ_ONCE_OLD(P);
WRITE_ONCE(*Q, 5);
-Therefore, no data-dependency barrier is required to order the read into
+Therefore, no address-dependency barrier is required to order the read into
Q with the store into *Q. In other words, this outcome is prohibited,
-even without a data-dependency barrier:
+even without an implicit address-dependency barrier of modern READ_ONCE():
(Q == &B) && (B == 4)
@@ -645,12 +656,12 @@ can be used to record rare error conditions and the like, and the CPUs'
naturally occurring ordering prevents such records from being lost.
-Note well that the ordering provided by a data dependency is local to
+Note well that the ordering provided by an address dependency is local to
the CPU containing it. See the section on "Multicopy atomicity" for
more information.
-The data dependency barrier is very important to the RCU system,
+The address-dependency barrier is very important to the RCU system,
for example. See rcu_assign_pointer() and rcu_dereference() in
include/linux/rcupdate.h. This permits the current target of an RCU'd
pointer to be replaced with a new modified target, without the replacement
@@ -667,20 +678,21 @@ not understand them. The purpose of this section is to help you prevent
the compiler's ignorance from breaking your code.
A load-load control dependency requires a full read memory barrier, not
-simply a data dependency barrier to make it work correctly. Consider the
-following bit of code:
+simply an (implicit) address-dependency barrier to make it work correctly.
+Consider the following bit of code:
q = READ_ONCE(a);
+ <implicit address-dependency barrier>
if (q) {
- <data dependency barrier> /* BUG: No data dependency!!! */
+ /* BUG: No address dependency!!! */
p = READ_ONCE(b);
}
-This will not have the desired effect because there is no actual data
+This will not have the desired effect because there is no actual address
dependency, but rather a control dependency that the CPU may short-circuit
by attempting to predict the outcome in advance, so that other CPUs see
-the load from b as having happened before the load from a. In such a
-case what's actually required is:
+the load from b as having happened before the load from a. In such a case
+what's actually required is:
q = READ_ONCE(a);
if (q) {
@@ -927,9 +939,9 @@ General barriers pair with each other, though they also pair with most
other types of barriers, albeit without multicopy atomicity. An acquire
barrier pairs with a release barrier, but both may also pair with other
barriers, including of course general barriers. A write barrier pairs
-with a data dependency barrier, a control dependency, an acquire barrier,
+with an address-dependency barrier, a control dependency, an acquire barrier,
a release barrier, a read barrier, or a general barrier. Similarly a
-read barrier, control dependency, or a data dependency barrier pairs
+read barrier, control dependency, or an address-dependency barrier pairs
with a write barrier, an acquire barrier, a release barrier, or a
general barrier:
@@ -948,7 +960,7 @@ Or:
a = 1;
<write barrier>
WRITE_ONCE(b, &a); x = READ_ONCE(b);
- <data dependency barrier>
+ <implicit address-dependency barrier>
y = *x;
Or even:
@@ -968,8 +980,8 @@ Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.
[!] Note that the stores before the write barrier would normally be expected to
-match the loads after the read barrier or the data dependency barrier, and vice
-versa:
+match the loads after the read barrier or the address-dependency barrier, and
+vice versa:
CPU 1 CPU 2
=================== ===================
@@ -1021,8 +1033,8 @@ STORE B, STORE C } all occurring before the unordered set of { STORE D, STORE E
V
-Secondly, data dependency barriers act as partial orderings on data-dependent
-loads. Consider the following sequence of events:
+Secondly, address-dependency barriers act as partial orderings on address-
+dependent loads. Consider the following sequence of events:
CPU 1 CPU 2
======================= =======================
@@ -1067,8 +1079,8 @@ effectively random order, despite the write barrier issued by CPU 1:
In the above example, CPU 2 perceives that B is 7, despite the load of *C
(which would be B) coming after the LOAD of C.
-If, however, a data dependency barrier were to be placed between the load of C
-and the load of *C (ie: B) on CPU 2:
+If, however, an address-dependency barrier were to be placed between the load
+of C and the load of *C (ie: B) on CPU 2:
CPU 1 CPU 2
======================= =======================
@@ -1078,7 +1090,7 @@ and the load of *C (ie: B) on CPU 2:
<write barrier>
STORE C = &B LOAD X
STORE D = 4 LOAD C (gets &B)
- <data dependency barrier>
+ <address-dependency barrier>
LOAD *C (reads B)
then the following will occur:
@@ -1101,7 +1113,7 @@ then the following will occur:
| +-------+ | |
| | X->9 |------>| |
| +-------+ | |
- Makes sure all effects ---> \ ddddddddddddddddd | |
+ Makes sure all effects ---> \ aaaaaaaaaaaaaaaaa | |
prior to the store of C \ +-------+ | |
are perceptible to ----->| B->2 |------>| |
subsequent loads +-------+ | |
@@ -1292,7 +1304,7 @@ Which might appear as this:
LOAD with immediate effect : : +-------+
-Placing a read barrier or a data dependency barrier just before the second
+Placing a read barrier or an address-dependency barrier just before the second
load:
CPU 1 CPU 2
@@ -1816,20 +1828,20 @@ which may then reorder things however it wishes.
CPU MEMORY BARRIERS
-------------------
-The Linux kernel has eight basic CPU memory barriers:
+The Linux kernel has seven basic CPU memory barriers:
- TYPE            MANDATORY               SMP CONDITIONAL
- =============== ======================= ===========================
- GENERAL         mb()                    smp_mb()
- WRITE           wmb()                   smp_wmb()
- READ            rmb()                   smp_rmb()
- DATA DEPENDENCY                         READ_ONCE()
+ TYPE                    MANDATORY       SMP CONDITIONAL
+ ======================= =============== ===============
+ GENERAL                 mb()            smp_mb()
+ WRITE                   wmb()           smp_wmb()
+ READ                    rmb()           smp_rmb()
+ ADDRESS DEPENDENCY                      READ_ONCE()
-All memory barriers except the data dependency barriers imply a compiler
-barrier. Data dependencies do not impose any additional compiler ordering.
+All memory barriers except the address-dependency barriers imply a compiler
+barrier. Address dependencies do not impose any additional compiler ordering.
-Aside: In the case of data dependencies, the compiler would be expected
+Aside: In the case of address dependencies, the compiler would be expected
to issue the loads in the correct order (eg. `a[b]` would have to load
the value of b before loading a[b]), however there is no guarantee in
the C specification that the compiler may not speculate the value of b
@@ -2749,7 +2761,8 @@ is discarded from the CPU's cache and reloaded. To deal with this, the
appropriate part of the kernel must invalidate the overlapping bits of the
cache on each CPU.
-See Documentation/core-api/cachetlb.rst for more information on cache management.
+See Documentation/core-api/cachetlb.rst for more information on cache
+management.
CACHE COHERENCY VS MMIO
@@ -2889,8 +2902,8 @@ AND THEN THERE'S THE ALPHA
The DEC Alpha CPU is one of the most relaxed CPUs there is. Not only that,
some versions of the Alpha CPU have a split data cache, permitting them to have
two semantically-related cache lines updated at separate times. This is where
-the data dependency barrier really becomes necessary as this synchronises both
-caches with the memory coherence system, thus making it seem like pointer
+the address-dependency barrier really becomes necessary as this synchronises
+both caches with the memory coherence system, thus making it seem like pointer
changes vs new data occur in the right order.
The Alpha defines the Linux kernel's memory model, although as of v4.15
diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst
index b280367d6a44..4a0e158aa9ce 100644
--- a/Documentation/mm/unevictable-lru.rst
+++ b/Documentation/mm/unevictable-lru.rst
@@ -197,7 +197,7 @@ unevictable list for the memory cgroup and node being scanned.
There may be situations where a page is mapped into a VM_LOCKED VMA, but the
page is not marked as PG_mlocked. Such pages will make it all the way to
shrink_active_list() or shrink_page_list() where they will be detected when
-vmscan walks the reverse map in page_referenced() or try_to_unmap(). The page
+vmscan walks the reverse map in folio_referenced() or try_to_unmap(). The page
is culled to the unevictable list when it is released by the shrinker.
To "cull" an unevictable page, vmscan simply puts the page back on the LRU list
@@ -267,7 +267,7 @@ the LRU. Such pages can be "noticed" by memory management in several places:
(4) in the fault path and when a VM_LOCKED stack segment is expanded; or
(5) as mentioned above, in vmscan:shrink_page_list() when attempting to
- reclaim a page in a VM_LOCKED VMA by page_referenced() or try_to_unmap().
+ reclaim a page in a VM_LOCKED VMA by folio_referenced() or try_to_unmap().
mlocked pages become unlocked and rescued from the unevictable list when:
@@ -547,7 +547,7 @@ vmscan's shrink_inactive_list() and shrink_page_list() also divert obviously
unevictable pages found on the inactive lists to the appropriate memory cgroup
and node unevictable list.
-rmap's page_referenced_one(), called via vmscan's shrink_active_list() or
+rmap's folio_referenced_one(), called via vmscan's shrink_active_list() or
shrink_page_list(), and rmap's try_to_unmap_one() called via shrink_page_list(),
check for (3) pages still mapped into VM_LOCKED VMAs, and call mlock_vma_page()
to correct them. Such pages are culled to the unevictable list when released
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index e263dfcc4b40..213510698014 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -47,7 +47,6 @@ allow_join_initial_addr_port - BOOLEAN
Default: 1
pm_type - INTEGER
-
Set the default path manager type to use for each new MPTCP
socket. In-kernel path management will control subflow
connections and address advertisements according to
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 834945ebc4cd..1120d71f28d7 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -70,15 +70,6 @@ nf_conntrack_generic_timeout - INTEGER (seconds)
Default for generic timeout. This refers to layer 4 unknown/unsupported
protocols.
-nf_conntrack_helper - BOOLEAN
- - 0 - disabled (default)
- - not 0 - enabled
-
- Enable automatic conntrack helper assignment.
- If disabled it is required to set up iptables rules to assign
- helpers to connections. See the CT target description in the
- iptables-extensions(8) man page for further information.
-
nf_conntrack_icmp_timeout - INTEGER (seconds)
default 30
diff --git a/Documentation/process/5.Posting.rst b/Documentation/process/5.Posting.rst
index 906235c11c24..d87f1fee4cbc 100644
--- a/Documentation/process/5.Posting.rst
+++ b/Documentation/process/5.Posting.rst
@@ -256,8 +256,10 @@ The tags in common use are:
- Cc: the named person received a copy of the patch and had the
opportunity to comment on it.
-Be careful in the addition of tags to your patches: only Cc: is appropriate
-for addition without the explicit permission of the person named.
+Be careful in the addition of tags to your patches, as only Cc: is appropriate
+for addition without the explicit permission of the person named; using
+Reported-by: is fine most of the time as well, but ask for permission if
+the bug was reported in private.
Sending the patch
diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst
index e899f14a4ba2..922e0b547bc3 100644
--- a/Documentation/process/code-of-conduct-interpretation.rst
+++ b/Documentation/process/code-of-conduct-interpretation.rst
@@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're
uncertain how to handle situations that come up. It will not be
considered a violation report unless you want it to be. If you are
uncertain about approaching the TAB or any other maintainers, please
-reach out to our conflict mediator, Mishi Choudhary <[email protected]>.
+reach out to our conflict mediator, Joanna Lee <[email protected]>.
In the end, "be kind to each other" is really what the end goal is for
everybody. We know everyone is human and we all fail at times, but the
@@ -127,10 +127,12 @@ are listed at https://kernel.org/code-of-conduct.html. Members can not
access reports made before they joined or after they have left the
committee.
-The initial Code of Conduct Committee consists of volunteer members of
-the TAB, as well as a professional mediator acting as a neutral third
-party. The first task of the committee is to establish documented
-processes, which will be made public.
+The Code of Conduct Committee consists of volunteer community members
+appointed by the TAB, as well as a professional mediator acting as a
+neutral third party. The processes the Code of Conduct committee will
+use to address reports are varied and will depend on the individual
+circumstances; however, this file serves as documentation for the
+general process used.
Any member of the committee, including the mediator, can be contacted
directly if a reporter does not wish to include the full committee in a
@@ -141,16 +143,16 @@ processes (see above) and consults with the TAB as needed and
appropriate, for instance to request and receive information about the
kernel community.
-Any decisions by the committee will be brought to the TAB, for
-implementation of enforcement with the relevant maintainers if needed.
-A decision by the Code of Conduct Committee can be overturned by the TAB
-by a two-thirds vote.
+Any decisions regarding enforcement recommendations will be brought to
+the TAB for implementation of enforcement with the relevant maintainers
+if needed. A decision by the Code of Conduct Committee can be overturned
+by the TAB by a two-thirds vote.
At quarterly intervals, the Code of Conduct Committee and TAB will
provide a report summarizing the anonymised reports that the Code of
Conduct committee has received and their status, as well details of any
overridden decisions including complete and identifiable voting details.
-We expect to establish a different process for Code of Conduct Committee
-staffing beyond the bootstrap period. This document will be updated
-with that information when this occurs.
+Because how we interpret and enforce the Code of Conduct will evolve over
+time, this document will be updated when necessary to reflect any
+changes.
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 03eb53fd029a..007e49ef6cec 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -1186,6 +1186,68 @@ expression used. For instance:
#endif /* CONFIG_SOMETHING */
+22) Do not crash the kernel
+---------------------------
+
+In general, the decision to crash the kernel belongs to the user, rather
+than to the kernel developer.
+
+Avoid panic()
+*************
+
+panic() should be used with care and primarily only during system boot.
+panic() is, for example, acceptable when running out of memory during boot and
+not being able to continue.
+
+Use WARN() rather than BUG()
+****************************
+
+Do not add new code that uses any of the BUG() variants, such as BUG(),
+BUG_ON(), or VM_BUG_ON(). Instead, use a WARN*() variant, preferably
+WARN_ON_ONCE(), and possibly with recovery code. Recovery code is not
+required if there is no reasonable way to at least partially recover.
+
+"I'm too lazy to do error handling" is not an excuse for using BUG(). Major
+internal corruptions with no way of continuing may still use BUG(), but need
+good justification.
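+
+For example, a recoverable "should never happen" condition might be handled
+with a one-shot warning plus an early return (a sketch only; the variable
+name and error code are illustrative)::
+
+        if (WARN_ON_ONCE(!ctx))
+                return -EINVAL;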
+
+Use WARN_ON_ONCE() rather than WARN() or WARN_ON()
+**************************************************
+
+WARN_ON_ONCE() is generally preferred over WARN() or WARN_ON(), because it
+is common for a given warning condition, if it occurs at all, to occur
+multiple times. This can fill up and wrap the kernel log, and can even slow
+the system enough that the excessive logging turns into its own, additional
+problem.
+
+Do not WARN lightly
+*******************
+
+WARN*() is intended for unexpected, this-should-never-happen situations.
+WARN*() macros are not to be used for anything that is expected to happen
+during normal operation. These are not pre- or post-condition asserts, for
+example. Again: WARN*() must not be used for a condition that is expected
+to trigger easily, for example, by user space actions. pr_warn_once() is a
+possible alternative, if you need to notify the user of a problem.
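+
+For instance, a condition that user space can trigger at will could be
+reported with a one-shot printk instead (MYDRV_VALID_FLAGS and req are
+made-up names)::
+
+        if (req->flags & ~MYDRV_VALID_FLAGS) {
+                pr_warn_once("mydrv: unknown flags 0x%x ignored\n",
+                             req->flags & ~MYDRV_VALID_FLAGS);
+                req->flags &= MYDRV_VALID_FLAGS;
+        }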
+
+Do not worry about panic_on_warn users
+**************************************
+
+A few more words about panic_on_warn: Remember that ``panic_on_warn`` is an
+available kernel option, and that many users set this option. This is why
+there is a "Do not WARN lightly" writeup, above. However, the existence of
+panic_on_warn users is not a valid reason to avoid the judicious use of
+WARN*(). That is because whoever enables panic_on_warn has explicitly
+asked the kernel to crash if a WARN*() fires, and such users must be
+prepared to deal with the consequences of a system that is somewhat more
+likely to crash.
+
+Use BUILD_BUG_ON() for compile-time assertions
+**********************************************
+
+The use of BUILD_BUG_ON() is acceptable and encouraged, because it is a
+compile-time assertion that has no effect at runtime.
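+
+A couple of illustrative uses (the structure, array, and enum names here are
+hypothetical)::
+
+	/* Fail the build if the on-wire header ever changes size. */
+	BUILD_BUG_ON(sizeof(struct example_wire_header) != 16);
+
+	/* Fail the build if the table and the enum fall out of sync. */
+	BUILD_BUG_ON(ARRAY_SIZE(example_names) != EXAMPLE_TYPE_MAX);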
+
Appendix I) References
----------------------
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 2ba2a1582bbe..d4b6217472b0 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -5,6 +5,7 @@
.. _process_index:
+=============================================
Working with the kernel development community
=============================================
diff --git a/Documentation/process/maintainer-pgp-guide.rst b/Documentation/process/maintainer-pgp-guide.rst
index 29e7d7b1cd44..40bfbd3b7648 100644
--- a/Documentation/process/maintainer-pgp-guide.rst
+++ b/Documentation/process/maintainer-pgp-guide.rst
@@ -121,57 +121,56 @@ edit your ``~/.gnupg/gpg-agent.conf`` file to set your own values::
to remove anything you had in place for older versions of GnuPG, as
it may not be doing the right thing any more.
-Set up a refresh cronjob
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-You will need to regularly refresh your keyring in order to get the
-latest changes on other people's public keys, which is best done with a
-daily cronjob::
-
- @daily /usr/bin/gpg2 --refresh >/dev/null 2>&1
-
-Check the full path to your ``gpg`` or ``gpg2`` command and use the
-``gpg2`` command if regular ``gpg`` for you is the legacy GnuPG v.1.
-
-.. _master_key:
+.. _protect_your_key:
-Protect your master PGP key
-===========================
+Protect your PGP key
+====================
This guide assumes that you already have a PGP key that you use for Linux
kernel development purposes. If you do not yet have one, please see the
"`Protecting Code Integrity`_" document mentioned earlier for guidance
on how to create a new one.
-You should also make a new key if your current one is weaker than 2048 bits
-(RSA).
-
-Master key vs. Subkeys
-----------------------
-
-Subkeys are fully independent PGP keypairs that are tied to the "master"
-key using certifying key signatures (certificates). It is important to
-understand the following:
-
-1. There are no technical differences between the "master key" and "subkeys."
-2. At creation time, we assign functional limitations to each key by
- giving it specific capabilities.
-3. A PGP key can have 4 capabilities:
-
- - **[S]** key can be used for signing
- - **[E]** key can be used for encryption
- - **[A]** key can be used for authentication
- - **[C]** key can be used for certifying other keys
-
-4. A single key may have multiple capabilities.
-5. A subkey is fully independent from the master key. A message
- encrypted to a subkey cannot be decrypted with the master key. If you
- lose your private subkey, it cannot be recreated from the master key
- in any way.
-
-The key carrying the **[C]** (certify) capability is considered the
-"master" key because it is the only key that can be used to indicate
-relationship with other keys. Only the **[C]** key can be used to:
+You should also make a new key if your current one is weaker than 2048
+bits (RSA).
+
+Understanding PGP Subkeys
+-------------------------
+
+A PGP key rarely consists of a single keypair -- usually it is a
+collection of independent subkeys that can be used for different
+purposes based on their capabilities, assigned at their creation time.
+PGP defines four capabilities that a key can have:
+
+- **[S]** keys can be used for signing
+- **[E]** keys can be used for encryption
+- **[A]** keys can be used for authentication
+- **[C]** keys can be used for certifying other keys
+
+The key with the **[C]** capability is often called the "master" key,
+but this terminology is misleading because it implies that the Certify
+key can be used in place of any other subkey on the same chain (like
+a physical "master key" can be used to open the locks made for other
+keys). Since this is not the case, this guide will refer to it as "the
+Certify key" to avoid any ambiguity.
+
+It is critical to fully understand the following:
+
+1. All subkeys are fully independent from each other. If you lose a
+ private subkey, it cannot be restored or recreated from any other
+ private key on your chain.
+2. With the exception of the Certify key, there can be multiple subkeys
+ with identical capabilities (e.g. you can have 2 valid encryption
+ subkeys, 3 valid signing subkeys, but only one valid certification
+ subkey). All subkeys are fully independent -- a message encrypted to
+ one **[E]** subkey cannot be decrypted with any other **[E]** subkey
+ you may also have.
+3. A single subkey may have multiple capabilities (e.g. your **[C]** key
+ can also be your **[S]** key).
+
+The key carrying the **[C]** (certify) capability is the only key that
+can be used to indicate relationship with other keys. Only the **[C]**
+key can be used to:
- add or revoke other keys (subkeys) with S/E/A capabilities
- add, change or revoke identities (uids) associated with the key
@@ -180,7 +179,7 @@ relationship with other keys. Only the **[C]** key can be used to:
By default, GnuPG creates the following when generating new keys:
-- A master key carrying both Certify and Sign capabilities (**[SC]**)
+- One subkey carrying both Certify and Sign capabilities (**[SC]**)
- A separate subkey with the Encryption capability (**[E]**)
If you used the default parameters when generating your key, then that
@@ -192,9 +191,6 @@ for example::
uid [ultimate] Alice Dev <[email protected]>
ssb rsa2048 2018-01-23 [E] [expires: 2020-01-23]
-Any key carrying the **[C]** capability is your master key, regardless
-of any other capabilities it may have assigned to it.
-
The long line under the ``sec`` entry is your key fingerprint --
whenever you see ``[fpr]`` in the examples below, that 40-character
string is what it refers to.
@@ -215,37 +211,30 @@ strong passphrase. To set it or change it, use::
Create a separate Signing subkey
--------------------------------
-Our goal is to protect your master key by moving it to offline media, so
-if you only have a combined **[SC]** key, then you should create a separate
-signing subkey::
+Our goal is to protect your Certify key by moving it to offline media,
+so if you only have a combined **[SC]** key, then you should create a
+separate signing subkey::
$ gpg --quick-addkey [fpr] ed25519 sign
-Remember to tell the keyservers about this change, so others can pull down
-your new subkey::
-
- $ gpg --send-key [fpr]
-
.. note:: ECC support in GnuPG
GnuPG 2.1 and later has full support for Elliptic Curve
Cryptography, with ability to combine ECC subkeys with traditional
- RSA master keys. The main upside of ECC cryptography is that it is
- much faster computationally and creates much smaller signatures when
+ RSA keys. The main upside of ECC cryptography is that it is much
+ faster computationally and creates much smaller signatures when
compared byte for byte with 2048+ bit RSA keys. Unless you plan on
using a smartcard device that does not support ECC operations, we
recommend that you create an ECC signing subkey for your kernel
work.
- If for some reason you prefer to stay with RSA subkeys, just replace
- "ed25519" with "rsa2048" in the above command. Additionally, if you
- plan to use a hardware device that does not support ED25519 ECC
- keys, like Nitrokey Pro or a Yubikey, then you should use
- "nistp256" instead or "ed25519."
+   Note that if you plan to use a hardware device that does not
+   support ED25519 ECC keys, you should choose "nistp256" instead of
+   "ed25519".
-Back up your master key for disaster recovery
----------------------------------------------
+Back up your Certify key for disaster recovery
+----------------------------------------------
The more signatures you have on your PGP key from other developers, the
more reasons you have to create a backup version that lives on something
@@ -277,9 +266,7 @@ home, such as your bank vault.
Your printer is probably no longer a simple dumb device connected to
your parallel port, but since the output is still encrypted with
your passphrase, printing out even to "cloud-integrated" modern
- printers should remain a relatively safe operation. One option is to
- change the passphrase on your master key immediately after you are
- done with paperkey.
+ printers should remain a relatively safe operation.
Back up your whole GnuPG directory
----------------------------------
@@ -300,7 +287,7 @@ will use for backup purposes. You will need to encrypt them using LUKS
-- refer to your distro's documentation on how to accomplish this.
For the encryption passphrase, you can use the same one as on your
-master key.
+PGP key.
Once the encryption process is over, re-insert the USB drive and make
sure it gets properly mounted. Copy your entire ``.gnupg`` directory
@@ -319,7 +306,7 @@ far away, because you'll need to use it every now and again for things
like editing identities, adding or revoking subkeys, or signing other
people's keys.
-Remove the master key from your homedir
+Remove the Certify key from your homedir
----------------------------------------
The files in our home directory are not as well protected as we like to
@@ -334,7 +321,7 @@ think. They can be leaked or stolen via many different means:
Protecting your key with a good passphrase greatly helps reduce the risk
of any of the above, but passphrases can be discovered via keyloggers,
shoulder-surfing, or any number of other means. For this reason, the
-recommended setup is to remove your master key from your home directory
+recommended setup is to remove your Certify key from your home directory
and store it on offline storage.
.. warning::
@@ -343,7 +330,7 @@ and store it on offline storage.
your GnuPG directory in its entirety. What we are about to do will
render your key useless if you do not have a usable backup!
-First, identify the keygrip of your master key::
+First, identify the keygrip of your Certify key::
$ gpg --with-keygrip --list-key [fpr]
@@ -359,7 +346,7 @@ The output will be something like this::
Keygrip = 3333000000000000000000000000000000000000
Find the keygrip entry that is beneath the ``pub`` line (right under the
-master key fingerprint). This will correspond directly to a file in your
+Certify key fingerprint). This will correspond directly to a file in your
``~/.gnupg`` directory::
$ cd ~/.gnupg/private-keys-v1.d
@@ -369,13 +356,13 @@ master key fingerprint). This will correspond directly to a file in your
3333000000000000000000000000000000000000.key
All you have to do is simply remove the .key file that corresponds to
-the master keygrip::
+the Certify key keygrip::
$ cd ~/.gnupg/private-keys-v1.d
$ rm 1111000000000000000000000000000000000000.key
Now, if you issue the ``--list-secret-keys`` command, it will show that
-the master key is missing (the ``#`` indicates it is not available)::
+the Certify key is missing (the ``#`` indicates it is not available)::
$ gpg --list-secret-keys
sec# rsa2048 2018-01-24 [SC] [expires: 2020-01-24]
@@ -404,7 +391,7 @@ file, which still contains your private keys.
Move the subkeys to a dedicated crypto device
=============================================
-Even though the master key is now safe from being leaked or stolen, the
+Even though the Certify key is now safe from being leaked or stolen, the
subkeys are still in your home directory. Anyone who manages to get
their hands on those will be able to decrypt your communication or fake
your signatures (if they know the passphrase). Furthermore, each time a
@@ -447,7 +434,8 @@ functionality. There are several options available:
- `Yubikey 5`_: proprietary hardware and software, but cheaper than
Nitrokey Pro and comes available in the USB-C form that is more useful
with newer laptops. Offers additional security features such as FIDO
- U2F, among others, and now finally supports ECC keys (NISTP).
+ U2F, among others, and now finally supports NISTP and ED25519 ECC
+ keys.
`LWN has a good review`_ of some of the above models, as well as several
others. Your choice will depend on cost, shipping availability in your
@@ -460,7 +448,7 @@ geographical region, and open/proprietary hardware considerations.
Foundation.
.. _`Nitrokey Start`: https://shop.nitrokey.com/shop/product/nitrokey-start-6
-.. _`Nitrokey Pro 2`: https://shop.nitrokey.com/shop/product/nitrokey-pro-2-3
+.. _`Nitrokey Pro 2`: https://shop.nitrokey.com/shop/product/nkpr2-nitrokey-pro-2-3
.. _`Yubikey 5`: https://www.yubico.com/products/yubikey-5-overview/
.. _Gnuk: https://www.fsij.org/doc-gnuk/
.. _`LWN has a good review`: https://lwn.net/Articles/736231/
@@ -627,10 +615,10 @@ Other common GnuPG operations
Here is a quick reference for some common operations you'll need to do
with your PGP key.
-Mounting your master key offline storage
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Mounting your safe offline storage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You will need your master key for any of the operations below, so you
+You will need your Certify key for any of the operations below, so you
will first need to mount your backup offline storage and tell GnuPG to
use it::
@@ -644,7 +632,7 @@ your regular home directory location).
Extending key expiration date
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The master key has the default expiration date of 2 years from the date
+The Certify key has the default expiration date of 2 years from the date
of creation. This is done both for security reasons and to make obsolete
keys eventually disappear from keyservers.
@@ -685,6 +673,7 @@ remote end.
.. _`Agent Forwarding over SSH`: https://wiki.gnupg.org/AgentForwarding
+.. _pgp_with_git:
Using PGP with Git
==================
@@ -828,6 +817,63 @@ You can tell git to always sign commits::
.. _verify_identities:
+
+How to work with signed patches
+-------------------------------
+
+It is possible to use your PGP key to sign patches sent to kernel
+developer mailing lists. Since existing email signature mechanisms
+(PGP-Mime or PGP-inline) tend to cause problems with regular code
+review tasks, you should use the tool kernel.org created for this
+purpose that puts cryptographic attestation signatures into message
+headers (a-la DKIM):
+
+- `Patatt Patch Attestation`_
+
+.. _`Patatt Patch Attestation`: https://pypi.org/project/patatt/
+
+Installing and configuring patatt
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Patatt is packaged for many distributions already, so please check there
+first. You can also install it from pypi using "``pip install patatt``".
+
+If you already have your PGP key configured with git (via the
+``user.signingKey`` configuration parameter), then patatt requires no
+further configuration. You can start signing your patches by installing
+the git-send-email hook in the repository you want::
+
+ patatt install-hook
+
+Now any patches you send with ``git send-email`` will be automatically
+signed with your cryptographic signature.
+
+Checking patatt signatures
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you are using ``b4`` to retrieve and apply patches, then it will
+automatically attempt to verify all DKIM and patatt signatures it
+encounters, for example::
+
+ [...]
+ Checking attestation on all messages, may take a moment...
+ ---
+ ✓ [PATCH v1 1/3] kselftest/arm64: Correct buffer allocation for SVE Z registers
+ ✓ [PATCH v1 2/3] arm64/sve: Document our actual ABI for clearing registers on syscall
+ ✓ [PATCH v1 3/3] kselftest/arm64: Enforce actual ABI for SVE syscalls
+ ---
+ ✓ Signed: openpgp/[email protected]
+ ✓ Signed: DKIM/kernel.org
+
+.. note::
+
+ Patatt and b4 are still in active development and you should check
+ the latest documentation for these projects for any new or updated
+ features.
+
+.. _kernel_identities:
+
How to verify kernel developer identities
=========================================
@@ -899,65 +945,17 @@ the new default in GnuPG v2). To set it, add (or modify) the
trust-model tofu+pgp
-How to use keyservers (more) safely
------------------------------------
-
-If you get a "No public key" error when trying to validate someone's
-tag, then you should attempt to lookup that key using a keyserver. It is
-important to keep in mind that there is absolutely no guarantee that the
-key you retrieve from PGP keyservers belongs to the actual person --
-that much is by design. You are supposed to use the Web of Trust to
-establish key validity.
-
-How to properly maintain the Web of Trust is beyond the scope of this
-document, simply because doing it properly requires both effort and
-dedication that tends to be beyond the caring threshold of most human
-beings. Here are some shortcuts that will help you reduce the risk of
-importing a malicious key.
-
-First, let's say you've tried to run ``git verify-tag`` but it returned
-an error saying the key is not found::
-
- $ git verify-tag sunxi-fixes-for-4.15-2
- gpg: Signature made Sun 07 Jan 2018 10:51:55 PM EST
- gpg: using RSA key DA73759BF8619E484E5A3B47389A54219C0F2430
- gpg: issuer "[email protected]"
- gpg: Can't check signature: No public key
-
-Let's query the keyserver for more info about that key fingerprint (the
-fingerprint probably belongs to a subkey, so we can't use it directly
-without finding out the ID of the master key it is associated with)::
-
- $ gpg --search DA73759BF8619E484E5A3B47389A54219C0F2430
- gpg: data source: hkp://keys.gnupg.net
- (1) Chen-Yu Tsai <[email protected]>
- 4096 bit RSA key C94035C21B4F2AEB, created: 2017-03-14, expires: 2019-03-15
- Keys 1-1 of 1 for "DA73759BF8619E484E5A3B47389A54219C0F2430". Enter number(s), N)ext, or Q)uit > q
-
-Locate the ID of the master key in the output, in our example
-``C94035C21B4F2AEB``. Now display the key of Linus Torvalds that you
-have on your keyring::
-
- $ gpg --list-key [email protected]
- pub rsa2048 2011-09-20 [SC]
- ABAF11C65A2970B130ABE3C479BE3E4300411886
- uid [ unknown] Linus Torvalds <[email protected]>
- sub rsa2048 2011-09-20 [E]
-
-Next, find a trust path from Linus Torvalds to the key-id you found via ``gpg
---search`` of the unknown key. For this, you can use several tools including
-https://github.com/mricon/wotmate,
-https://git.kernel.org/pub/scm/docs/kernel/pgpkeys.git/tree/graphs, and
-https://the.earth.li/~noodles/pathfind.html.
-
-If you get a few decent trust paths, then it's a pretty good indication
-that it is a valid key. You can add it to your keyring from the
-keyserver now::
-
- $ gpg --recv-key C94035C21B4F2AEB
-
-This process is not perfect, and you are obviously trusting the
-administrators of the PGP Pathfinder service to not be malicious (in
-fact, this goes against :ref:`devs_not_infra`). However, if you
-do not carefully maintain your own web of trust, then it is a marked
-improvement over blindly trusting keyservers.
+Using the kernel.org web of trust repository
+--------------------------------------------
+
+Kernel.org maintains a git repository with developers' public keys as a
+replacement for replicating keyserver networks that have gone mostly
+dark in the past few years. The full documentation for how to set up
+that repository as your source of public keys can be found here:
+
+- `Kernel developer PGP Keyring`_
+
+If you are a kernel developer, please consider submitting your key for
+inclusion into that keyring.
+
+.. _`Kernel developer PGP Keyring`: https://korg.docs.kernel.org/pgpkeys.html
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index c61865e91f52..2fd8aa593a28 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -97,6 +97,12 @@ text, like this:
commit <sha1> upstream.
+or alternatively:
+
+.. code-block:: none
+
+ [ Upstream commit <sha1> ]
+
Additionally, some patches submitted via :ref:`option_1` may have additional
patch prerequisites which can be cherry-picked. This can be specified in the
following format in the sign-off area:
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index be49d8f2601b..7dc94555417d 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -715,8 +715,8 @@ references.
.. _backtraces:
-Backtraces in commit mesages
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Backtraces in commit messages
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Backtraces help document the call chain leading to a problem. However,
not all backtraces are helpful. For example, early boot call chains are
diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst
index 59b2d1fb4dc4..03db55504515 100644
--- a/Documentation/scheduler/sched-design-CFS.rst
+++ b/Documentation/scheduler/sched-design-CFS.rst
@@ -94,7 +94,7 @@ other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
way the previous scheduler had, and has no heuristics whatsoever. There is
only one central tunable (you have to switch on CONFIG_SCHED_DEBUG):
- /proc/sys/kernel/sched_min_granularity_ns
+ /sys/kernel/debug/sched/min_granularity_ns
which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
"server" (i.e., good batching) workloads. It defaults to a setting suitable
diff --git a/Documentation/staging/index.rst b/Documentation/staging/index.rst
index abd0d18254d2..ded8254bc0d7 100644
--- a/Documentation/staging/index.rst
+++ b/Documentation/staging/index.rst
@@ -14,45 +14,3 @@ Unsorted Documentation
static-keys
tee
xz
-
-Atomic Types
-============
-
-.. raw:: latex
-
- \footnotesize
-
-.. include:: ../atomic_t.txt
- :literal:
-
-.. raw:: latex
-
- \normalsize
-
-Atomic bitops
-=============
-
-.. raw:: latex
-
- \footnotesize
-
-.. include:: ../atomic_bitops.txt
- :literal:
-
-.. raw:: latex
-
- \normalsize
-
-Memory Barriers
-===============
-
-.. raw:: latex
-
- \footnotesize
-
-.. include:: ../memory-barriers.txt
- :literal:
-
-.. raw:: latex
-
- \normalsize
diff --git a/Documentation/subsystem-apis.rst b/Documentation/subsystem-apis.rst
new file mode 100644
index 000000000000..af65004a80aa
--- /dev/null
+++ b/Documentation/subsystem-apis.rst
@@ -0,0 +1,58 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Kernel subsystem documentation
+==============================
+
+These books get into the details of how specific kernel subsystems work
+from the point of view of a kernel developer. Much of the information here
+is taken directly from the kernel source, with supplemental material added
+as needed (or at least as we managed to add it — probably *not* all that is
+needed).
+
+**Fixme**: much more organizational work is needed here.
+
+.. toctree::
+ :maxdepth: 1
+
+ driver-api/index
+ core-api/index
+ locking/index
+ accounting/index
+ block/index
+ cdrom/index
+ cpu-freq/index
+ fb/index
+ fpga/index
+ hid/index
+ i2c/index
+ iio/index
+ isdn/index
+ infiniband/index
+ leds/index
+ netlabel/index
+ networking/index
+ pcmcia/index
+ power/index
+ target/index
+ timers/index
+ spi/index
+ w1/index
+ watchdog/index
+ virt/index
+ input/index
+ hwmon/index
+ gpu/index
+ security/index
+ sound/index
+ crypto/index
+ filesystems/index
+ mm/index
+ bpf/index
+ usb/index
+ PCI/index
+ scsi/index
+ misc-devices/index
+ scheduler/index
+ mhi/index
+ peci/index
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index 859fd1b76c63..c1b685a38f6b 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -412,7 +412,7 @@ Extended error information
Because the default sort key above is 'hitcount', the above shows
the list of call_sites by increasing hitcount, so that at the bottom
we see the functions that made the most kmalloc calls during the
- run. If instead we we wanted to see the top kmalloc callers in
+ run. If instead we wanted to see the top kmalloc callers in
terms of the number of bytes requested rather than the number of
calls, and we wanted the top caller to appear at the top, we can use
the 'sort' parameter, along with the 'descending' modifier::
diff --git a/Documentation/trace/kprobes.rst b/Documentation/trace/kprobes.rst
index f318bceda1e6..48cf778a2468 100644
--- a/Documentation/trace/kprobes.rst
+++ b/Documentation/trace/kprobes.rst
@@ -328,8 +328,8 @@ Configuring Kprobes
===================
When configuring the kernel using make menuconfig/xconfig/oldconfig,
-ensure that CONFIG_KPROBES is set to "y". Under "General setup", look
-for "Kprobes".
+ensure that CONFIG_KPROBES is set to "y". Look for "Kprobes" under
+"General architecture-dependent options".
So that you can load and unload Kprobes-based instrumentation modules,
make sure "Loadable module support" (CONFIG_MODULES) and "Module
diff --git a/Documentation/trace/timerlat-tracer.rst b/Documentation/trace/timerlat-tracer.rst
index d643c95c01eb..db17df312bc8 100644
--- a/Documentation/trace/timerlat-tracer.rst
+++ b/Documentation/trace/timerlat-tracer.rst
@@ -20,7 +20,7 @@ For example::
[root@f32 ~]# cd /sys/kernel/tracing/
[root@f32 tracing]# echo timerlat > current_tracer
-It is possible to follow the trace by reading the trace trace file::
+It is possible to follow the trace by reading the trace file::
[root@f32 tracing]# cat trace
# tracer: timerlat
diff --git a/Documentation/translations/zh_CN/IRQ.txt b/Documentation/translations/zh_CN/IRQ.txt
deleted file mode 100644
index 9aec8dca4fcf..000000000000
--- a/Documentation/translations/zh_CN/IRQ.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Chinese translated version of Documentation/core-api/irq/index.rst
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Maintainer: Eric W. Biederman <[email protected]>
-Chinese maintainer: Fu Wei <[email protected]>
----------------------------------------------------------------------
-Documentation/core-api/irq/index.rst 的中文翻译
-
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
-英文版维护者: Eric W. Biederman <[email protected]>
-中文版维护者: 傅炜 Fu Wei <[email protected]>
-中文版翻译者: 傅炜 Fu Wei <[email protected]>
-中文版校译者: 傅炜 Fu Wei <[email protected]>
-
-
-以下为正文
----------------------------------------------------------------------
-何为 IRQ?
-
-一个 IRQ 是来自某个设备的一个中断请求。目前,它们可以来自一个硬件引脚,
-或来自一个数据包。多个设备可能连接到同个硬件引脚,从而共享一个 IRQ。
-
-一个 IRQ 编号是用于告知硬件中断源的内核标识。通常情况下,这是一个
-全局 irq_desc 数组的索引,但是除了在 linux/interrupt.h 中的实现,
-具体的细节是体系结构特定的。
-
-一个 IRQ 编号是设备上某个可能的中断源的枚举。通常情况下,枚举的编号是
-该引脚在系统内中断控制器的所有输入引脚中的编号。对于 ISA 总线中的情况,
-枚举的是在两个 i8259 中断控制器中 16 个输入引脚。
-
-架构可以对 IRQ 编号指定额外的含义,在硬件涉及任何手工配置的情况下,
-是被提倡的。ISA 的 IRQ 是一个分配这类额外含义的典型例子。
diff --git a/Documentation/translations/zh_CN/PCI/acpi-info.rst b/Documentation/translations/zh_CN/PCI/acpi-info.rst
new file mode 100644
index 000000000000..a35f39dcd858
--- /dev/null
+++ b/Documentation/translations/zh_CN/PCI/acpi-info.rst
@@ -0,0 +1,139 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/PCI/acpi-info.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+
+=====================
+PCI主桥的ACPI注意事项
+=====================
+
+一般的规则是,ACPI命名空间应该描述操作系统可能使用的所有东西,除非有其他方法让操作系
+统找到它[1, 2]。
+
+例如,没有标准的硬件机制来枚举PCI主桥,所以ACPI命名空间必须描述每个主桥、访问它
+下面的PCI配置空间的方法、主桥转发到PCI的地址空间窗口(使用_CRS)以及传统的INTx
+中断的路由(使用_PRT)。
+
+在主桥下面的PCI设备,通常不需要通过ACPI描述。操作系统可以通过标准的PCI枚举机制来
+发现它们,使用配置访问来发现和识别设备,并读取和测量它们的BAR。然而,如果ACPI为它们
+提供电源管理或热插拔功能,或者如果设备有由平台中断控制器连接的INTx中断,需要一个_PRT
+来描述这些连接,这种情况下ACPI可以描述PCI设备。
+
+ACPI资源描述是通过ACPI命名空间中设备的_CRS对象完成的[2]。_CRS就像一个通用的PCI BAR:
+操作系统可以读取_CRS并找出正在消耗的资源,即使它没有该设备的驱动程序[3]。这一点很重要,
+因为它意味着一个旧的操作系统可以正确地工作,即使是在操作系统不知道的新设备的系统上。新设
+备可能什么都不做,但操作系统至少可以确保没有资源与它们冲突。
+
+像MCFG、HPET、ECDT等静态表,不是保留地址空间的机制。静态表是在操作系统在启动初期且在它
+能够解析ACPI命名空间之前需要知道的东西。如果定义了一个新的表,即使旧的操作系统忽略了这
+个表,它也需要正常运行。_CRS允许这样做,因为它是通用的,可以被旧的操作系统解析;而静态表
+则不允许。
+
+如果操作系统要管理一个通过ACPI描述的不可发现的设备,该设备将有一个特定的_HID/_CID,以
+告诉操作系统与之绑定的驱动程序,并且_CRS告诉操作系统和驱动程序该设备的寄存器在哪里。
+
+PCI主桥是PNP0A03或PNP0A08设备。它们的_CRS应该描述它们所消耗的所有地址空间。这包括它
+们转发到PCI总线上的所有窗口,以及不转发到PCI的主桥本身的寄存器。主桥的寄存器包括次要/下
+级总线寄存器,决定了桥下面的总线范围,窗口寄存器描述了桥洞,等等。这些都是设备相关的,非
+架构相关的东西,所以PNP0A03/PNP0A08驱动可以管理它们的唯一方法是通过_PRS/_CRS/_SRS,
+它包含了特定于设备的细节。主桥寄存器也包括ECAM空间,因为它是由主桥消耗的。
+
+ACPI定义了一个Consumer/Producer位来区分桥寄存器(“Consumer”下文译作消费者)和
+桥洞(“Producer”下文译作生产者)[4, 5],但是早期的BIOS没有正确使用这个位。其结果
+是,目前的ACPI规范只为扩展地址空间描述符定义了消费者/生产者;在旧的QWord/Word/Word地
+址空间描述符中,该位应该被忽略。因此,操作系统必须假定所有的QWord/Word/Word描述符都是
+窗口。
+
+在增加扩展地址空间描述符之前,消费者/生产者的失败意味着没有办法描述PNP0A03/PNP0A08设
+备本身的桥寄存器。解决办法是在PNP0C02捕捉器中描述桥寄存器(包括ECAM空间)[6]。
+除了ECAM之外,桥寄存器空间反正是特定于设备的,所以通用的PNP0A03/PNP0A08驱动程
+序(pci_root.c)没有必要了解它。
+
+新的架构应该能够在PNP0A03设备中使用“消费者”扩展地址空间描述符,用于桥寄存器,包括
+ECAM,尽管对[6]的严格解释可能禁止这样做。旧的x86和ia64内核假定所有的地址空间描述
+符,包括“消费者”扩展地址空间的描述符,都是窗口,所以在这些架构上以这种方式描述桥寄
+存器是不安全的。
+
+PNP0C02“主板”设备基本上是万能的。除了“不要将这些资源用于其他用途”之外,没有其他的编
+程模型。因此,PNP0C02 _CRS应该声明ACPI命名空间中(1)没有被_CRS声明的任何其他设备对
+象的地址空间,(2)不应该被OS分配给其他东西。
+
+除非有一个标准的固件接口用于配置访问,例如ia64 SAL接口[7],否则PCIe规范要求使用增强
+型配置访问方法(ECAM)。主桥消耗ECAM内存地址空间并将内存访问转换为PCI配置访问。该规范
+定义了ECAM地址空间的布局和功能;只有地址空间的基础是特定于设备的。ACPI操作系统从静态
+MCFG表或PNP0A03设备中的_CBA方法中了解基础地址。
+
+MCFG表必须描述非热插拔主桥的ECAM空间[8]。由于MCFG是一个静态表,不能通过热插拔更新,
+PNP0A03设备中的_CBA方法描述了可热插拔主桥的ECAM空间[9]。请注意,对于MCFG和_CBA,
+基址总是对应于总线0,即使桥器下面的总线范围(通过_CRS报告)不从0开始。
+
+
+[1] ACPI 6.2, sec 6.1:
+ 对于任何在非枚举类型的总线上的设备(例如,ISA总线),OSPM会枚举设备的标识符,ACPI
+ 系统固件必须为每个设备提供一个_HID对象...以使OSPM能够做到这一点。
+
+[2] ACPI 6.2, sec 3.7:
+ 操作系统枚举主板设备时,只需通过读取ACPI命名空间来寻找具有硬件ID的设备。
+
+ ACPI枚举的每个设备都包括ACPI命名空间中ACPI定义的对象,该对象报告设备可能占用的硬
+ 件资源[_PRS],报告设备当前使用的资源[_CRS]的对象,以及配置这些资源的对象[_SRS]。
+ 这些信息被即插即用操作系统(OSPM)用来配置设备。
+
+[3] ACPI 6.2, sec 6.2:
+ OSPM使用设备配置对象来配置通过ACPI列举的设备的硬件资源。设备配置对象提供了关于当前
+ 和可能的资源需求的信息,共享资源之间的关系,以及配置硬件资源的方法。
+
+ 当OSPM枚举一个设备时,它调用_PRS来确定该设备的资源需求。它也可以调用_CRS来找到该设
+ 备的当前资源设置。利用这些信息,即插即用系统决定设备应该消耗什么资源,并通过调用设备
+ 的_SRS控制方法来设置这些资源。
+
+ 在ACPI中,设备可以消耗资源(例如,传统的键盘),提供资源(例如,一个专有的PCI桥),
+ 或者两者都做。除非另有规定,设备的资源被假定为来自设备层次结构中设备上方最近的匹配资
+ 源。
+
+[4] ACPI 6.2, sec 6.4.3.5.1, 2, 3, 4:
+ QWord/DWord/Word 地址空间描述符 (.1, .2, .3)
+ 常规标志: Bit [0] 被忽略。
+
+ 扩展地址空间描述符 (.4)
+ 常规标志: Bit [0] 消费者/生产者:
+
+ * 1 – 这个设备消费这个资源
+ * 0 – 该设备生产和消费该资源
+
+[5] ACPI 6.2, sec 19.6.43:
+ ResourceUsage指定内存范围是由这个设备(ResourceConsumer)消费还是传递给子设备
+ (ResourceProducer)。如果没有指定,那么就假定是ResourceConsumer。
+
+[6] PCI Firmware 3.2, sec 4.1.2:
+ 如果操作系统不能原生的懂得保留MMCFG区域,MMCFG区域必须由固件保留。在MCFG表中或通
+ 过_CBA方法(见第4.1.3节)报告的地址范围必须通过声明主板资源来保留。对于大多数系统,
+ 主板资源将出现在ACPI命名空间的根部(在_SB下),在一个节点的_HID为EISAID(PNP0C0
+ 2),在这种情况下的资源不应该要求在根PCI总线的_CRS。这些资源可以选择在Int15 E820
+ 或EFIGetMemoryMap中作为保留内存返回,但必须始终通过ACPI作为主板资源报告。
+
+[7] PCI Express 4.0, sec 7.2.2:
+ 对于PC兼容的系统,或者没有实现允许访问配置空间的处理器架构特定固件接口标准的系统,需
+ 要使用本节中定义的ECAM。
+
+[8] PCI Firmware 3.2, sec 4.1.2:
+   MCFG表是一个ACPI表,用于在启动时向操作系统传达系统中非热插拔的PCI段组所对应
+   的基址。这对PC兼容系统来说是必需的。
+
+ MCFG表仅用于沟通在启动时系统可用的PCI段组对应的基址。
+
+[9] PCI Firmware 3.2, sec 4.1.3:
+ _CBA (Memory mapped Configuration Base Address) 控制方法是一个可选的ACPI对
+ 象,用于返回热插拔主桥的64位内存映射的配置基址。_CBA 返回的基址是与处理器相关的地址。
+ _CBA 控制方法被评估为一个整数。
+
+ 这个控制方法出现在主桥对象下。当_CBA方法出现在一个活动的主桥对象下时,操作系统会评
+ 估这个结构,以确定内存映射的配置基址,对应于_CRS方法中指定的总线编号范围的PCI段组。
+ 一个包含_CBA方法的ACPI命名空间对象也必须包含一个相应的_SEG方法。
diff --git a/Documentation/translations/zh_CN/PCI/index.rst b/Documentation/translations/zh_CN/PCI/index.rst
index 16acb2bd9b58..cbeb33c34a98 100644
--- a/Documentation/translations/zh_CN/PCI/index.rst
+++ b/Documentation/translations/zh_CN/PCI/index.rst
@@ -10,9 +10,6 @@
:校译:
-
-.. _cn_PCI_index.rst:
-
===================
Linux PCI总线子系统
===================
@@ -26,12 +23,12 @@ Linux PCI总线子系统
pci-iov-howto
msi-howto
sysfs-pci
+ acpi-info
Todolist:
- acpi-info
- pci-error-recovery
- pcieaer-howto
- endpoint/index
- boot-interrupts
+* pci-error-recovery
+* pcieaer-howto
+* endpoint/index
+* boot-interrupts
diff --git a/Documentation/translations/zh_CN/admin-guide/README.rst b/Documentation/translations/zh_CN/admin-guide/README.rst
index d20949e8bf6f..e679cbc3c89d 100644
--- a/Documentation/translations/zh_CN/admin-guide/README.rst
+++ b/Documentation/translations/zh_CN/admin-guide/README.rst
@@ -6,10 +6,10 @@
吴想成 Wu XiangCheng <[email protected]>
-Linux内核5.x版本 <http://kernel.org/>
+Linux内核6.x版本 <http://kernel.org/>
=========================================
-以下是Linux版本5的发行注记。仔细阅读它们,
+以下是Linux版本6的发行注记。仔细阅读它们,
它们会告诉你这些都是什么,解释如何安装内核,以及遇到问题时该如何做。
什么是Linux?
@@ -61,27 +61,27 @@ Linux内核5.x版本 <http://kernel.org/>
- 如果您要安装完整的源代码,请把内核tar档案包放在您有权限的目录中(例如您
的主目录)并将其解包::
- xz -cd linux-5.x.tar.xz | tar xvf -
+ xz -cd linux-6.x.tar.xz | tar xvf -
将“X”替换成最新内核的版本号。
【不要】使用 /usr/src/linux 目录!这里有一组库头文件使用的内核头文件
(通常是不完整的)。它们应该与库匹配,而不是被内核的变化搞得一团糟。
- - 您还可以通过打补丁在5.x版本之间升级。补丁以xz格式分发。要通过打补丁进行
- 安装,请获取所有较新的补丁文件,进入内核源代码(linux-5.x)的目录并
+ - 您还可以通过打补丁在6.x版本之间升级。补丁以xz格式分发。要通过打补丁进行
+ 安装,请获取所有较新的补丁文件,进入内核源代码(linux-6.x)的目录并
执行::
- xz -cd ../patch-5.x.xz | patch -p1
+ xz -cd ../patch-6.x.xz | patch -p1
请【按顺序】替换所有大于当前源代码树版本的“x”,这样就可以了。您可能想要
删除备份文件(文件名类似xxx~ 或 xxx.orig),并确保没有失败的补丁(文件名
类似xxx# 或 xxx.rej)。如果有,不是你就是我犯了错误。
- 与5.x内核的补丁不同,5.x.y内核(也称为稳定版内核)的补丁不是增量的,而是
- 直接应用于基本的5.x内核。例如,如果您的基本内核是5.0,并且希望应用5.0.3
- 补丁,则不应先应用5.0.1和5.0.2的补丁。类似地,如果您运行的是5.0.2内核,
- 并且希望跳转到5.0.3,那么在应用5.0.3补丁之前,必须首先撤销5.0.2补丁
+ 与6.x内核的补丁不同,6.x.y内核(也称为稳定版内核)的补丁不是增量的,而是
+ 直接应用于基本的6.x内核。例如,如果您的基本内核是6.0,并且希望应用6.0.3
+ 补丁,则不应先应用6.0.1和6.0.2的补丁。类似地,如果您运行的是6.0.2内核,
+ 并且希望跳转到6.0.3,那么在应用6.0.3补丁之前,必须首先撤销6.0.2补丁
(即patch -R)。更多关于这方面的内容,请阅读
:ref:`Documentation/process/applying-patches.rst <applying_patches>` 。
@@ -103,7 +103,7 @@ Linux内核5.x版本 <http://kernel.org/>
软件要求
---------
- 编译和运行5.x内核需要各种软件包的最新版本。请参考
+ 编译和运行6.x内核需要各种软件包的最新版本。请参考
:ref:`Documentation/process/changes.rst <changes>`
来了解最低版本要求以及如何升级软件包。请注意,使用过旧版本的这些包可能会
导致很难追踪的间接错误,因此不要以为在生成或操作过程中出现明显问题时可以
@@ -116,12 +116,12 @@ Linux内核5.x版本 <http://kernel.org/>
``make O=output/dir`` 选项可以为输出文件(包括 .config)指定备用位置。
例如::
- kernel source code: /usr/src/linux-5.x
+ kernel source code: /usr/src/linux-6.x
build directory: /home/name/build/kernel
要配置和构建内核,请使用::
- cd /usr/src/linux-5.x
+ cd /usr/src/linux-6.x
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install
@@ -227,8 +227,6 @@ Linux内核5.x版本 <http://kernel.org/>
- 确保您至少有gcc 5.1可用。
有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。
- 请注意,您仍然可以使用此内核运行a.out用户程序。
-
- 执行 ``make`` 来创建压缩内核映像。如果您安装了lilo以适配内核makefile,
那么也可以进行 ``make install`` ,但是您可能需要先检查特定的lilo设置。
@@ -282,67 +280,12 @@ Linux内核5.x版本 <http://kernel.org/>
若遇到问题
-----------
- - 如果您发现了一些可能由于内核缺陷所导致的问题,请检查MAINTAINERS(维护者)
- 文件看看是否有人与令您遇到麻烦的内核部分相关。如果无人在此列出,那么第二
- 个最好的方案就是把它们发给我([email protected]),也可能发送
- 到任何其他相关的邮件列表或新闻组。
-
- - 在所有的缺陷报告中,【请】告诉我们您在说什么内核,如何复现问题,以及您的
- 设置是什么的(使用您的常识)。如果问题是新的,请告诉我;如果问题是旧的,
- 请尝试告诉我您什么时候首次注意到它。
-
- - 如果缺陷导致如下消息::
-
- unable to handle kernel paging request at address C0000010
- Oops: 0002
- EIP: 0010:XXXXXXXX
- eax: xxxxxxxx ebx: xxxxxxxx ecx: xxxxxxxx edx: xxxxxxxx
- esi: xxxxxxxx edi: xxxxxxxx ebp: xxxxxxxx
- ds: xxxx es: xxxx fs: xxxx gs: xxxx
- Pid: xx, process nr: xx
- xx xx xx xx xx xx xx xx xx xx
-
- 或者类似的内核调试信息显示在屏幕上或在系统日志里,请【如实】复制它。
- 可能对你来说转储(dump)看起来不可理解,但它确实包含可能有助于调试问题的
- 信息。转储上方的文本也很重要:它说明了内核转储代码的原因(在上面的示例中,
- 是由于内核指针错误)。更多关于如何理解转储的信息,请参见
- Documentation/admin-guide/bug-hunting.rst。
-
- - 如果使用 CONFIG_KALLSYMS 编译内核,则可以按原样发送转储,否则必须使用
- ``ksymoops`` 程序来理解转储(但通常首选使用CONFIG_KALLSYMS编译)。
- 此实用程序可从
- https://www.kernel.org/pub/linux/utils/kernel/ksymoops/ 下载。
- 或者,您可以手动执行转储查找:
-
- - 在调试像上面这样的转储时,如果您可以查找EIP值的含义,这将非常有帮助。
- 十六进制值本身对我或其他任何人都没有太大帮助:它会取决于特定的内核设置。
- 您应该做的是从EIP行获取十六进制值(忽略 ``0010:`` ),然后在内核名字列表
- 中查找它,以查看哪个内核函数包含有问题的地址。
-
- 要找到内核函数名,您需要找到与显示症状的内核相关联的系统二进制文件。就是
- 文件“linux/vmlinux”。要提取名字列表并将其与内核崩溃中的EIP进行匹配,
- 请执行::
-
- nm vmlinux | sort | less
-
- 这将为您提供一个按升序排序的内核地址列表,从中很容易找到包含有问题的地址
- 的函数。请注意,内核调试消息提供的地址不一定与函数地址完全匹配(事实上,
- 这是不可能的),因此您不能只“grep”列表:不过列表将为您提供每个内核函数
- 的起点,因此通过查找起始地址低于你正在搜索的地址,但后一个函数的高于的
- 函数,你会找到您想要的。实际上,在您的问题报告中加入一些“上下文”可能是
- 一个好主意,给出相关的上下几行。
-
- 如果您由于某些原因无法完成上述操作(如您使用预编译的内核映像或类似的映像),
- 请尽可能多地告诉我您的相关设置信息,这会有所帮助。有关详细信息请阅读
- ‘Documentation/admin-guide/reporting-issues.rst’。
-
- - 或者,您可以在正在运行的内核上使用gdb(只读的;即不能更改值或设置断点)。
- 为此,请首先使用-g编译内核;适当地编辑arch/x86/Makefile,然后执行 ``make
- clean`` 。您还需要启用CONFIG_PROC_FS(通过 ``make config`` )。
-
- 使用新内核重新启动后,执行 ``gdb vmlinux /proc/kcore`` 。现在可以使用所有
- 普通的gdb命令。查找系统崩溃点的命令是 ``l *0xXXXXXXXX`` (将xxx替换为EIP
- 值)。
-
- 用gdb无法调试一个当前未运行的内核是由于gdb(错误地)忽略了编译内核的起始
- 偏移量。
+如果您发现了一些可能由于内核缺陷所导致的问题,请参阅:
+Documentation/translations/zh_CN/admin-guide/reporting-issues.rst 。
+
+想要理解内核错误报告,请参阅:
+Documentation/translations/zh_CN/admin-guide/bug-hunting.rst 。
+
+更多用GDB调试内核的信息,请参阅:
+Documentation/translations/zh_CN/dev-tools/gdb-kernel-debugging.rst
+和 Documentation/dev-tools/kgdb.rst 。
diff --git a/Documentation/translations/zh_CN/admin-guide/bootconfig.rst b/Documentation/translations/zh_CN/admin-guide/bootconfig.rst
new file mode 100644
index 000000000000..072d17f5f199
--- /dev/null
+++ b/Documentation/translations/zh_CN/admin-guide/bootconfig.rst
@@ -0,0 +1,293 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/admin-guide/bootconfig.rst
+
+:译者: 吴想成 Wu XiangCheng <[email protected]>
+
+========
+引导配置
+========
+
+:作者: Masami Hiramatsu <[email protected]>
+
+概述
+====
+
+引导配置扩展了现有的内核命令行,以一种更有效率的方式在引导内核时进一步支持
+键值数据。这允许管理员传递一份结构化关键字的配置文件。
+
+配置文件语法
+============
+
+引导配置文件的语法采用非常简单的键值结构。每个关键字由点连接的单词组成,键
+和值由 ``=`` 连接。值以分号( ``;`` )或换行符( ``\n`` )结尾。数组值中每
+个元素由逗号( ``,`` )分隔。::
+
+ KEY[.WORD[...]] = VALUE[, VALUE2[...]][;]
+
+与内核命令行语法不同,逗号和 ``=`` 周围允许有空格。
+
+关键字只允许包含字母、数字、连字符( ``-`` )和下划线( ``_`` )。值可包含
+可打印字符和空格,但分号( ``;`` )、换行符( ``\n`` )、逗号( ``,`` )、
+井号( ``#`` )和右大括号( ``}`` )等分隔符除外。
+
+如果你需要在值中使用这些分隔符,可以用双引号( ``"VALUE"`` )或单引号
+( ``'VALUE'`` )括起来。注意,引号无法转义。
+
+键的值可以为空或不存在。这些键用于检查该键是否存在(类似布尔值)。
+
+键值语法
+--------
+
+引导配置文件语法允许用户通过大括号合并键名部分相同的关键字。例如::
+
+ foo.bar.baz = value1
+ foo.bar.qux.quux = value2
+
+也可以写成::
+
+ foo.bar {
+ baz = value1
+ qux.quux = value2
+ }
+
+或者更紧凑一些,写成::
+
+ foo.bar { baz = value1; qux.quux = value2 }
+
+在这两种样式中,引导解析时相同的关键字都会自动合并。因此可以追加类似的树或
+键值。
+
+相同关键字的值
+--------------
+
+禁止两个或多个值或数组共享同一个关键字。例如::
+
+ foo = bar, baz
+ foo = qux # !错误! 我们不可以重定义相同的关键字
+
+如果你想要更新值,必须显式使用覆盖操作符 ``:=`` 。例如::
+
+ foo = bar, baz
+ foo := qux
+
+这样 ``foo`` 关键字的值就变成了 ``qux`` 。这对于通过添加(部分)自定义引导
+配置来覆盖默认值非常有用,免于解析默认引导配置。
+
+如果你想对现有关键字追加值作为数组成员,可以使用 ``+=`` 操作符。例如::
+
+ foo = bar, baz
+ foo += qux
+
+这样, ``foo`` 关键字就同时拥有了 ``bar`` , ``baz`` 和 ``qux`` 。
+
+此外,父关键字下可同时存在值和子关键字。
+例如,下列配置是可行的。::
+
+ foo = value1
+ foo.bar = value2
+ foo := value3 # 这会更新foo的值。
+
+注意,裸值不能直接放进结构化关键字中,必须在大括号外定义它。例如::
+
+ foo {
+ bar = value1
+ bar {
+ baz = value2
+ qux = value3
+ }
+ }
+
+同时,关键字下值节点的顺序是固定的。如果值和子关键字同时存在,值永远是该关
+键字的第一个子节点。因此如果用户先指定子关键字,如::
+
+ foo.bar = value1
+ foo = value2
+
+则在程序(和/proc/bootconfig)中,它会按如下显示::
+
+ foo = value2
+ foo.bar = value1
+
+注释
+----
+
+配置语法接受shell脚本风格的注释。注释以井号( ``#`` )开始,到换行符
+( ``\n`` )结束。
+
+::
+
+ # comment line
+ foo = value # value is set to foo.
+ bar = 1, # 1st element
+ 2, # 2nd element
+ 3 # 3rd element
+
+会被解析为::
+
+ foo = value
+ bar = 1, 2, 3
+
+注意你不能把注释放在值和分隔符( ``,`` 或 ``;`` )之间。如下配置语法是错误的::
+
+ key = 1 # comment
+ ,2
+
+
+/proc/bootconfig
+================
+
+/proc/bootconfig是引导配置的用户空间接口。与/proc/cmdline不同,此文件内容以
+键值列表样式显示。
+每个键值对一行,样式如下::
+
+ KEY[.WORDS...] = "[VALUE]"[,"VALUE2"...]
+
+
+用引导配置引导内核
+==================
+
+用引导配置引导内核有两种方法:将引导配置附加到initrd镜像或直接嵌入内核中。
+
+*initrd: initial RAM disk,初始内存磁盘*
+
+将引导配置附加到initrd
+----------------------
+
+由于默认情况下引导配置文件是用initrd加载的,因此它将被添加到initrd(initramfs)
+镜像文件的末尾,其中包含填充、大小、校验值和12字节幻数,如下所示::
+
+ [initrd][bootconfig][padding][size(le32)][checksum(le32)][#BOOTCONFIG\n]
+
+大小和校验值为小端序存放的32位无符号值。
+
+当引导配置被加到initrd镜像时,整个文件大小会对齐到4字节。空字符( ``\0`` )
+会填补对齐空隙。因此 ``size`` 就是引导配置文件的长度+填充的字节。
+
+Linux内核在内存中解码initrd镜像的最后部分以获取引导配置数据。由于这种“背负式”
+的方法,只要引导加载器传递了正确的initrd文件大小,就无需更改或更新引导加载器
+和内核镜像本身。如果引导加载器意外传递了更长的大小,内核将无法找到引导配置数
+据。
+
+Linux内核在tools/bootconfig下提供了 ``bootconfig`` 命令来完成此操作,管理员
+可以用它从initrd镜像中删除或追加配置文件。你可以用以下命令来构建它::
+
+ # make -C tools/bootconfig
+
+要向initrd镜像添加你的引导配置文件,请按如下命令操作(旧数据会自动移除)::
+
+ # tools/bootconfig/bootconfig -a your-config /boot/initrd.img-X.Y.Z
+
+要从镜像中移除配置,可以使用-d选项::
+
+ # tools/bootconfig/bootconfig -d /boot/initrd.img-X.Y.Z
+
+然后在内核命令行上添加 ``bootconfig`` 告诉内核去initrd文件末尾寻找内核配置。
+
+将引导配置嵌入内核
+------------------
+
+如果你不能使用initrd,也可以通过Kconfig选项将引导配置文件嵌入内核中。在此情
+况下,你需要用以下选项重新编译内核::
+
+ CONFIG_BOOT_CONFIG_EMBED=y
+ CONFIG_BOOT_CONFIG_EMBED_FILE="/引导配置/文件/的/路径"
+
+``CONFIG_BOOT_CONFIG_EMBED_FILE`` 需要从源码树或对象树开始的引导配置文件的
+绝对/相对路径。内核会将其嵌入作为默认引导配置。
+
+与将引导配置附加到initrd一样,你也需要在内核命令行上添加 ``bootconfig`` 告诉
+内核去启用内嵌的引导配置。
+
+注意,即使你已经设置了此选项,仍可用附加到initrd的其他引导配置覆盖内嵌的引导
+配置。
+
+通过引导配置传递内核参数
+========================
+
+除了内核命令行,引导配置也可以用于传递内核参数。所有 ``kernel`` 关键字下的键
+值对都将直接传递给内核命令行。此外, ``init`` 下的键值对将通过命令行传递给
+init进程。参数按以下顺序与用户给定的内核命令行字符串相连,因此命令行参数可以
+覆盖引导配置参数(这取决于子系统如何处理参数,但通常前面的参数将被后面的参数
+覆盖)::
+
+ [bootconfig params][cmdline params] -- [bootconfig init params][cmdline init params]
+
+如果引导配置文件给出的kernel/init参数是::
+
+ kernel {
+ root = 01234567-89ab-cdef-0123-456789abcd
+ }
+ init {
+ splash
+ }
+
+这将被复制到内核命令行字符串中,如下所示::
+
+ root="01234567-89ab-cdef-0123-456789abcd" -- splash
+
+如果用户给出的其他命令行是::
+
+ ro bootconfig -- quiet
+
+则最后的内核命令行如下::
+
+ root="01234567-89ab-cdef-0123-456789abcd" ro bootconfig -- splash quiet
+
+
+配置文件的限制
+==============
+
+当前最大的配置大小是32KB,关键字总数(不是键值条目)必须少于1024个节点。
+注意:这不是条目数而是节点数,条目必须消耗超过2个节点(一个关键字和一个值)。
+所以从理论上讲最多512个键值对。如果关键字平均包含3个单词,则可有256个键值对。
+在大多数情况下,配置项的数量将少于100个条目,小于8KB,因此这应该足够了。如果
+节点数超过1024,解析器将返回错误,即使文件大小小于32KB。(请注意,此最大尺寸
+不包括填充的空字符。)
+无论如何,因为 ``bootconfig`` 命令在附加启动配置到initrd映像时会验证它,用户
+可以在引导之前注意到它。
+
+
+引导配置API
+===========
+
+用户可以查询或遍历键值对,也可以查找(前缀)根关键字节点,并在查找该节点下的
+键值。
+
+如果您有一个关键字字符串,则可以直接使用 xbc_find_value() 查询该键的值。如果
+你想知道引导配置里有哪些关键字,可以使用 xbc_for_each_key_value() 迭代键值对。
+请注意,您需要使用 xbc_array_for_each_value() 访问数组的值,例如::
+
+ vnode = NULL;
+ xbc_find_value("key.word", &vnode);
+ if (vnode && xbc_node_is_array(vnode))
+ xbc_array_for_each_value(vnode, value) {
+ printk("%s ", value);
+ }
+
+如果您想查找具有前缀字符串的键,可以使用 xbc_find_node() 通过前缀字符串查找
+节点,然后用 xbc_node_for_each_key_value() 迭代前缀节点下的键。
+
+但最典型的用法是获取前缀下的命名值或前缀下的命名数组,例如::
+
+ root = xbc_find_node("key.prefix");
+ value = xbc_node_find_value(root, "option", &vnode);
+ ...
+ xbc_node_for_each_array_value(root, "array-option", value, anode) {
+ ...
+ }
+
+这将访问值“key.prefix.option”的值和“key.prefix.array-option”的数组。
+
+锁是不需要的,因为在初始化之后配置只读。如果需要修改,必须复制所有数据和关键字。
+
+
+函数与结构体
+============
+
+相关定义的kernel-doc参见:
+
+ - include/linux/bootconfig.h
+ - lib/bootconfig.c
diff --git a/Documentation/translations/zh_CN/admin-guide/index.rst b/Documentation/translations/zh_CN/admin-guide/index.rst
index 2f6970d0a032..ac2960da33e6 100644
--- a/Documentation/translations/zh_CN/admin-guide/index.rst
+++ b/Documentation/translations/zh_CN/admin-guide/index.rst
@@ -63,6 +63,7 @@ Todolist:
.. toctree::
:maxdepth: 1
+ bootconfig
clearing-warn-once
cpu-load
cputopology
@@ -80,7 +81,6 @@ Todolist:
* binderfs
* binfmt-misc
* blockdev/index
-* bootconfig
* braille-console
* btmrvl
* cgroup-v1/index
diff --git a/Documentation/translations/zh_CN/core-api/circular-buffers.rst b/Documentation/translations/zh_CN/core-api/circular-buffers.rst
new file mode 100644
index 000000000000..694ad8e61070
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/circular-buffers.rst
@@ -0,0 +1,210 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/circular-buffers.rst
+
+:翻译:
+
+ 周彬彬 Binbin Zhou <[email protected]>
+
+:校译:
+
+ 司延腾 Yanteng Si <[email protected]>
+ 吴想成 Wu Xiangcheng <[email protected]>
+ 时奎亮 Alex Shi <[email protected]>
+
+==========
+环形缓冲区
+==========
+
+:作者: David Howells <[email protected]>
+:作者: Paul E. McKenney <[email protected]>
+
+
+Linux 提供了许多可用于实现循环缓冲的特性。有两组这样的特性:
+
+ (1) 用于确定2次方大小的缓冲区信息的便利函数。
+
+ (2) 可以代替缓冲区中对象的生产者和消费者共享锁的内存屏障。
+
+如下所述,要使用这些设施,只需要一个生产者和一个消费者。可以通过序列化来处理多个
+生产者,并通过序列化来处理多个消费者。
+
+.. Contents:
+
+ (*) 什么是环形缓冲区?
+
+ (*) 测量2次幂缓冲区
+
+ (*) 内存屏障与环形缓冲区的结合使用
+ - 生产者
+ - 消费者
+
+ (*) 延伸阅读
+
+
+
+什么是环形缓冲区?
+==================
+
+首先,什么是环形缓冲区?环形缓冲区是具有固定的有限大小的缓冲区,它有两个索引:
+
+ (1) 'head'索引 - 生产者将元素插入缓冲区的位置。
+
+ (2) 'tail'索引 - 消费者在缓冲区中找到下一个元素的位置。
+
+通常,当tail指针等于head指针时,表明缓冲区是空的;而当head指针比tail指针少一个时,
+表明缓冲区是满的。
+
+添加元素时,递增head索引;删除元素时,递增tail索引。tail索引不应该跳过head索引,
+两个索引在到达缓冲区末端时都应该被赋值为0,从而允许海量的数据流过缓冲区。
+
+通常情况下,元素都有相同的单元大小,但这并不是使用以下技术的严格要求。如果要在缓
+冲区中包含多个元素或可变大小的元素,则索引可以增加超过1,前提是两个索引都没有超过
+另一个。然而,实现者必须小心,因为超过一个单位大小的区域可能会覆盖缓冲区的末端并
+且缓冲区会被分成两段。
+
+测量2次幂缓冲区
+===============
+
+计算任意大小的环形缓冲区的占用或剩余容量通常是一个费时的操作,需要使用模(除法)
+指令。但是如果缓冲区的大小为2次幂,则可以使用更快的按位与指令代替。
+
+Linux提供了一组用于处理2次幂环形缓冲区的宏。可以通过以下方式使用::
+
+ #include <linux/circ_buf.h>
+
+这些宏包括:
+
+ (#) 测量缓冲区的剩余容量::
+
+ CIRC_SPACE(head_index, tail_index, buffer_size);
+
+ 返回缓冲区[1]中可插入元素的剩余空间大小。
+
+
+ (#) 测量缓冲区中的最大连续立即可用空间::
+
+ CIRC_SPACE_TO_END(head_index, tail_index, buffer_size);
+
+ 返回缓冲区[1]中剩余的连续空间的大小,元素可以立即插入其中,而不必绕回到缓冲
+ 区的开头。
+
+
+ (#) 测量缓冲区的使用数::
+
+ CIRC_CNT(head_index, tail_index, buffer_size);
+
+ 返回当前占用缓冲区[2]的元素数量。
+
+
+ (#) 测量缓冲区的连续使用数::
+
+ CIRC_CNT_TO_END(head_index, tail_index, buffer_size);
+
+ 返回可以从缓冲区中提取的连续元素[2]的数量,而不必绕回到缓冲区的开头。
+
+这里的每一个宏名义上都会返回一个介于0和buffer_size-1之间的值,但是:
+
+ (1) CIRC_SPACE*()是为了在生产者中使用。对生产者来说,它们将返回一个下限,因为生
+ 产者控制着head索引,但消费者可能仍然在另一个CPU上耗尽缓冲区并移动tail索引。
+
+ 对消费者来说,它将显示一个上限,因为生产者可能正忙于耗尽空间。
+
+ (2) CIRC_CNT*()是为了在消费者中使用。对消费者来说,它们将返回一个下限,因为消费
+ 者控制着tail索引,但生产者可能仍然在另一个CPU上填充缓冲区并移动head索引。
+
+ 对于生产者,它将显示一个上限,因为消费者可能正忙于清空缓冲区。
+
+ (3) 对于第三方来说,生产者和消费者对索引的写入顺序是无法保证的,因为它们是独立的,
+ 而且可能是在不同的CPU上进行的,所以在这种情况下的结果只是一种猜测,甚至可能
+ 是错误的。
+
+内存屏障与环形缓冲区的结合使用
+==============================
+
+通过将内存屏障与环形缓冲区结合使用,可以避免以下需求:
+
+ (1) 使用单个锁来控制对缓冲区两端的访问,从而允许同时填充和清空缓冲区;以及
+
+ (2) 使用原子计数器操作。
+
+这有两个方面:填充缓冲区的生产者和清空缓冲区的消费者。在任何时候,只应有一个生产
+者在填充缓冲区,同样的也只应有一个消费者在清空缓冲区,但双方可以同时操作。
+
+
+生产者
+------
+
+生产者看起来像这样::
+
+ spin_lock(&producer_lock);
+
+ unsigned long head = buffer->head;
+ /* spin_unlock()和下一个spin_lock()提供必要的排序。 */
+ unsigned long tail = READ_ONCE(buffer->tail);
+
+ if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
+ /* 添加一个元素到缓冲区 */
+ struct item *item = buffer[head];
+
+ produce_item(item);
+
+ smp_store_release(buffer->head,
+ (head + 1) & (buffer->size - 1));
+
+ /* wake_up()将确保在唤醒任何人之前提交head */
+ wake_up(consumer);
+ }
+
+ spin_unlock(&producer_lock);
+
+这将表明CPU必须在head索引使其对消费者可用之前写入新项目的内容,同时CPU必须在唤醒
+消费者之前写入修改后的head索引。
+
+请注意,wake_up()并不保证任何形式的屏障,除非确实唤醒了某些东西。因此我们不能依靠
+它来进行排序。但是数组中始终有一个元素留空,因此生产者必须产生两个元素,然后才可
+能破坏消费者当前正在读取的元素。同时,消费者连续调用之间成对的解锁-加锁提供了索引
+读取(指示消费者已清空给定元素)和生产者对该相同元素的写入之间的必要顺序。
+
+
+消费者
+------
+
+消费者看起来像这样::
+
+ spin_lock(&consumer_lock);
+
+ /* 读取该索引处的内容之前,先读取索引 */
+ unsigned long head = smp_load_acquire(buffer->head);
+ unsigned long tail = buffer->tail;
+
+ if (CIRC_CNT(head, tail, buffer->size) >= 1) {
+
+ /* 从缓冲区中提取一个元素 */
+ struct item *item = buffer[tail];
+
+ consume_item(item);
+
+ /* 在递增tail之前完成对描述符的读取。 */
+ smp_store_release(buffer->tail,
+ (tail + 1) & (buffer->size - 1));
+ }
+
+ spin_unlock(&consumer_lock);
+
+这表明CPU在读取新元素之前确保索引是最新的,然后在写入新的尾指针之前应确保CPU已完
+成读取该元素,这将擦除该元素。
+
+请注意,使用READ_ONCE()和smp_load_acquire()来读取反向(head)索引。这可以防止编译
+器丢弃并重新加载其缓存值。如果您能确定反向(head)索引将仅使用一次,则这不是必须
+的。smp_load_acquire()还可以强制CPU对后续的内存引用进行排序。类似地,两种算法都使
+用smp_store_release()来写入线程的索引。这记录了我们正在写入可以并发读取的内容的事
+实,以防止编译器破坏存储,并强制对以前的访问进行排序。
+
+
+延伸阅读
+========
+
+关于Linux的内存屏障设施的描述,请查看Documentation/memory-barriers.txt。
diff --git a/Documentation/translations/zh_CN/core-api/generic-radix-tree.rst b/Documentation/translations/zh_CN/core-api/generic-radix-tree.rst
new file mode 100644
index 000000000000..eacd1d2ebddc
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/generic-radix-tree.rst
@@ -0,0 +1,23 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/generic-radix-tree.rst
+
+:翻译:
+
+ 周彬彬 Binbin Zhou <[email protected]>
+
+===================
+通用基数树/稀疏数组
+===================
+
+通用基数树/稀疏数组的相关内容请见include/linux/generic-radix-tree.h文件中的
+“DOC: Generic radix trees/sparse arrays”。
+
+通用基数树函数
+--------------
+
+该API在以下内核代码中:
+
+include/linux/generic-radix-tree.h
diff --git a/Documentation/translations/zh_CN/core-api/idr.rst b/Documentation/translations/zh_CN/core-api/idr.rst
new file mode 100644
index 000000000000..97a16e76b81b
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/idr.rst
@@ -0,0 +1,80 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/idr.rst
+
+:翻译:
+
+ 周彬彬 Binbin Zhou <[email protected]>
+
+:校译:
+
+ 司延腾 Yanteng Si <[email protected]>
+ 吴想成 Wu Xiangcheng <[email protected]>
+ 时奎亮 Alex Shi <[email protected]>
+
+======
+ID分配
+======
+
+:作者: Matthew Wilcox
+
+概述
+====
+
+要解决的一个常见问题是分配标识符(IDs);它通常是标识事物的数字。比如包括文件描述
+符、进程ID、网络协议中的数据包标识符、SCSI标记和设备实例编号。IDR和IDA为这个问题
+提供了一个合理的解决方案,以避免每个人都自创。IDR提供将ID映射到指针的能力,而IDA
+仅提供ID分配,因此内存效率更高。
+
+IDR接口已经被废弃,请使用 ``XArray`` 。
+
+IDR的用法
+=========
+
+首先初始化一个IDR,对于静态分配的IDR使用DEFINE_IDR(),或者对于动态分配的IDR使用
+idr_init()。
+
+您可以调用idr_alloc()来分配一个未使用的ID。通过调用idr_find()查询与该ID相关的指针,
+并通过调用idr_remove()释放该ID。
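+
+下面是一个最小的示意性草图(example_dev 结构体及相关函数名均为假设,并非取自内核
+源码),演示上述分配、查询与释放的基本流程::
+
+	static DEFINE_IDR(example_idr);
+
+	int example_register(struct example_dev *dev)
+	{
+		int id;
+
+		/* 分配一个未使用的ID并将其与dev指针关联;end为0表示不设上限 */
+		id = idr_alloc(&example_idr, dev, 0, 0, GFP_KERNEL);
+		if (id < 0)
+			return id;
+
+		dev->id = id;
+		return 0;
+	}
+
+	void example_unregister(struct example_dev *dev)
+	{
+		/* 释放该ID,此后idr_find()不会再返回dev */
+		idr_remove(&example_idr, dev->id);
+	}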
+
+如果需要更改与一个ID相关联的指针,可以调用idr_replace()。这样做的一个常见原因是通
+过将 ``NULL`` 指针传递给分配函数来保留ID;用保留的ID初始化对象,最后将初始化的对
+象插入IDR。
+
+一些用户需要分配大于 ``INT_MAX`` 的ID。到目前为止,所有这些用户都满足 ``UINT_MAX``
+的限制,他们使用idr_alloc_u32()。如果您需要超出u32的ID,我们将与您合作以满足您的
+需求。
+
+如果需要按顺序分配ID,可以使用idr_alloc_cyclic()。处理较大数量的ID时,IDR的效率会
+降低,所以使用这个函数会有一点代价。
+
+要对IDR使用的所有指针进行操作,您可以使用基于回调的idr_for_each()或迭代器样式的
+idr_for_each_entry()。您可能需要使用idr_for_each_entry_continue()来继续迭代。如果
+迭代器不符合您的需求,您也可以使用idr_get_next()。
+
+当使用完IDR后,您可以调用idr_destroy()来释放IDR占用的内存。这并不会释放IDR指向的
+对象;如果您想这样做,请使用其中一个迭代器来执行此操作。
+
+您可以使用idr_is_empty()来查看当前是否分配了任何ID。
+
+如果在从IDR分配一个新ID时需要带锁,您可能需要传递一组限制性的GFP标志,但这可能导
+致IDR无法分配内存。为了解决该问题,您可以在获取锁之前调用idr_preload(),然后在分
+配之后调用idr_preload_end()。
+
+IDR同步的相关内容请见include/linux/idr.h文件中的“DOC: idr sync”。
+
+IDA的用法
+=========
+
+IDA的用法的相关内容请见lib/idr.c文件中的“DOC: IDA description”。
+
+函数和数据结构
+==============
+
+该API在以下内核代码中:
+
+include/linux/idr.h
+
+lib/idr.c
diff --git a/Documentation/translations/zh_CN/core-api/index.rst b/Documentation/translations/zh_CN/core-api/index.rst
index 8a94ad87465d..37756d240b5e 100644
--- a/Documentation/translations/zh_CN/core-api/index.rst
+++ b/Documentation/translations/zh_CN/core-api/index.rst
@@ -44,15 +44,15 @@
assoc_array
xarray
rbtree
+ idr
+ circular-buffers
+ generic-radix-tree
+ packing
Todolist:
- idr
- circular-buffers
- generic-radix-tree
- packing
this_cpu_ops
timekeeping
errseq
diff --git a/Documentation/translations/zh_CN/core-api/packing.rst b/Documentation/translations/zh_CN/core-api/packing.rst
new file mode 100644
index 000000000000..c0aab3a349d0
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/packing.rst
@@ -0,0 +1,160 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/packing.rst
+
+:翻译:
+
+ 周彬彬 Binbin Zhou <[email protected]>
+
+:校译:
+
+ 司延腾 Yanteng Si <[email protected]>
+ 吴想成 Wu Xiangcheng <[email protected]>
+ 时奎亮 Alex Shi <[email protected]>
+
+========================
+通用的位域打包和解包函数
+========================
+
+问题陈述
+--------
+
+使用硬件时,必须在几种与其交互的方法之间进行选择。
+
+可以将指针映射到在硬件设备的内存区上精心设计的结构体,并将其字段作为结构成员(可
+能声明为位域)访问。但是由于CPU和硬件设备之间潜在的字节顺序不匹配,以这种方式编写
+代码会降低其可移植性。
+
+此外,必须密切注意将硬件文档中的寄存器定义转换为结构的位域索引。此外,一些硬件
+(通常是网络设备)倾向于以违反任何合理字边界(有时甚至是64位)的方式对其寄存器字
+段进行分组。这就造成了不得不在结构中定义寄存器字段的“高”和“低”部分的不便。
+
+结构域定义的更可靠的替代方法是通过移动适当数量的位来提取所需的字段。但这仍然不能
+防止字节顺序不匹配,除非所有内存访问都是逐字节执行的。此外,代码很容易变得杂乱无
+章,同时可能会在所需的许多位移操作中丢失一些高层次的想法。
+
+许多驱动程序采用了位移的方法,然后试图用定制的宏来减少杂乱无章的东西,但更多的时
+候,这些宏所采用的捷径依旧妨碍了代码真正的可移植性。
+
+解决方案
+--------
+
+该API涉及2个基本操作:
+
+ - 将一个CPU可使用的数字打包到内存缓冲区中(具有硬件约束/特殊性)。
+ - 将内存缓冲区(具有硬件约束/特殊性)解压缩为一个CPU可使用的数字。
+
+该API提供了对所述硬件约束和特殊性以及CPU字节序的抽象,因此这两者之间可能不匹配。
+
+这些API函数的基本单元是u64。从CPU的角度来看,位63总是意味着字节7的位偏移量7,尽管
+只是逻辑上的。问题是:我们将这个比特放在内存的什么位置?
+
+以下示例介绍了打包u64字段的内存布局。打包缓冲区中的字节偏移量始终默认为0,1...7。
+示例显示的是逻辑字节和位所在的位置。
+
+1. 通常情况下(无特殊性),我们会这样做:
+
+::
+
+ 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
+ 7 6 5 4
+ 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ 3 2 1 0
+
+也就是说,CPU可使用的u64的MSByte(7)位于内存偏移量0处,而u64的LSByte(0)位于内存偏移量7处。
+
+这对应于大多数人认为的“大端”,其中位i对应于数字2^i。这在代码注释中也称为“逻辑”符号。
+
+
+2. 如果设置了QUIRK_MSB_ON_THE_RIGHT,我们按如下方式操作:
+
+::
+
+ 56 57 58 59 60 61 62 63 48 49 50 51 52 53 54 55 40 41 42 43 44 45 46 47 32 33 34 35 36 37 38 39
+ 7 6 5 4
+ 24 25 26 27 28 29 30 31 16 17 18 19 20 21 22 23 8 9 10 11 12 13 14 15 0 1 2 3 4 5 6 7
+ 3 2 1 0
+
+也就是说,QUIRK_MSB_ON_THE_RIGHT不会影响字节定位,但会反转字节内的位偏移量。
+
+
+3. 如果设置了QUIRK_LITTLE_ENDIAN,我们按如下方式操作:
+
+::
+
+ 39 38 37 36 35 34 33 32 47 46 45 44 43 42 41 40 55 54 53 52 51 50 49 48 63 62 61 60 59 58 57 56
+ 4 5 6 7
+ 7 6 5 4 3 2 1 0 15 14 13 12 11 10 9 8 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
+ 0 1 2 3
+
+因此,QUIRK_LITTLE_ENDIAN意味着在内存区域内,每个4字节的字的每个字节都被放置在与
+该字的边界相比的镜像位置。
+
+
+4. 如果设置了QUIRK_MSB_ON_THE_RIGHT和QUIRK_LITTLE_ENDIAN,我们这样做:
+
+::
+
+ 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+ 4 5 6 7
+ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0 1 2 3
+
+
+5. 如果只设置了QUIRK_LSW32_IS_FIRST,我们这样做:
+
+::
+
+ 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ 3 2 1 0
+ 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
+ 7 6 5 4
+
+在这种情况下,8字节内存区域解释如下:前4字节对应最不重要的4字节的字,后4字节对应
+更重要的4字节的字。
+
+6. 如果设置了QUIRK_LSW32_IS_FIRST和QUIRK_MSB_ON_THE_RIGHT,我们这样做:
+
+::
+
+ 24 25 26 27 28 29 30 31 16 17 18 19 20 21 22 23 8 9 10 11 12 13 14 15 0 1 2 3 4 5 6 7
+ 3 2 1 0
+ 56 57 58 59 60 61 62 63 48 49 50 51 52 53 54 55 40 41 42 43 44 45 46 47 32 33 34 35 36 37 38 39
+ 7 6 5 4
+
+
+7. 如果设置了QUIRK_LSW32_IS_FIRST和QUIRK_LITTLE_ENDIAN,则如下所示:
+
+::
+
+ 7 6 5 4 3 2 1 0 15 14 13 12 11 10 9 8 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
+ 0 1 2 3
+ 39 38 37 36 35 34 33 32 47 46 45 44 43 42 41 40 55 54 53 52 51 50 49 48 63 62 61 60 59 58 57 56
+ 4 5 6 7
+
+
+8. 如果设置了QUIRK_LSW32_IS_FIRST,QUIRK_LITTLE_ENDIAN和QUIRK_MSB_ON_THE_RIGHT,
+ 则如下所示:
+
+::
+
+ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ 0 1 2 3
+ 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+ 4 5 6 7
+
+
+我们总是认为我们的偏移量好像没有特殊性,然后在访问内存区域之前翻译它们。
+
+预期用途
+--------
+
+选择使用该API的驱动程序首先需要确定上述3种quirk组合(共8种)中的哪一种与硬件文档
+中描述的相匹配。然后,他们应该封装packing()函数,创建一个新的xxx_packing(),使用
+适当的QUIRK_* one-hot 位集合来调用它。
+
+packing()函数返回一个int类型的错误码,以防止程序员使用不正确的API。这些错误预计不
+会在运行时发生,因此xxx_packing()返回void并简单地接受这些错误是合理的。它可以选择
+转储栈或打印错误描述。
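+
+作为示意,下面是一个这样的封装的最小草图(具体的quirk组合只是假设,应以硬件文档
+为准;缓冲区长度在此假定为8字节)::
+
+	#include <linux/packing.h>
+
+	static void example_packing(void *buf, u64 *val, int startbit, int endbit,
+				    enum packing_op op)
+	{
+		int err;
+
+		err = packing(buf, val, startbit, endbit, 8, op,
+			      QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+		if (err)
+			pr_err("invalid packing call: %d\n", err);
+	}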
diff --git a/Documentation/translations/zh_CN/devicetree/changesets.rst b/Documentation/translations/zh_CN/devicetree/changesets.rst
new file mode 100644
index 000000000000..2ace05f3c377
--- /dev/null
+++ b/Documentation/translations/zh_CN/devicetree/changesets.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/devicetree/changesets.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+
+============
+设备树变更集
+============
+
+设备树变更集是一种方法,它允许人们以这样一种方式在实时树中使用变化,即要么使用全部的
+变化,要么不使用。如果在使用变更集的过程中发生错误,那么树将被回滚到之前的状态。一个
+变更集也可以在使用后被删除。
+
+当一个变更集被使用时,所有的改变在发出OF_RECONFIG通知器之前被一次性使用到树上。这是
+为了让接收者在收到通知时看到一个完整的、一致的树的状态。
+
+一个变化集的顺序如下。
+
+1. of_changeset_init() - 初始化一个变更集。
+
+2. 一些DT树变化的调用,of_changeset_attach_node(), of_changeset_detach_node(),
+ of_changeset_add_property(), of_changeset_remove_property,
+ of_changeset_update_property()来准备一组变更。此时不会对活动树做任何变更。所有
+ 的变更操作都记录在of_changeset的 `entries` 列表中。
+
+3. of_changeset_apply() - 将变更使用到树上。要么整个变更集被使用,要么如果有错误,
+ 树会被恢复到之前的状态。核心通过锁确保正确的顺序。如果需要的话,可以使用一个解锁的
+ __of_changeset_apply版本。
+
+如果一个成功使用的变更集需要被删除,可以用of_changeset_revert()来完成。
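+
+下面是一个示意性的调用顺序草图(np、prop等变量为假设,仅用于说明上述步骤)::
+
+	struct of_changeset ocs;
+	int ret;
+
+	/* 1. 初始化变更集 */
+	of_changeset_init(&ocs);
+
+	/* 2. 记录变更,此时实时树尚未被改动 */
+	of_changeset_add_property(&ocs, np, prop);
+
+	/* 3. 一次性使用到树上;出错时树会被恢复到之前的状态 */
+	ret = of_changeset_apply(&ocs);
+	if (ret)
+		pr_err("of_changeset_apply failed: %d\n", ret);
+
+	/* 如果之后需要撤销这组变更 */
+	of_changeset_revert(&ocs);
+	of_changeset_destroy(&ocs);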
diff --git a/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst b/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst
new file mode 100644
index 000000000000..115190341305
--- /dev/null
+++ b/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/devicetree/dynamic-resolution-notes.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+========================
+Devicetree动态解析器说明
+========================
+
+本文描述了内核内DeviceTree解析器的实现,它位于drivers/of/resolver.c中。
+
+解析器如何工作?
+----------------
+
+解析器被赋予一个任意的树作为输入,该树用适当的dtc选项编译,并有一个/plugin/标签。这就产
+生了适当的__fixups__和__local_fixups__节点。
+
+解析器依次通过以下步骤工作:
+
+1. 从实时树中获取最大的设备树phandle值 + 1.
+2. 调整树的所有本地 phandles,以解决这个量。
+3. 使用 __local__fixups__ 节点信息以相同的量调整所有本地引用。
+4. 对于__fixups__节点中的每个属性,找到它在实时树中引用的节点。这是用来标记该节点的标签。
+5. 检索fixup的目标的phandle。
+6. 对于属性中的每个fixup,找到节点:属性:偏移的位置,并用phandle值替换它。
diff --git a/Documentation/translations/zh_CN/devicetree/index.rst b/Documentation/translations/zh_CN/devicetree/index.rst
index 3fc355fe0037..7451dbfdd3e5 100644
--- a/Documentation/translations/zh_CN/devicetree/index.rst
+++ b/Documentation/translations/zh_CN/devicetree/index.rst
@@ -24,21 +24,16 @@ Open Firmware 和 Devicetree
usage-model
of_unittest
-
-Todolist:
-
-* kernel-api
+ kernel-api
Devicetree Overlays
===================
.. toctree::
:maxdepth: 1
-Todolist:
-
-* changesets
-* dynamic-resolution-notes
-* overlay-notes
+ changesets
+ dynamic-resolution-notes
+ overlay-notes
Devicetree Bindings
===================
diff --git a/Documentation/translations/zh_CN/devicetree/kernel-api.rst b/Documentation/translations/zh_CN/devicetree/kernel-api.rst
new file mode 100644
index 000000000000..6aa3b685494e
--- /dev/null
+++ b/Documentation/translations/zh_CN/devicetree/kernel-api.rst
@@ -0,0 +1,58 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/devicetree/kernel-api.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+
+=================
+内核中的设备树API
+=================
+
+核心函数
+--------
+
+该API在以下内核代码中:
+
+drivers/of/base.c
+
+include/linux/of.h
+
+drivers/of/property.c
+
+include/linux/of_graph.h
+
+drivers/of/address.c
+
+drivers/of/irq.c
+
+drivers/of/fdt.c
+
+驱动模型函数
+------------
+
+该API在以下内核代码中:
+
+include/linux/of_device.h
+
+drivers/of/device.c
+
+include/linux/of_platform.h
+
+drivers/of/platform.c
+
+覆盖和动态DT函数
+----------------
+
+该API在以下内核代码中:
+
+drivers/of/resolver.c
+
+drivers/of/dynamic.c
+
+drivers/of/overlay.c
diff --git a/Documentation/translations/zh_CN/devicetree/overlay-notes.rst b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
new file mode 100644
index 000000000000..1bd482cb0a1b
--- /dev/null
+++ b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
@@ -0,0 +1,140 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/devicetree/overlay-notes.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+==============
+设备树覆盖说明
+==============
+
+本文档描述了drivers/of/overlay.c中的内核内设备树覆盖功能的实现,是
+Documentation/devicetree/dynamic-resolution-notes.rst[1]的配套文档。
+
+覆盖如何工作
+------------
+
+设备树覆盖的目的是修改内核的实时树,并使修改以反映变化的方式影响内核的状态。
+由于内核主要处理的是设备,任何会产生有效设备的新设备节点,都应当把相应的设备创建出来;
+而如果设备节点被禁用或被整个删除,受影响的设备则应当被注销。
+
+让我们举个例子,我们有一个foo板,它的基本树形图如下::
+
+ ---- foo.dts ---------------------------------------------------------------
+ /* FOO平台 */
+ /dts-v1/;
+ / {
+ compatible = "corp,foo";
+
+ /* 共享的资源 */
+ res: res {
+ };
+
+ /* 芯片上的外围设备 */
+ ocp: ocp {
+ /* 总是被实例化的外围设备 */
+ peripheral1 { ... };
+ };
+ };
+ ---- foo.dts ---------------------------------------------------------------
+
+覆盖bar.dts,
+::
+
+ ---- bar.dts - 按标签覆盖目标位置 ----------------------------
+ /dts-v1/;
+ /plugin/;
+ &ocp {
+ /* bar外围 */
+ bar {
+ compatible = "corp,bar";
+ ... /* 各种属性和子节点 */
+ };
+ };
+ ---- bar.dts ---------------------------------------------------------------
+
+当加载(并按照[1]中描述的方式完成解析)后,应该产生foo+bar.dts::
+
+ ---- foo+bar.dts -----------------------------------------------------------
+ /* FOO平台 + bar外围 */
+ / {
+ compatible = "corp,foo";
+
+ /* 共享资源 */
+ res: res {
+ };
+
+ /* 芯片上的外围设备 */
+ ocp: ocp {
+ /* 总是被实例化的外围设备 */
+ peripheral1 { ... };
+
+ /* bar外围 */
+ bar {
+ compatible = "corp,bar";
+ ... /* 各种属性和子节点 */
+ };
+ };
+ };
+ ---- foo+bar.dts -----------------------------------------------------------
+
+作为覆盖的结果,已经创建了一个新的设备节点(bar),因此将注册一个bar平台设备,
+如果加载了匹配的设备驱动程序,将按预期创建设备。
+
+如果基础DT不是用-@选项编译的,那么“&ocp”标签将不能用于将覆盖节点解析到基础
+DT中的适当位置。在这种情况下,可以提供目标路径。通过标签的目标位置的语法是比
+较好的,因为不管标签在DT中出现在哪里,覆盖都可以被应用到任何包含标签的基础DT上。
+
+上面的bar.dts例子被修改为使用目标路径语法,即为::
+
+ ---- bar.dts - 通过明确的路径覆盖目标位置 --------------------
+ /dts-v1/;
+ /plugin/;
+ &{/ocp} {
+ /* bar外围 */
+ bar {
+ compatible = "corp,bar";
+ ... /* 各种属性和子节点 */
+ };
+ };
+ ---- bar.dts ---------------------------------------------------------------
+
+
+内核中关于覆盖的API
+-------------------
+
+该API相当容易使用。
+
+1) 调用of_overlay_fdt_apply()来创建和应用一个覆盖的变更集。返回值是一个
+ 错误或一个识别这个覆盖的cookie。
+
+2) 调用of_overlay_remove()来删除和清理先前通过调用of_overlay_fdt_apply()
+ 而创建的覆盖变更集。不允许删除一个被另一个覆盖的覆盖变化集。
+
+最后,如果你需要一次性删除所有的覆盖,只需调用of_overlay_remove_all(),
+它将以正确的顺序删除每一个覆盖。
+
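+下面是一个调用示意,仅作草图:fdt_blob 和 fdt_size 是假设的变量,指向已加载到内存中的
+覆盖FDT;of_overlay_fdt_apply() 的参数在不同内核版本间略有差异,请以 include/linux/of.h
+为准::
+
+  #include <linux/of.h>
+
+  int ovcs_id = 0;
+  int err;
+
+  /* 创建并应用覆盖的变更集,成功后 ovcs_id 用于标识这一覆盖 */
+  err = of_overlay_fdt_apply(fdt_blob, fdt_size, &ovcs_id);
+  if (err)
+          return err;
+
+  /* ……需要撤销时,用同一个 ovcs_id 删除它 */
+  err = of_overlay_remove(&ovcs_id);
+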
+你可以选择注册在覆盖操作中被调用的通知器。详见
+of_overlay_notifier_register/unregister和enum of_overlay_notify_action。
+
+OF_OVERLAY_PRE_APPLY、OF_OVERLAY_POST_APPLY或OF_OVERLAY_PRE_REMOVE
+的通知器回调可以存储指向覆盖层中的设备树节点或其内容的指针,但这些指针不能持
+续到OF_OVERLAY_POST_REMOVE的通知器回调。在OF_OVERLAY_POST_REMOVE通
+知器被调用后,包含覆盖层的内存将被kfree()ed。请注意,即使OF_OVERLAY_POST_REMOVE
+的通知器返回错误,内存也会被kfree()ed。
+
+drivers/of/dynamic.c中的变更集通知器是第二种类型的通知器,可以通过应用或移除
+覆盖层来触发。这些通知器不允许在覆盖层或其内容中存储指向设备树节点的指针。当包含
+覆盖层的内存因移除覆盖层而被释放时,覆盖层代码并不能防止这类指针仍然有效。
+
+任何其他保留指向覆盖层节点或数据的指针的代码都被认为是一个错误,因为在移除覆盖层
+后,该指针将指向已释放的内存。
+
+覆盖层的用户必须特别注意系统上发生的整体操作,以确保其他内核代码不保留任何指向覆
+盖层节点或数据的指针。无意中使用这种指针的一个例子是:某个驱动或子系统模块在覆盖被
+应用之后才加载,并且该驱动或子系统扫描了整个设备树或其中的一大部分,其中就包括覆盖的节点。
diff --git a/Documentation/translations/zh_CN/driver-api/gpio/index.rst b/Documentation/translations/zh_CN/driver-api/gpio/index.rst
new file mode 100644
index 000000000000..9ab64e94aced
--- /dev/null
+++ b/Documentation/translations/zh_CN/driver-api/gpio/index.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/driver-api/gpio/index.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+=======================
+通用型输入/输出(GPIO)
+=======================
+
+目录:
+
+.. toctree::
+ :maxdepth: 2
+
+ legacy
+
+Todolist:
+
+* intro
+* using-gpio
+* driver
+* consumer
+* board
+* drivers-on-gpio
+* bt8xxgpio
+
+核心
+====
+
+该API在以下内核代码中:
+
+include/linux/gpio/driver.h
+
+drivers/gpio/gpiolib.c
+
+ACPI支持
+========
+
+该API在以下内核代码中:
+
+drivers/gpio/gpiolib-acpi.c
+
+设备树支持
+==========
+
+该API在以下内核代码中:
+
+drivers/gpio/gpiolib-of.c
+
+设备管理支持
+============
+
+该API在以下内核代码中:
+
+drivers/gpio/gpiolib-devres.c
+
+sysfs帮助(函数)
+=================
+
+该API在以下内核代码中:
+
+drivers/gpio/gpiolib-sysfs.c
diff --git a/Documentation/translations/zh_CN/gpio.txt b/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst
index a23ee14fc927..6399521d0548 100644
--- a/Documentation/translations/zh_CN/gpio.txt
+++ b/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst
@@ -1,39 +1,28 @@
-Chinese translated version of Documentation/admin-guide/gpio
+.. SPDX-License-Identifier: GPL-2.0
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
+.. include:: ../../disclaimer-zh_CN.rst
-Maintainer: Grant Likely <[email protected]>
- Linus Walleij <[email protected]>
-Chinese maintainer: Fu Wei <[email protected]>
----------------------------------------------------------------------
-Documentation/admin-guide/gpio 的中文翻译
+:Original: Documentation/driver-api/gpio/legacy.rst
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
-英文版维护者: Grant Likely <[email protected]>
- Linus Walleij <[email protected]>
-中文版维护者: 傅炜 Fu Wei <[email protected]>
-中文版翻译者: 傅炜 Fu Wei <[email protected]>
-中文版校译者: 傅炜 Fu Wei <[email protected]>
+:翻译:
+ 傅炜 Fu Wei <[email protected]>
+ 司延腾 Yanteng Si <[email protected]>
-以下为正文
----------------------------------------------------------------------
-GPIO 接口
+:校译:
-本文档提供了一个在Linux下访问GPIO的公约概述。
+
+传统GPIO接口
+============
+
+本文档概述了Linux下的GPIO访问公约。
这些函数以 gpio_* 作为前缀。其他的函数不允许使用这样的前缀或相关的
__gpio_* 前缀。
-什么是GPIO?
-==========
+什么是GPIO?
+============
"通用输入/输出口"(GPIO)是一个灵活的由软件控制的数字信号。他们可
由多种芯片提供,且对于从事嵌入式和定制硬件的 Linux 开发者来说是
比较熟悉。每个GPIO 都代表一个连接到特定引脚或球栅阵列(BGA)封装中
@@ -99,6 +88,7 @@ GPIO 公约
标识 GPIO
---------
+
GPIO 是通过无符号整型来标识的,范围是 0 到 MAX_INT。保留“负”数
用于其他目的,例如标识信号“在这个板子上不可用”或指示错误。未接触底层
硬件的代码会忽略这些整数。
@@ -115,7 +105,7 @@ FPGA 的特定板子上使用 80-95。编号不一定要连续,那些平台中�
如果你要初始化一个带有无效 GPIO 编号的结构体,可以使用一些负编码
(如"-EINVAL"),那将使其永远不会是有效。来测试这样一个结构体中的编号
-是否关联一个 GPIO,你可使用以下断言:
+是否关联一个 GPIO,你可使用以下断言::
int gpio_is_valid(int number);
@@ -128,11 +118,12 @@ FPGA 的特定板子上使用 80-95。编号不一定要连续,那些平台中�
使用 GPIO
---------
+
对于一个 GPIO,系统应该做的第一件事情就是通过 gpio_request()
函数分配它,见下文。
接下来是设置I/O方向,这通常是在板级启动代码中为所使用的 GPIO 设置
-platform_device 时完成。
+platform_device 时完成::
/* 设置为输入或输出, 返回 0 或负的错误代码 */
int gpio_direction_input(unsigned gpio);
@@ -157,12 +148,13 @@ get/set(获取/设置)函数调用没法返回错误,且有可能是配置错误
访问自旋锁安全的 GPIO
--------------------
+---------------------
+
大多数 GPIO 控制器可以通过内存读/写指令来访问。这些指令不会休眠,可以
安全地在硬(非线程)中断例程和类似的上下文中完成。
对于那些用 gpio_cansleep()测试总是返回失败的 GPIO(见下文),使用
-以下的函数访问:
+以下的函数访问::
/* GPIO 输入:返回零或非零 */
int gpio_get_value(unsigned gpio);
@@ -188,17 +180,18 @@ GPIO值是布尔值,零表示低电平,非零表示高电平。当读取一�
访问可能休眠的 GPIO
------------------
+-------------------
+
某些 GPIO 控制器必须通过基于总线(如 I2C 或 SPI)的消息访问。读或写这些
GPIO 值的命令需要等待其信息排到队首才发送命令,再获得其反馈。期间需要
休眠,这不能在 IRQ 例程(中断上下文)中执行。
支持此类 GPIO 的平台通过以下函数返回非零值来区分出这种 GPIO。(此函数需要
-一个之前通过 gpio_request 分配到的有效 GPIO 编号):
+一个之前通过 gpio_request 分配到的有效 GPIO 编号)::
int gpio_cansleep(unsigned gpio);
-为了访问这种 GPIO,内核定义了一套不同的函数:
+为了访问这种 GPIO,内核定义了一套不同的函数::
/* GPIO 输入:返回零或非零 ,可能会休眠 */
int gpio_get_value_cansleep(unsigned gpio);
@@ -214,25 +207,26 @@ GPIO 值的命令需要等待其信息排到队首才发送命令,再获得其
事实,这些处理例程实际上和自旋锁安全的函数是一样的。
** 除此之外 ** 调用设置和配置此类 GPIO 的函数也必须在允许休眠的上下文中,
-因为它们可能也需要访问 GPIO 控制器芯片: (这些设置函数通常在板级启动代码或者
-驱动探测/断开代码中,所以这是一个容易满足的约束条件。)
+因为它们可能也需要访问 GPIO 控制器芯片 (这些设置函数通常在板级启动代码或者
+驱动探测/断开代码中,所以这是一个容易满足的约束条件。) ::
- gpio_direction_input()
- gpio_direction_output()
- gpio_request()
+ gpio_direction_input()
+ gpio_direction_output()
+ gpio_request()
-## gpio_request_one()
-## gpio_request_array()
-## gpio_free_array()
+ ## gpio_request_one()
+ ## gpio_request_array()
+ ## gpio_free_array()
- gpio_free()
- gpio_set_debounce()
+ gpio_free()
+ gpio_set_debounce()
声明和释放 GPIO
-----------------------------
-为了有助于捕获系统配置错误,定义了两个函数。
+----------------
+
+为了有助于捕获系统配置错误,定义了两个函数::
/* 申请 GPIO, 返回 0 或负的错误代码.
* 非空标签可能有助于诊断.
@@ -256,9 +250,9 @@ GPIO 值的命令需要等待其信息排到队首才发送命令,再获得其
某些平台可能也使用 GPIO 作为电源管理激活信号(例如通过关闭未使用芯片区和
简单地关闭未使用时钟)。
-对于 GPIO 使用 pinctrl 子系统已知的引脚,子系统应该被告知其使用情况;
+对于 GPIO 使用引脚控制子系统已知的引脚,子系统应该被告知其使用情况;
一个 gpiolib 驱动的 .request()操作应调用 pinctrl_gpio_request(),
-而 gpiolib 驱动的 .free()操作应调用 pinctrl_gpio_free()。pinctrl
+而 gpiolib 驱动的 .free()操作应调用 pinctrl_gpio_free()。引脚控制
子系统允许 pinctrl_gpio_request()在某个引脚或引脚组以复用形式“属于”
一个设备时都成功返回。
@@ -270,7 +264,7 @@ GPIO 值的命令需要等待其信息排到队首才发送命令,再获得其
某些平台允许部分或所有 GPIO 信号使用不同的引脚。类似的,GPIO 或引脚的
其他方面也需要配置,如上拉/下拉。平台软件应该在对这些 GPIO 调用
-gpio_request()前将这类细节配置好,例如使用 pinctrl 子系统的映射表,
+gpio_request()前将这类细节配置好,例如使用引脚控制子系统的映射表,
使得 GPIO 的用户无须关注这些细节。
还有一个值得注意的是在释放 GPIO 前,你必须停止使用它。
@@ -278,7 +272,7 @@ gpio_request()前将这类细节配置好,例如使用 pinctrl 子系统的映
注意:申请一个 GPIO 并没有以任何方式配置它,只不过标识那个 GPIO 处于使用
状态。必须有另外的代码来处理引脚配置(如控制 GPIO 使用的引脚、上拉/下拉)。
-考虑到大多数情况下声明 GPIO 之后就会立即配置它们,所以定义了以下三个辅助函数:
+考虑到大多数情况下声明 GPIO 之后就会立即配置它们,所以定义了以下三个辅助函数::
/* 申请一个 GPIO 信号, 同时通过特定的'flags'初始化配置,
* 其他和 gpio_request()的参数和返回值相同
@@ -326,7 +320,7 @@ gpio_request()前将这类细节配置好,例如使用 pinctrl 子系统的映
将来这些标志可能扩展到支持更多的属性。
更进一步,为了更简单地声明/释放多个 GPIO,'struct gpio'被引进来封装所有
-这三个领域:
+这三个领域::
struct gpio {
unsigned gpio;
@@ -334,7 +328,7 @@ gpio_request()前将这类细节配置好,例如使用 pinctrl 子系统的映
const char *label;
};
-一个典型的用例:
+一个典型的用例::
static struct gpio leds_gpios[] = {
{ 32, GPIOF_OUT_INIT_HIGH, "Power LED" }, /* 默认开启 */
@@ -356,9 +350,10 @@ gpio_request()前将这类细节配置好,例如使用 pinctrl 子系统的映
GPIO 映射到 IRQ
---------------------
+----------------
+
GPIO 编号是无符号整数;IRQ 编号也是。这些构成了两个逻辑上不同的命名空间
-(GPIO 0 不一定使用 IRQ 0)。你可以通过以下函数在它们之间实现映射:
+(GPIO 0 不一定使用 IRQ 0)。你可以通过以下函数在它们之间实现映射::
/* 映射 GPIO 编号到 IRQ 编号 */
int gpio_to_irq(unsigned gpio);
@@ -384,7 +379,8 @@ irq_to_gpio()返回的非错误值大多数通常可以被 gpio_get_value()所�
模拟开漏信号
-----------------------------
+------------
+
有时在只有低电平信号作为实际驱动结果(译者注:多个输出连接于一点,逻辑电平
结果为所有输出的逻辑与)的时候,共享的信号线需要使用“开漏”信号。(该术语
适用于 CMOS 管;而 TTL 用“集电极开路”。)一个上拉电阻使信号为高电平。这
@@ -408,9 +404,44 @@ irq_to_gpio()返回的非错误值大多数通常可以被 gpio_get_value()所�
这不一定是错误的。一个常见的例子就是 I2C 时钟的延长:一个需要较慢时钟的
从设备延迟 SCK 的上升沿,而 I2C 主设备相应地调整其信号传输速率。
+GPIO控制器和引脚控制子系统
+--------------------------
+
+SOC上的GPIO控制器可能与引脚控制子系统紧密结合,即引脚可以与可选的gpio功
+能一起被其他功能使用。我们已经涵盖了这样的情况,例如一个GPIO控制器需要保
+留一个引脚或通过调用以下任何一个引脚来设置其方向::
+
+ pinctrl_gpio_request()
+ pinctrl_gpio_free()
+ pinctrl_gpio_direction_input()
+ pinctrl_gpio_direction_output()
+
+但是,引脚控制子系统是如何将GPIO号码(这是一个全局事项)与某个引脚控制器
+上的某个引脚交叉关联的?
+
+这是通过注册引脚的“范围(range)”来实现的,它本质上是一张交叉引用表。相关描述见
+Documentation/driver-api/pin-control.rst 。
+
+虽然引脚分配完全由引脚控制子系统管理,但gpio(在gpiolib下)仍由gpio驱动
+维护。可能发生的情况是,SoC中的不同引脚范围由不同的gpio驱动器管理。
+
+这使得在调用 "pinctrl_gpio_request" 之前,让gpio驱动向pin ctrl子系
+统宣布它们的引脚范围是合理的,以便在使用任何gpio之前要求引脚控制子系统准
+备相应的引脚。
+
+为此,gpio控制器可以用引脚控制子系统注册其引脚范围。目前有两种方法:有或
+无DT。
+
+关于对DT的支持,请参考 Documentation/devicetree/bindings/gpio/gpio.txt.
+
+对于非DT支持,用户可以用适当的参数调用gpiochip_add_pin_range(),把一段GPIO
+引脚注册到引脚控制驱动上。为此,必须把引脚控制设备的名称字符串作为参数之一传给
+这个函数。
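+
+一个示意性的调用如下(仅为草图,"foo-pinctrl" 与 foo_chip 等名称均为假设)::
+
+  /* 把本 gpio_chip 的 GPIO 0..15 映射到引脚控制器 "foo-pinctrl" 的引脚 32..47 上 */
+  err = gpiochip_add_pin_range(&foo_chip, "foo-pinctrl",
+                               0,    /* gpio_offset:本芯片内的 GPIO 起始偏移 */
+                               32,   /* pin_offset:引脚控制器内的起始引脚编号 */
+                               16);  /* npins:映射的引脚个数 */
+  if (err)
+          return err;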
+
+
+这些公约忽略了什么?
+====================
-这些公约忽略了什么?
-================
这些公约忽略的最大一件事就是引脚复用,因为这属于高度芯片特定的属性且
没有可移植性。某个平台可能不需要明确的复用信息;有的对于任意给定的引脚
可能只有两个功能选项;有的可能每个引脚有八个功能选项;有的可能可以将
@@ -433,8 +464,9 @@ Linux 的系统。)
当前,动态定义 GPIO 并不是标准的,例如作为配置一个带有某些 GPIO 扩展器的
附加电路板的副作用。
-GPIO 实现者的框架 (可选)
-=====================
+GPIO 实现者的框架(可选)
+=========================
+
前面提到了,有一个可选的实现框架,让平台使用相同的编程接口,更加简单地支持
不同种类的 GPIO 控制器。这个框架称为"gpiolib"。
@@ -444,15 +476,16 @@ GPIO 实现者的框架 (可选)
控制器驱动: gpio_chip
--------------------
+---------------------
+
在框架中每个 GPIO 控制器都包装为一个 "struct gpio_chip",他包含了
该类型的每个控制器的常用信息:
- - 设置 GPIO 方向的方法
- - 用于访问 GPIO 值的方法
- - 告知调用其方法是否可能休眠的标志
- - 可选的 debugfs 信息导出方法 (显示类似上拉配置一样的额外状态)
- - 诊断标签
+ - 设置 GPIO 方向的方法
+ - 用于访问 GPIO 值的方法
+ - 告知调用其方法是否可能休眠的标志
+ - 可选的 debugfs 信息导出方法 (显示类似上拉配置一样的额外状态)
+ - 诊断标签
也包含了来自 device.platform_data 的每个实例的数据:它第一个 GPIO 的
编号和它可用的 GPIO 的数量。
@@ -471,7 +504,8 @@ GPIO 实现者的框架 (可选)
平台支持
--------
+--------
+
为了支持这个框架,一个平台的 Kconfig 文件将会 "select"(选择)
ARCH_REQUIRE_GPIOLIB 或 ARCH_WANT_OPTIONAL_GPIOLIB,并让它的
<asm/gpio.h> 包含 <asm-generic/gpio.h>,同时定义三个方法:
@@ -489,7 +523,7 @@ ARCH_WANT_OPTIONAL_GPIOLIB 意味着 gpiolib 核心默认关闭,且用户可以
如果这些选项都没被选择,该平台就不通过 GPIO-lib 支持 GPIO,且代码不可以
被用户使能。
-以下这些方法的实现可以直接使用框架代码,并总是通过 gpio_chip 调度:
+以下这些方法的实现可以直接使用框架代码,并总是通过 gpio_chip 调度::
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
@@ -508,7 +542,8 @@ arch_initcall()或者更早的地方集成进平台初始化代码,使这些 G
且他们通常可以作为 IRQ 使用。
板级支持
--------
+--------
+
对于外部 GPIO 控制器(例如 I2C 或 SPI 扩展器、专用芯片、多功能器件、FPGA
或 CPLD),大多数常用板级特定代码都可以注册控制器设备,并保证他们的驱动知道
gpiochip_add()所使用的 GPIO 编号。他们的起始编号通常跟在平台特定的 GPIO
@@ -526,8 +561,9 @@ GPIO 可以工作之后才可被注册。解决这类依赖的的一种方法是
设备变成无效时移除它们。
-用户空间的 Sysfs 接口(可选)
-========================
+用户空间的 Sysfs 接口(可选)
+=============================
+
使用“gpiolib”实现框架的平台可以选择配置一个 GPIO 的 sysfs 用户接口。
这不同于 debugfs 接口,因为它提供的是对 GPIO方向和值的控制,而不只显示
一个GPIO 的状态摘要。此外,它可以出现在没有调试支持的产品级系统中。
@@ -548,6 +584,7 @@ GPIO 可以工作之后才可被注册。解决这类依赖的的一种方法是
Sysfs 中的路径
--------------
+
在/sys/class/gpio 中有 3 类入口:
- 用于在用户空间控制 GPIO 的控制接口;
@@ -625,8 +662,9 @@ GPIO 控制器的路径类似 /sys/class/gpio/gpiochip42/ (对于从#42 GPIO
从内核代码中导出
--------------
-内核代码可以明确地管理那些已通过 gpio_request()申请的 GPIO 的导出:
+----------------
+
+内核代码可以明确地管理那些已通过 gpio_request()申请的 GPIO 的导出::
/* 导出 GPIO 到用户空间 */
int gpio_export(unsigned gpio, bool direction_may_change);
@@ -648,3 +686,9 @@ GPIO 控制器的路径类似 /sys/class/gpio/gpiochip42/ (对于从#42 GPIO
在 GPIO 被导出之后,gpio_export_link()允许在 sysfs 文件系统的任何地方
创建一个到这个 GPIO sysfs 节点的符号链接。这样驱动就可以通过一个描述性的
名字,在 sysfs 中他们所拥有的设备下提供一个(到这个 GPIO sysfs 节点的)接口。
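+
+一个示意(dev 与 gpio 均为调用者已持有的假设变量)::
+
+  err = gpio_export(gpio, false);         /* 导出,且不允许用户空间改变方向 */
+  if (!err)
+          err = gpio_export_link(dev, "power-led", gpio);  /* 在 dev 的 sysfs 目录下建链接 */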
+
+
+API参考
+=======
+
+本节中列出的函数已被废弃。在新的代码中应该使用基于GPIO描述符的API。
diff --git a/Documentation/translations/zh_CN/driver-api/index.rst b/Documentation/translations/zh_CN/driver-api/index.rst
new file mode 100644
index 000000000000..ba354e1f4e6d
--- /dev/null
+++ b/Documentation/translations/zh_CN/driver-api/index.rst
@@ -0,0 +1,132 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/driver-api/index.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+========================
+Linux驱动实现者的API指南
+========================
+
+内核提供了各种各样的接口来支持设备驱动的开发。这份文档只是对其中一些接口进行了
+一定程度的整理——希望随着时间的推移,它能变得更好!可用的小节可以在下面看到。
+
+.. class:: toc-title
+
+ 目录列表:
+
+.. toctree::
+ :maxdepth: 2
+
+ gpio/index
+ io_ordering
+
+Todolist:
+
+* driver-model/index
+* basics
+* infrastructure
+* ioctl
+* early-userspace/index
+* pm/index
+* clk
+* device-io
+* dma-buf
+* device_link
+* component
+* message-based
+* infiniband
+* aperture
+* frame-buffer
+* regulator
+* reset
+* iio/index
+* input
+* usb/index
+* firewire
+* pci/index
+* cxl/index
+* spi
+* i2c
+* ipmb
+* ipmi
+* i3c/index
+* interconnect
+* devfreq
+* hsi
+* edac
+* scsi
+* libata
+* target
+* mailbox
+* mtdnand
+* miscellaneous
+* mei/index
+* mtd/index
+* mmc/index
+* nvdimm/index
+* w1
+* rapidio/index
+* s390-drivers
+* vme
+* 80211/index
+* uio-howto
+* firmware/index
+* pin-control
+* md/index
+* media/index
+* misc_devices
+* nfc/index
+* dmaengine/index
+* slimbus
+* soundwire/index
+* thermal/index
+* fpga/index
+* acpi/index
+* auxiliary_bus
+* backlight/lp855x-driver.rst
+* connector
+* console
+* dcdbas
+* eisa
+* isa
+* isapnp
+* io-mapping
+* generic-counter
+* memory-devices/index
+* men-chameleon-bus
+* ntb
+* nvmem
+* parport-lowlevel
+* pps
+* ptp
+* phy/index
+* pwm
+* pldmfw/index
+* rfkill
+* serial/index
+* sm501
+* surface_aggregator/index
+* switchtec
+* sync_file
+* tty/index
+* vfio-mediated-device
+* vfio
+* vfio-pci-device-specific-driver-acceptance
+* xilinx/index
+* xillybus
+* zorro
+* hte/index
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/driver-api/io_ordering.rst b/Documentation/translations/zh_CN/driver-api/io_ordering.rst
new file mode 100644
index 000000000000..4dbfa4ce92a0
--- /dev/null
+++ b/Documentation/translations/zh_CN/driver-api/io_ordering.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/driver-api/io_ordering.rst
+
+:翻译:
+
+ 林永听 Lin Yongting <[email protected]>
+ 司延腾 Yanteng Si <[email protected]>
+
+:校译:
+
+===========================
+对内存映射地址的I/O写入排序
+===========================
+
+在某些平台上,所谓的内存映射I/O是弱排序的。在这些平台上,驱动开发者有责任保证对
+I/O内存映射地址的写操作按程序预期的顺序到达设备。通常的做法是读取一个“安全的”
+设备寄存器或桥寄存器,促使IO芯片在处理该读操作之前,先把尚未完成的写操作清刷到
+设备,从而达到保证顺序的目的。驱动程序通常在spinlock保护的临界区退出之前使用这种技术。
+这也可以保证后面的写操作只在前面的写操作之后到达设备(这非常类似于内存
+屏障操作,mb(),不过仅适用于I/O)。
+
+假设一个设备驱动程序的具体例子::
+
+ ...
+ CPU A: spin_lock_irqsave(&dev_lock, flags)
+ CPU A: val = readl(my_status);
+ CPU A: ...
+ CPU A: writel(newval, ring_ptr);
+ CPU A: spin_unlock_irqrestore(&dev_lock, flags)
+ ...
+ CPU B: spin_lock_irqsave(&dev_lock, flags)
+ CPU B: val = readl(my_status);
+ CPU B: ...
+ CPU B: writel(newval2, ring_ptr);
+ CPU B: spin_unlock_irqrestore(&dev_lock, flags)
+ ...
+
+上述例子中,设备可能会先接收到newval2的值,然后接收到newval的值,问题就
+发生了。不过很容易通过下面方法来修复::
+
+ ...
+ CPU A: spin_lock_irqsave(&dev_lock, flags)
+ CPU A: val = readl(my_status);
+ CPU A: ...
+ CPU A: writel(newval, ring_ptr);
+ CPU A: (void)readl(safe_register); /* 配置寄存器?*/
+ CPU A: spin_unlock_irqrestore(&dev_lock, flags)
+ ...
+ CPU B: spin_lock_irqsave(&dev_lock, flags)
+ CPU B: val = readl(my_status);
+ CPU B: ...
+ CPU B: writel(newval2, ring_ptr);
+ CPU B: (void)readl(safe_register); /* 配置寄存器?*/
+ CPU B: spin_unlock_irqrestore(&dev_lock, flags)
+
+在解决方案中,读取safe_register寄存器,触发IO芯片清刷未处理的写操作,
+再处理后面的读操作,防止引发数据不一致问题。
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index bf85baca8b3e..2fc60e60feb4 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -108,6 +108,7 @@ TODOList:
:maxdepth: 2
core-api/index
+ driver-api/index
locking/index
accounting/index
cpu-freq/index
@@ -120,10 +121,10 @@ TODOList:
scheduler/index
mm/index
peci/index
+ PCI/index
TODOList:
-* driver-api/index
* block/index
* cdrom/index
* ide/index
@@ -148,7 +149,6 @@ TODOList:
* crypto/index
* bpf/index
* usb/index
-* PCI/index
* scsi/index
* misc-devices/index
* mhi/index
diff --git a/Documentation/translations/zh_CN/io_ordering.txt b/Documentation/translations/zh_CN/io_ordering.txt
deleted file mode 100644
index 7bb3086227ae..000000000000
--- a/Documentation/translations/zh_CN/io_ordering.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-Chinese translated version of Documentation/driver-api/io_ordering.rst
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Chinese maintainer: Lin Yongting <[email protected]>
----------------------------------------------------------------------
-Documentation/driver-api/io_ordering.rst 的中文翻译
-
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
-
-中文版维护者: 林永听 Lin Yongting <[email protected]>
-中文版翻译者: 林永听 Lin Yongting <[email protected]>
-中文版校译者: 林永听 Lin Yongting <[email protected]>
-
-
-以下为正文
----------------------------------------------------------------------
-
-在某些平台上,所谓的内存映射I/O是弱顺序。在这些平台上,驱动开发者有责任
-保证I/O内存映射地址的写操作按程序图意的顺序达到设备。通常读取一个“安全”
-设备寄存器或桥寄存器,触发IO芯片清刷未处理的写操作到达设备后才处理读操作,
-而达到保证目的。驱动程序通常在spinlock保护的临界区退出之前使用这种技术。
-这也可以保证后面的写操作只在前面的写操作之后到达设备(这非常类似于内存
-屏障操作,mb(),不过仅适用于I/O)。
-
-假设一个设备驱动程的具体例子:
-
- ...
-CPU A: spin_lock_irqsave(&dev_lock, flags)
-CPU A: val = readl(my_status);
-CPU A: ...
-CPU A: writel(newval, ring_ptr);
-CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
-CPU B: spin_lock_irqsave(&dev_lock, flags)
-CPU B: val = readl(my_status);
-CPU B: ...
-CPU B: writel(newval2, ring_ptr);
-CPU B: spin_unlock_irqrestore(&dev_lock, flags)
- ...
-
-上述例子中,设备可能会先接收到newval2的值,然后接收到newval的值,问题就
-发生了。不过很容易通过下面方法来修复:
-
- ...
-CPU A: spin_lock_irqsave(&dev_lock, flags)
-CPU A: val = readl(my_status);
-CPU A: ...
-CPU A: writel(newval, ring_ptr);
-CPU A: (void)readl(safe_register); /* 配置寄存器?*/
-CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
-CPU B: spin_lock_irqsave(&dev_lock, flags)
-CPU B: val = readl(my_status);
-CPU B: ...
-CPU B: writel(newval2, ring_ptr);
-CPU B: (void)readl(safe_register); /* 配置寄存器?*/
-CPU B: spin_unlock_irqrestore(&dev_lock, flags)
-
-在解决方案中,读取safe_register寄存器,触发IO芯片清刷未处理的写操作,
-再处理后面的读操作,防止引发数据不一致问题。
diff --git a/Documentation/translations/zh_CN/oops-tracing.txt b/Documentation/translations/zh_CN/oops-tracing.txt
deleted file mode 100644
index c5f3bda7abcb..000000000000
--- a/Documentation/translations/zh_CN/oops-tracing.txt
+++ /dev/null
@@ -1,212 +0,0 @@
-Chinese translated version of Documentation/admin-guide/bug-hunting.rst
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Chinese maintainer: Dave Young <[email protected]>
----------------------------------------------------------------------
-Documentation/admin-guide/bug-hunting.rst 的中文翻译
-
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
-
-中文版维护者: 杨瑞 Dave Young <[email protected]>
-中文版翻译者: 杨瑞 Dave Young <[email protected]>
-中文版校译者: 李阳 Li Yang <[email protected]>
- 王聪 Wang Cong <[email protected]>
-
-以下为正文
----------------------------------------------------------------------
-
-注意: ksymoops 在2.6中是没有用的。 请以原有格式使用Oops(来自dmesg,等等)。
-忽略任何这样那样关于“解码Oops”或者“通过ksymoops运行”的文档。 如果你贴出运行过
-ksymoops的来自2.6的Oops,人们只会让你重贴一次。
-
-快速总结
--------------
-
-发现Oops并发送给看似相关的内核领域的维护者。别太担心对不上号。如果你不确定就发给
-和你所做的事情相关的代码的负责人。 如果可重现试着描述怎样重构。 那甚至比oops更有
-价值。
-
-如果你对于发送给谁一无所知, 发给[email protected]。感谢你帮助Linux
-尽可能地稳定。
-
-Oops在哪里?
-----------------------
-
-通常Oops文本由klogd从内核缓冲区里读取并传给syslogd,由syslogd写到syslog文件中,
-典型地是/var/log/messages(依赖于/etc/syslog.conf)。有时klogd崩溃了,这种情况下你
-能够运行dmesg > file来从内核缓冲区中读取数据并保存下来。 否则你可以
-cat /proc/kmsg > file, 然而你必须介入中止传输, kmsg是一个“永不结束的文件”。如
-果机器崩溃坏到你不能输入命令或者磁盘不可用那么你有三种选择:-
-
-(1) 手抄屏幕上的文本待机器重启后再输入计算机。 麻烦但如果没有针对崩溃的准备,
-这是仅有的选择。 另外,你可以用数码相机把屏幕拍下来-不太好,但比没有强。 如果信
-息滚动到了终端的上面,你会发现以高分辩率启动(比如,vga=791)会让你读到更多的文
-本。(注意:这需要vesafb,所以对‘早期’的oops没有帮助)
-
-(2)用串口终端启动(请参看Documentation/admin-guide/serial-console.rst),运行一个null
-modem到另一台机器并用你喜欢的通讯工具获取输出。Minicom工作地很好。
-
-(3)使用Kdump(请参看Documentation/admin-guide/kdump/kdump.rst),
-使用在Documentation/admin-guide/kdump/gdbmacros.txt中定义的dmesg gdb宏,从旧的内存中提取内核
-环形缓冲区。
-
-完整信息
-----------------
-
-注意:以下来自于Linus的邮件适用于2.4内核。 我因为历史原因保留了它,并且因为其中
-一些信息仍然适用。 特别注意的是,请忽略任何ksymoops的引用。
-
-From: Linus Torvalds <[email protected]>
-
-怎样跟踪Oops.. [原发到linux-kernel的一封邮件]
-
-主要的窍门是有五年和这些烦人的oops消息打交道的经验;-)
-
-实际上,你有办法使它更简单。我有两个不同的方法:
-
- gdb /usr/src/linux/vmlinux
- gdb> disassemble <offending_function>
-
-那是发现问题的简单办法,至少如果bug报告做的好的情况下(象这个一样-运行ksymoops
-得到oops发生的函数及函数内的偏移)。
-
-哦,如果报告发生的内核以相同的编译器和相似的配置编译它会有帮助的。
-
-另一件要做的事是反汇编bug报告的“Code”部分:ksymoops也会用正确的工具来做这件事,
-但如果没有那些工具你可以写一个傻程序:
-
- char str[] = "\xXX\xXX\xXX...";
- main(){}
-
-并用gcc -g编译它然后执行“disassemble str”(XX部分是由Oops报告的值-你可以仅剪切
-粘贴并用“\x”替换空格-我就是这么做的,因为我懒得写程序自动做这一切)。
-
-另外,你可以用scripts/decodecode这个shell脚本。它的使用方法是:
-decodecode < oops.txt
-
-“Code”之后的十六进制字节可能(在某些架构上)有一些当前指令之前的指令字节以及
-当前和之后的指令字节
-
-Code: f9 0f 8d f9 00 00 00 8d 42 0c e8 dd 26 11 c7 a1 60 ea 2b f9 8b 50 08 a1
-64 ea 2b f9 8d 34 82 8b 1e 85 db 74 6d 8b 15 60 ea 2b f9 <8b> 43 04 39 42 54
-7e 04 40 89 42 54 8b 43 04 3b 05 00 f6 52 c0
-
-最后,如果你想知道代码来自哪里,你可以:
-
- cd /usr/src/linux
- make fs/buffer.s # 或任何产生BUG的文件
-
-然后你会比gdb反汇编更清楚的知道发生了什么。
-
-现在,问题是把你所拥有的所有数据结合起来:C源码(关于它应该怎样的一般知识),
-汇编代码及其反汇编得到的代码(另外还有从“oops”消息得到的寄存器状态-对了解毁坏的
-指针有用,而且当你有了汇编代码你也能拿其它的寄存器和任何它们对应的C表达式做匹配
-)。
-
-实际上,你仅需看看哪里不匹配(这个例子是“Code”反汇编和编译器生成的代码不匹配)。
-然后你须要找出为什么不匹配。通常很简单-你看到代码使用了空指针然后你看代码想知道
-空指针是怎么出现的,还有检查它是否合法..
-
-现在,如果明白这是一项耗时的工作而且需要一丁点儿的专心,没错。这就是我为什么大多
-只是忽略那些没有符号表信息的崩溃报告的原因:简单的说太难查找了(我有一些
-程序用于在内核代码段中搜索特定的模式,而且有时我也已经能找出那些崩溃的地方,但是
-仅仅是找出正确的序列也确实需要相当扎实的内核知识)
-
-_有时_会发生这种情况,我仅看到崩溃中的反汇编代码序列, 然后我马上就明白问题出在
-哪里。这时我才意识到自己干这个工作已经太长时间了;-)
-
- Linus
-
-
----------------------------------------------------------------------------
-关于Oops跟踪的注解:
-
-为了帮助Linus和其它内核开发者,klogd纳入了大量的支持来处理保护错误。为了拥有对
-地址解析的完整支持至少应该使用1.3-pl3的sysklogd包。
-
-当保护错误发生时,klogd守护进程自动把内核日志信息中的重要地址翻译成它们相应的符
-号。
-
-klogd执行两种类型的地址解析。首先是静态翻译其次是动态翻译。静态翻译和ksymoops
-一样使用System.map文件。为了做静态翻译klogd守护进程必须在初始化时能找到system
-map文件。关于klogd怎样搜索map文件请参看klogd手册页。
-
-动态地址翻译在使用内核可装载模块时很重要。 因为内核模块的内存是从内核动态内存池
-里分配的,所以不管是模块开始位置还是模块中函数和符号的位置都不是固定的。
-
-内核支持允许程序决定装载哪些模块和它们在内存中位置的系统调用。使用这些系统调用
-klogd守护进程生成一张符号表用于调试发生在可装载模块中的保护错误。
-
-至少klogd会提供产生保护错误的模块名。还可有额外的符号信息供可装载模块开发者选择
-以从模块中输出符号信息。
-
-因为内核模块环境可能是动态的,所以必须有一种机制当模块环境发生改变时来通知klogd
-守护进程。 有一些可用的命令行选项允许klogd向当前执行中的守护进程发送信号,告知符
-号信息应该被刷新了。 更多信息请参看klogd手册页。
-
-sysklogd发布时包含一个补丁修改了modules-2.0.0包,无论何时一个模块装载或者卸载都
-会自动向klogd发送信号。打上这个补丁提供了必要的对调试发生于内核可装载模块的保护
-错误的无缝支持。
-
-以下是被klogd处理过的发生在可装载模块中的一个保护错误例子:
----------------------------------------------------------------------------
-Aug 29 09:51:01 blizard kernel: Unable to handle kernel paging request at virtual address f15e97cc
-Aug 29 09:51:01 blizard kernel: current->tss.cr3 = 0062d000, %cr3 = 0062d000
-Aug 29 09:51:01 blizard kernel: *pde = 00000000
-Aug 29 09:51:01 blizard kernel: Oops: 0002
-Aug 29 09:51:01 blizard kernel: CPU: 0
-Aug 29 09:51:01 blizard kernel: EIP: 0010:[oops:_oops+16/3868]
-Aug 29 09:51:01 blizard kernel: EFLAGS: 00010212
-Aug 29 09:51:01 blizard kernel: eax: 315e97cc ebx: 003a6f80 ecx: 001be77b edx: 00237c0c
-Aug 29 09:51:01 blizard kernel: esi: 00000000 edi: bffffdb3 ebp: 00589f90 esp: 00589f8c
-Aug 29 09:51:01 blizard kernel: ds: 0018 es: 0018 fs: 002b gs: 002b ss: 0018
-Aug 29 09:51:01 blizard kernel: Process oops_test (pid: 3374, process nr: 21, stackpage=00589000)
-Aug 29 09:51:01 blizard kernel: Stack: 315e97cc 00589f98 0100b0b4 bffffed4 0012e38e 00240c64 003a6f80 00000001
-Aug 29 09:51:01 blizard kernel: 00000000 00237810 bfffff00 0010a7fa 00000003 00000001 00000000 bfffff00
-Aug 29 09:51:01 blizard kernel: bffffdb3 bffffed4 ffffffda 0000002b 0007002b 0000002b 0000002b 00000036
-Aug 29 09:51:01 blizard kernel: Call Trace: [oops:_oops_ioctl+48/80] [_sys_ioctl+254/272] [_system_call+82/128]
-Aug 29 09:51:01 blizard kernel: Code: c7 00 05 00 00 00 eb 08 90 90 90 90 90 90 90 90 89 ec 5d c3
----------------------------------------------------------------------------
-
-Dr. G.W. Wettstein Oncology Research Div. Computing Facility
-Roger Maris Cancer Center INTERNET: [email protected]
-820 4th St. N.
-Fargo, ND 58122
-Phone: 701-234-7556
-
-
----------------------------------------------------------------------------
-受污染的内核
-
-一些oops报告在程序记数器之后包含字符串'Tainted: '。这表明内核已经被一些东西给污
-染了。 该字符串之后紧跟着一系列的位置敏感的字符,每个代表一个特定的污染值。
-
- 1:'G'如果所有装载的模块都有GPL或相容的许可证,'P'如果装载了任何的专有模块。
-没有模块MODULE_LICENSE或者带有insmod认为是与GPL不相容的的MODULE_LICENSE的模块被
-认定是专有的。
-
- 2:'F'如果有任何通过“insmod -f”被强制装载的模块,' '如果所有模块都被正常装载。
-
- 3:'S'如果oops发生在SMP内核中,运行于没有证明安全运行多处理器的硬件。 当前这种
-情况仅限于几种不支持SMP的速龙处理器。
-
- 4:'R'如果模块通过“insmod -f”被强制装载,' '如果所有模块都被正常装载。
-
- 5:'M'如果任何处理器报告了机器检查异常,' '如果没有发生机器检查异常。
-
- 6:'B'如果页释放函数发现了一个错误的页引用或者一些非预期的页标志。
-
- 7:'U'如果用户或者用户应用程序特别请求设置污染标志,否则' '。
-
- 8:'D'如果内核刚刚死掉,比如有OOPS或者BUG。
-
-使用'Tainted: '字符串的主要原因是要告诉内核调试者,这是否是一个干净的内核亦或发
-生了任何的不正常的事。污染是永久的:即使出错的模块已经被卸载了,污染值仍然存在,
-以表明内核不再值得信任。
diff --git a/Documentation/translations/zh_CN/process/coding-style.rst b/Documentation/translations/zh_CN/process/coding-style.rst
index 638d714bec83..fa28ef0a7fee 100644
--- a/Documentation/translations/zh_CN/process/coding-style.rst
+++ b/Documentation/translations/zh_CN/process/coding-style.rst
@@ -1,21 +1,23 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :ref:`Documentation/process/coding-style.rst <codingstyle>`
+:Original: Documentation/process/coding-style.rst
.. _cn_codingstyle:
-译者::
+:译者:
+ - 张乐 Zhang Le <[email protected]>
+ - Andy Deng <[email protected]>
+ - 吴想成 <[email protected]>
- 中文版维护者: 张乐 Zhang Le <[email protected]>
- 中文版翻译者: 张乐 Zhang Le <[email protected]>
- 中文版校译者: 王聪 Wang Cong <[email protected]>
- 管旭东 Xudong Guan <[email protected]>
- Li Zefan <[email protected]>
- Wang Chen <[email protected]>
+:校译:
+ - 王聪 Wang Cong <[email protected]>
+ - wheelz <[email protected]>
+ - 管旭东 Xudong Guan <[email protected]>
+ - Li Zefan <[email protected]>
+ - Wang Chen <[email protected]>
Linux 内核代码风格
-=========================
+==================
这是一个简短的文档,描述了 linux 内核的首选代码风格。代码风格是因人而异的,
而且我不愿意把自己的观点强加给任何人,但这就像我去做任何事情都必须遵循的原则
@@ -29,7 +31,7 @@ Linux 内核代码风格
1) 缩进
---------------
+-------
制表符是 8 个字符,所以缩进也是 8 个字符。有些异端运动试图将缩进变为 4 (甚至
2!) 字符深,这几乎相当于尝试将圆周率的值定义为 3。
@@ -73,6 +75,22 @@ Linux 内核代码风格
if (condition) do_this;
do_something_everytime;
+不要使用逗号来避免使用大括号:
+
+.. code-block:: c
+
+ if (condition)
+ do_this(), do_that();
+
+使用大括号包裹多语句:
+
+.. code-block:: c
+
+ if (condition) {
+ do_this();
+ do_that();
+ }
+
也不要在一行里放多个赋值语句。内核代码风格超级简单。就是避免可能导致别人误读
的表达式。
@@ -83,20 +101,25 @@ Linux 内核代码风格
2) 把长的行和字符串打散
-------------------------------
+-----------------------
代码风格的意义就在于使用平常使用的工具来维持代码的可读性和可维护性。
每一行的长度的限制是 80 列,我们强烈建议您遵守这个惯例。
长于 80 列的语句要打散成有意义的片段。除非超过 80 列能显著增加可读性,并且不
-会隐藏信息。子片段要明显短于母片段,并明显靠右。这同样适用于有着很长参数列表
-的函数头。然而,绝对不要打散对用户可见的字符串,例如 printk 信息,因为这样就
+会隐藏信息。
+
+子片段要明显短于母片段,并明显靠右。一种非常常用的样式是将后续行与函数的左括号对齐。
+
+这同样适用于有着很长参数列表的函数头。
+
+然而,绝对不要打散对用户可见的字符串,例如 printk 信息,因为这样就
很难对它们 grep。
3) 大括号和空格的放置
-------------------------------
+---------------------
C 语言风格中另外一个常见问题是大括号的放置。和缩进大小不同,选择或弃用某种放
置策略并没有多少技术上的原因,不过首选的方式,就像 Kernighan 和 Ritchie 展示
@@ -132,12 +155,12 @@ C 语言风格中另外一个常见问题是大括号的放置。和缩进大小
body of function
}
-全世界的异端可能会抱怨这个不一致性是... 呃... 不一致的,不过所有思维健全的人
+全世界的异端可能会抱怨这个不一致性是……呃……不一致,不过所有思维健全的人
都知道 (a) K&R 是 **正确的** 并且 (b) K&R 是正确的。此外,不管怎样函数都是特
殊的 (C 函数是不能嵌套的)。
注意结束大括号独自占据一行,除非它后面跟着同一个语句的剩余部分,也就是 do 语
-句中的 "while" 或者 if 语句中的 "else",像这样:
+句中的 ``while`` 或者 if 语句中的 ``else`` ,像这样:
.. code-block:: c
@@ -191,7 +214,7 @@ C 语言风格中另外一个常见问题是大括号的放置。和缩进大小
}
3.1) 空格
-********************
+*********
Linux 内核的空格使用方式 (主要) 取决于它是用于函数还是关键字。(大多数) 关键字
后要加一个空格。值得注意的例外是 sizeof, typeof, alignof 和 __attribute__,这
@@ -254,7 +277,7 @@ Linux 内核的空格使用方式 (主要) 取决于它是用于函数还是关�
4) 命名
-------------------------------
+-------
C 是一个简朴的语言,你的命名也应该这样。和 Modula-2 和 Pascal 程序员不同,
C 程序员不使用类似 ThisVariableIsATemporaryCounter 这样华丽的名字。C 程序员会
@@ -275,11 +298,31 @@ C 程序员不使用类似 ThisVariableIsATemporaryCounter 这样华丽的名字
可能的话。类似的, ``tmp`` 可以用来称呼任意类型的临时变量。
如果你怕混淆了你的本地变量名,你就遇到另一个问题了,叫做函数增长荷尔蒙失衡综
-合症。请看第六章 (函数)。
+合征。请看第六章 (函数)。
+对于符号名称和文档,避免引入新的“master/slave”(或独立于“master”的“slave”)
+和“blacklist/whitelist”。
+
+“master/slave”推荐替换为:
+ '{primary,main} / {secondary,replica,subordinate}'
+ '{initiator,requester} / {target,responder}'
+ '{controller,host} / {device,worker,proxy}'
+ 'leader/follower'
+ 'director/performer'
+
+“blacklist/whitelist”推荐替换为:
+ 'denylist/allowlist'
+ 'blocklist/passlist'
+
+引入新用法的例外情况是:维护用户空间ABI/API,或更新现有(截至2020年)硬件或
+协议规范的代码时要求这些术语。对于新规范,尽可能将术语的规范用法转换为内核
+编码标准。
+
+.. warning::
+ 以上主从、黑白名单规则不适用于中文文档,请勿更改中文术语!
5) Typedef
------------
+----------
不要使用类似 ``vps_t`` 之类的东西。
@@ -308,7 +351,7 @@ C 程序员不使用类似 ThisVariableIsATemporaryCounter 这样华丽的名字
.. note::
- 不透明性和 "访问函数" 本身是不好的。我们使用 pte_t 等类型的原因在于真
+ 不透明性和“访问函数”本身是不好的。我们使用 pte_t 等类型的原因在于真
的是完全没有任何共用的可访问信息。
(b) 清楚的整数类型,如此,这层抽象就可以 **帮助** 消除到底是 ``int`` 还是
@@ -353,7 +396,7 @@ C 程序员不使用类似 ThisVariableIsATemporaryCounter 这样华丽的名字
6) 函数
-------------------------------
+-------
函数应该简短而漂亮,并且只完成一件事情。函数应该可以一屏或者两屏显示完 (我们
都知道 ISO/ANSI 屏幕大小是 80x24),只做一件事情,而且把它做好。
@@ -383,12 +426,46 @@ C 程序员不使用类似 ThisVariableIsATemporaryCounter 这样华丽的名字
}
EXPORT_SYMBOL(system_is_up);
-在函数原型中,包含函数名和它们的数据类型。虽然 C 语言里没有这样的要求,在
+6.1) 函数原型
+*************
+
+在函数原型中包含参数名和它们的数据类型。虽然 C 语言里没有这样的要求,但在
Linux 里这是提倡的做法,因为这样可以很简单的给读者提供更多的有价值的信息。
+不要在函数声明里使用 ``extern`` 关键字,因为这会导致代码行变长,并且不是严格
+必需的。
+
+写函数原型时,请保持 `元素顺序规则 <https://lore.kernel.org/mm-commits/CAHk-=wiOCLRny5aifWNhr621kYrJwhfURsa0vFPeUEm8mF0ufg@mail.gmail.com/>`_ 。
+例如下列函数声明::
+
+ __init void * __must_check action(enum magic value, size_t size, u8 count,
+ char *fmt, ...) __printf(4, 5) __malloc;
+
+推荐的函数原型元素顺序是:
+
+- 储存类型(下方的 ``static __always_inline`` ,注意 ``__always_inline``
+ 技术上来讲是个属性但被当做 ``inline`` )
+- 储存类型属性(上方的 ``__init`` ——即节声明,但也像 ``__cold`` )
+- 返回类型(上方的 ``void *`` )
+- 返回类型属性(上方的 ``__must_check`` )
+- 函数名(上方的 ``action`` )
+- 函数参数(上方的 ``(enum magic value, size_t size, u8 count, char *fmt, ...)`` ,
+ 注意必须写上参数名)
+- 函数参数属性(上方的 ``__printf(4, 5)`` )
+- 函数行为属性(上方的 ``__malloc`` )
+
+请注意,对于函数 **定义** (即实际函数体),编译器不允许在函数参数之后添加函
+数参数属性。在这种情况下,它们应该跟随存储类型属性(例如,与上面的 **声明**
+示例相比,请注意下面的 ``__printf(4, 5)`` 的位置发生了变化)::
+
+ static __always_inline __init __printf(4, 5) void * __must_check action(enum magic value,
+ size_t size, u8 count, char *fmt, ...) __malloc
+ {
+ ...
+ }
7) 集中的函数退出途径
-------------------------------
+---------------------
虽然被某些人声称已经过时,但是 goto 语句的等价物还是经常被编译器所使用,具体
形式是无条件跳转指令。
@@ -432,7 +509,7 @@ Linux 里这是提倡的做法,因为这样可以很简单的给读者提供�
return result;
}
-一个需要注意的常见错误是 ``一个 err 错误`` ,就像这样:
+一个需要注意的常见错误是 ``单 err 错误`` ,就像这样:
.. code-block:: c
@@ -456,19 +533,19 @@ Linux 里这是提倡的做法,因为这样可以很简单的给读者提供�
8) 注释
-------------------------------
+-------
注释是好的,不过有过度注释的危险。永远不要在注释里解释你的代码是如何运作的:
更好的做法是让别人一看你的代码就可以明白,解释写的很差的代码是浪费时间。
-一般的,你想要你的注释告诉别人你的代码做了什么,而不是怎么做的。也请你不要把
+一般来说你用注释告诉别人你的代码做了什么,而不是怎么做的。也请你不要把
注释放在一个函数体内部:如果函数复杂到你需要独立的注释其中的一部分,你很可能
需要回到第六章看一看。你可以做一些小注释来注明或警告某些很聪明 (或者槽糕) 的
做法,但不要加太多。你应该做的,是把注释放在函数的头部,告诉人们它做了什么,
也可以加上它做这些事情的原因。
-当注释内核 API 函数时,请使用 kernel-doc 格式。请看
-Documentation/doc-guide/ 和 scripts/kernel-doc 以获得详细信息。
+当注释内核 API 函数时,请使用 kernel-doc 格式。详见
+Documentation/translations/zh_CN/doc-guide/index.rst 和 scripts/kernel-doc 。
长 (多行) 注释的首选风格是:
@@ -500,17 +577,18 @@ Documentation/doc-guide/ 和 scripts/kernel-doc 以获得详细信息。
9) 你已经把事情弄糟了
-------------------------------
+---------------------
-这没什么,我们都是这样。可能你的使用了很长时间 Unix 的朋友已经告诉你
+这没什么,我们都是这样。可能你长期使用 Unix 的朋友已经告诉你
``GNU emacs`` 能自动帮你格式化 C 源代码,而且你也注意到了,确实是这样,不过它
所使用的默认值和我们想要的相去甚远 (实际上,甚至比随机打的还要差——无数个猴子
-在 GNU emacs 里打字永远不会创造出一个好程序) (译注:Infinite Monkey Theorem)
+在 GNU emacs 里打字永远不会创造出一个好程序)
+*(译注:Infinite Monkey Theorem)*
所以你要么放弃 GNU emacs,要么改变它让它使用更合理的设定。要采用后一个方案,
你可以把下面这段粘贴到你的 .emacs 文件里。
-.. code-block:: none
+.. code-block:: elisp
(defun c-lineup-arglist-tabs-only (ignored)
"Line up argument lists by tabs, not spaces"
@@ -529,7 +607,7 @@ Documentation/doc-guide/ 和 scripts/kernel-doc 以获得详细信息。
(c-offsets-alist . (
(arglist-close . c-lineup-arglist-tabs-only)
(arglist-cont-nonempty .
- (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only))
+ (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only))
(arglist-intro . +)
(brace-list-intro . +)
(c . c-lineup-C-comments)
@@ -573,9 +651,14 @@ Documentation/doc-guide/ 和 scripts/kernel-doc 以获得详细信息。
``indent`` 有很多选项,特别是重新格式化注释的时候,你可能需要看一下它的手册。
不过记住: ``indent`` 不能修正坏的编程习惯。
+请注意,您还可以使用 ``clang-format`` 工具帮助您处理这些规则,快速自动重新格
+式化部分代码,并审阅整个文件以发现代码风格错误、打字错误和可能的改进。它还可
+以方便地排序 ``#include`` ,对齐变量/宏,重排文本和其他类似任务。
+详见 Documentation/process/clang-format.rst 。
+
10) Kconfig 配置文件
-------------------------------
+--------------------
对于遍布源码树的所有 Kconfig* 配置文件来说,它们缩进方式有所不同。紧挨着
``config`` 定义的行,用一个制表符缩进,然而 help 信息的缩进则额外增加 2 个空
@@ -598,11 +681,11 @@ Documentation/doc-guide/ 和 scripts/kernel-doc 以获得详细信息。
depends on ADFS_FS
...
-要查看配置文件的完整文档,请看 Documentation/kbuild/kconfig-language.rst。
+要查看配置文件的完整文档,请看 Documentation/kbuild/kconfig-language.rst 。
11) 数据结构
-------------------------------
+------------
如果一个数据结构,在创建和销毁它的单线执行环境之外可见,那么它必须要有一个引
用计数器。内核里没有垃圾收集 (并且内核之外的垃圾收集慢且效率低下),这意味着你
@@ -626,7 +709,7 @@ mm_count),和文件系统 (``struct super_block``: s_count 和 s_active) 中�
12) 宏,枚举和RTL
-------------------------------
+-----------------
用于定义常量的宏的名字及枚举里的标签需要大写。
@@ -638,7 +721,7 @@ mm_count),和文件系统 (``struct super_block``: s_count 和 s_active) 中�
宏的名字请用大写字母,不过形如函数的宏的名字可以用小写字母。
-一般的,如果能写成内联函数就不要写成像函数的宏。
+通常如果能写成内联函数就不要写成像函数的宏。
含有多个语句的宏应该被包含在一个 do-while 代码块里:
@@ -696,18 +779,18 @@ mm_count),和文件系统 (``struct super_block``: s_count 和 s_active) 中�
(ret); \
})
-ret 是本地变量的通用名字 - __foo_ret 更不容易与一个已存在的变量冲突。
+ret 是本地变量的通用名字—— __foo_ret 更不容易与一个已存在的变量冲突。
cpp 手册对宏的讲解很详细。gcc internals 手册也详细讲解了 RTL,内核里的汇编语
言经常用到它。
13) 打印内核消息
-------------------------------
+----------------
-内核开发者应该是受过良好教育的。请一定注意内核信息的拼写,以给人以好的印象。
+内核开发者应该看起来有文化。请一定注意内核信息的拼写,以给人良好的印象。
不要用不规范的单词比如 ``dont``,而要用 ``do not`` 或者 ``don't`` 。保证这些信
-息简单明了,无歧义。
+息简单明了、无歧义。
内核信息不必以英文句号结束。
@@ -724,17 +807,18 @@ dev_info() 等等。对于那些不和某个特定设备相关连的信息,<li
或设定了 CONFIG_DYNAMIC_DEBUG。实际这同样是为了 dev_dbg(),一个相关约定是在一
个已经开启了 DEBUG 时,使用 VERBOSE_DEBUG 来添加 dev_vdbg()。
-许多子系统拥有 Kconfig 调试选项来开启 -DDEBUG 在对应的 Makefile 里面;在其他
+许多子系统拥有 Kconfig 调试选项来开启对应 Makefile 里面的 -DDEBUG;在其他
情况下,特殊文件使用 #define DEBUG。当一条调试信息需要被无条件打印时,例如,
如果已经包含一个调试相关的 #ifdef 条件,printk(KERN_DEBUG ...) 就可被使用。
14) 分配内存
-------------------------------
+------------
内核提供了下面的一般用途的内存分配函数:
kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc() 和 vzalloc()。
-请参考 API 文档以获取有关它们的详细信息。
+请参考 API 文档以获取有关它们的详细信息:
+Documentation/translations/zh_CN/core-api/memory-allocation.rst 。
传递结构体大小的首选形式是这样的:
@@ -761,11 +845,13 @@ kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc() 和 vzalloc()。
p = kcalloc(n, sizeof(...), ...);
-两种形式检查分配大小 n * sizeof(...) 的溢出,如果溢出返回 NULL。
+两种形式都会检查分配 n * sizeof(...) 大小时内存的溢出,如果溢出返回 NULL。
+在没有 __GFP_NOWARN 的情况下使用时,这些通用分配函数都会在失败时发起堆栈转储,
+因此当返回NULL时,没有必要发出额外的失败消息。
15) 内联弊病
-------------------------------
+------------
有一个常见的误解是 ``内联`` 是 gcc 提供的可以让代码运行更快的一个选项。虽然使
用内联函数有时候是恰当的 (比如作为一种替代宏的方式,请看第十二章),不过很多情
@@ -786,7 +872,7 @@ inline gcc 也可以自动使其内联。而且其他用户可能会要求移除
16) 函数返回值及命名
-------------------------------
+--------------------
函数可以返回多种不同类型的值,最常见的一种是表明函数执行成功或者失败的值。这样
的一个值可以表示为一个错误代码整数 (-Exxx=失败,0=成功) 或者一个 ``成功``
@@ -797,7 +883,7 @@ inline gcc 也可以自动使其内联。而且其他用户可能会要求移除
产生这种 bug,请遵循下面的惯例::
如果函数的名字是一个动作或者强制性的命令,那么这个函数应该返回错误代
- 码整数。如果是一个判断,那么函数应该返回一个 "成功" 布尔值。
+ 码整数。如果是一个判断,那么函数应该返回一个“成功”布尔值。
比如, ``add work`` 是一个命令,所以 add_work() 在成功时返回 0,在失败时返回
-EBUSY。类似的,因为 ``PCI device present`` 是一个判断,所以 pci_dev_present()
@@ -806,13 +892,35 @@ inline gcc 也可以自动使其内联。而且其他用户可能会要求移除
所有 EXPORTed 函数都必须遵守这个惯例,所有的公共函数也都应该如此。私有
(static) 函数不需要如此,但是我们也推荐这样做。
-返回值是实际计算结果而不是计算是否成功的标志的函数不受此惯例的限制。一般的,
+返回值是实际计算结果而不是计算是否成功的标志的函数不受此惯例的限制。通常
他们通过返回一些正常值范围之外的结果来表示出错。典型的例子是返回指针的函数,
他们使用 NULL 或者 ERR_PTR 机制来报告错误。
+17) 使用布尔类型
+----------------
+
+Linux内核布尔(bool)类型是C99 _Bool类型的别名。布尔值只能为0或1,而对布尔的
+隐式或显式转换将自动将值转换为true或false。在使用布尔类型时 **不需要** 构造,
+它会消除一类错误。
+
+使用布尔值时,应使用true和false定义,而不是1和0。
-17) 不要重新发明内核宏
-------------------------------
+布尔函数返回类型和堆栈变量总是可以在适当的时候使用。鼓励使用布尔来提高可读性,
+并且布尔值在存储时通常比“int”更好。
+
+如果缓存行布局或值的大小很重要,请不要使用布尔,因为其大小和对齐方式根据编译
+的体系结构而不同。针对对齐和大小进行优化的结构体不应使用布尔。
+
+如果一个结构体有多个true/false值,请考虑将它们合并为具有1比特成员的位域,或使
+用适当的固定宽度类型,如u8。
+
+类似地,对于函数参数,多个true/false值可以合并为单个按位的“标志”参数,如果调
+用点具有裸true/false常量,“标志”参数通常是更具可读性的替代方法。
+
+总之,在结构体和参数中有限地使用布尔可以提高可读性。
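+
+下面是一个说明性的小例子(结构体与标志名均为假设):
+
+.. code-block:: c
+
+  /* 多个 true/false 状态:优先使用1比特位域,而不是一堆 bool 成员 */
+  struct foo_state {
+          u8 ready:1;
+          u8 enabled:1;
+          u8 polling:1;
+  };
+
+  /* 调用点会出现裸 true/false 常量时,合并为按位“标志”参数可读性更好 */
+  #define FOO_SUBMIT_SYNC   BIT(0)
+  #define FOO_SUBMIT_RETRY  BIT(1)
+
+  int foo_submit(struct foo_state *s, unsigned int flags);
+
+  /* 调用示例:比 foo_submit(s, true, false) 更容易读懂 */
+  err = foo_submit(s, FOO_SUBMIT_SYNC | FOO_SUBMIT_RETRY);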
+
+18) 不要重新发明内核宏
+----------------------
头文件 include/linux/kernel.h 包含了一些宏,你应该使用它们,而不要自己写一些
它们的变种。比如,如果你需要计算一个数组的长度,使用这个宏
@@ -832,11 +940,11 @@ inline gcc 也可以自动使其内联。而且其他用户可能会要求移除
在你的代码里自己重新定义。
-18) 编辑器模式行和其他需要罗嗦的事情
---------------------------------------------------
+19) 编辑器模式行和其他需要罗嗦的事情
+------------------------------------
有一些编辑器可以解释嵌入在源文件里的由一些特殊标记标明的配置信息。比如,emacs
-能够解释被标记成这样的行:
+能够解析被标记成这样的行:
.. code-block:: c
@@ -852,7 +960,7 @@ inline gcc 也可以自动使其内联。而且其他用户可能会要求移除
End:
*/
-Vim 能够解释这样的标记:
+Vim 能够解析这样的标记:
.. code-block:: c
@@ -863,8 +971,8 @@ Vim 能够解释这样的标记:
的模式,或者使用其他可以产生正确的缩进的巧妙方法。
-19) 内联汇编
-------------------------------
+20) 内联汇编
+------------
在特定架构的代码中,你可能需要内联汇编与 CPU 和平台相关功能连接。需要这么做时
就不要犹豫。然而,当 C 可以完成工作时,不要平白无故地使用内联汇编。在可能的情
@@ -880,8 +988,8 @@ Vim 能够解释这样的标记:
移除了。你不必总是这样做,尽管,这不必要的举动会限制优化。
在写一个包含多条指令的单个内联汇编语句时,把每条指令用引号分割而且各占一行,
-除了最后一条指令外,在每个指令结尾加上 \n\t,让汇编输出时可以正确地缩进下一条
-指令:
+除了最后一条指令外,在每个指令结尾加上 ``\n\t`` ,让汇编输出时可以正确地缩进
+下一条指令:
.. code-block:: c
@@ -890,10 +998,10 @@ Vim 能够解释这样的标记:
: /* outputs */ : /* inputs */ : /* clobbers */);
-20) 条件编译
-------------------------------
+21) 条件编译
+------------
-只要可能,就不要在 .c 文件里面使用预处理条件 (#if, #ifdef);这样做让代码更难
+只要可能,就不要在 .c 文件里面使用预处理条件 (#if, #ifdef);这样做会让代码更难
阅读并且更难去跟踪逻辑。替代方案是,在头文件中用预处理条件提供给那些 .c 文件
使用,再给 #else 提供一个空桩 (no-op stub) 版本,然后在 .c 文件内无条件地调用
那些 (定义在头文件内的) 函数。这样做,编译器会避免为桩函数 (stub) 的调用生成
@@ -904,8 +1012,8 @@ Vim 能够解释这样的标记:
条件到这个辅助函数内。
如果你有一个在特定配置中,可能变成未使用的函数或变量,编译器会警告它定义了但
-未使用,把它标记为 __maybe_unused 而不是将它包含在一个预处理条件中。(然而,如
-果一个函数或变量总是未使用,就直接删除它。)
+未使用,请把它标记为 __maybe_unused 而不是将它包含在一个预处理条件中。(然而,
+如果一个函数或变量总是未使用,就直接删除它。)
在代码中,尽可能地使用 IS_ENABLED 宏来转化某个 Kconfig 标记为 C 的布尔
表达式,并在一般的 C 条件中使用它:
@@ -931,23 +1039,45 @@ Vim 能够解释这样的标记:
#endif /* CONFIG_SOMETHING */
-附录 I) 参考
--------------------
+附录 I) 参考资料
+----------------
-The C Programming Language, 第二版
+The C Programming Language, 2nd Edition
作者:Brian W. Kernighan 和 Denni M. Ritchie.
Prentice Hall, Inc., 1988.
-ISBN 0-13-110362-8 (软皮), 0-13-110370-9 (硬皮).
+ISBN 0-13-110362-8 (平装), 0-13-110370-9 (精装).
+
+.. note::
+
+ 《C程序设计语言(第2版)》
+ 作者:[美] Brian W. Kernighan / [美] Dennis M. Ritchie
+ 译者:徐宝文 / 李志 / 尤晋元(审校)
+ 出版社:机械工业出版社,2019
+ ISBN:9787111617945
The Practice of Programming
作者:Brian W. Kernighan 和 Rob Pike.
Addison-Wesley, Inc., 1999.
ISBN 0-201-61586-X.
+.. note::
+
+ 《程序设计实践》
+ 作者:[美] Brian W. Kernighan / [美] Rob Pike
+ 出版社:机械工业出版社,2005
+ ISBN:9787111091578
+
+ 《程序设计实践》
+ 作者:[美] Brian W. Kernighan / Rob Pike
+ 译者:裘宗燕
+ 出版社:机械工业出版社,2000
+ ISBN:9787111075738
+
GNU 手册 - 遵循 K&R 标准和此文本 - cpp, gcc, gcc internals and indent,
都可以从 https://www.gnu.org/manual/ 找到
WG14 是 C 语言的国际标准化工作组,URL: http://www.open-std.org/JTC1/SC22/WG14/
-Kernel process/coding-style.rst,作者 [email protected] 发表于 OLS 2002:
+内核文档 Documentation/process/coding-style.rst,
+作者 [email protected] 发表于 OLS 2002:
http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
diff --git a/Documentation/translations/zh_CN/process/email-clients.rst b/Documentation/translations/zh_CN/process/email-clients.rst
index 102023651118..34d51cdadc7b 100644
--- a/Documentation/translations/zh_CN/process/email-clients.rst
+++ b/Documentation/translations/zh_CN/process/email-clients.rst
@@ -1,17 +1,20 @@
-.. _cn_email_clients:
+.. SPDX-License-Identifier: GPL-2.0-or-later
.. include:: ../disclaimer-zh_CN.rst
-:Original: :ref:`Documentation/process/email-clients.rst <email_clients>`
+.. _cn_email_clients:
-译者::
+:Original: Documentation/process/email-clients.rst
- 中文版维护者: 贾威威 Harry Wei <[email protected]>
- 中文版翻译者: 贾威威 Harry Wei <[email protected]>
- 时奎亮 Alex Shi <[email protected]>
- 中文版校译者: Yinglin Luan <[email protected]>
- Xiaochen Wang <[email protected]>
- yaxinsn <[email protected]>
+:译者:
+ - 贾威威 Harry Wei <[email protected]>
+ - 时奎亮 Alex Shi <[email protected]>
+ - 吴想成 Wu XiangCheng <[email protected]>
+
+:校译:
+ - Yinglin Luan <[email protected]>
+ - Xiaochen Wang <[email protected]>
+ - yaxinsn <[email protected]>
Linux邮件客户端配置信息
=======================
@@ -27,12 +30,17 @@ Git
改日志。如果工作正常,再将补丁发送到相应的邮件列表。
-普通配置
+通用配置
--------
+
Linux内核补丁是通过邮件被提交的,最好把补丁作为邮件体的内嵌文本。有些维护者
接收附件,但是附件的内容格式应该是"text/plain"。然而,附件一般是不赞成的,
因为这会使补丁的引用部分在评论过程中变的很困难。
+同时也强烈建议在补丁或其他邮件的正文中使用纯文本格式。https://useplaintext.email
+有助于了解如何配置你喜欢的邮件客户端,并在您还没有首选的情况下列出一些推荐的
+客户端。
+
用来发送Linux内核补丁的邮件客户端在发送补丁时应该处于文本的原始状态。例如,
他们不能改变或者删除制表符或者空格,甚至是在每一行的开头或者结尾。
@@ -40,17 +48,17 @@ Linux内核补丁是通过邮件被提交的,最好把补丁作为邮件体的
不要让你的邮件客户端进行自动换行。这样也会破坏你的补丁。
-邮件客户端不能改变文本的字符集编码方式。要发送的补丁只能是ASCII或者UTF-8编码方式,
-如果你使用UTF-8编码方式发送邮件,那么你将会避免一些可能发生的字符集问题。
+邮件客户端不能改变文本的字符集编码方式。要发送的补丁只能是ASCII或者UTF-8编码
+方式,如果你使用UTF-8编码方式发送邮件,那么你将会避免一些可能发生的字符集问题。
-邮件客户端应该形成并且保持 References: 或者 In-Reply-To: 标题,那么
-邮件话题就不会中断。
+邮件客户端应该生成并且保持“References:”或者“In-Reply-To:”邮件头,这样邮件会话
+就不会中断。
-复制粘帖(或者剪贴粘帖)通常不能用于补丁,因为制表符会转换为空格。使用xclipboard, xclip
-或者xcutsel也许可以,但是最好测试一下或者避免使用复制粘帖。
+复制粘帖(或者剪贴粘帖)通常不能用于补丁,因为制表符会转换为空格。使用xclipboard,
+xclip或者xcutsel也许可以,但是最好测试一下或者避免使用复制粘帖。
-不要在使用PGP/GPG署名的邮件中包含补丁。这样会使得很多脚本不能读取和适用于你的补丁。
-(这个问题应该是可以修复的)
+不要在使用PGP/GPG签名的邮件中包含补丁。这样会使得很多脚本不能读取和适用于你的
+补丁。(这个问题应该是可以修复的)
在给内核邮件列表发送补丁之前,给自己发送一个补丁是个不错的主意,保存接收到的
邮件,将补丁用'patch'命令打上,如果成功了,再给内核邮件列表发送。
@@ -58,98 +66,133 @@ Linux内核补丁是通过邮件被提交的,最好把补丁作为邮件体的
一些邮件客户端提示
------------------
+
这里给出一些详细的MUA配置提示,可以用于给Linux内核发送补丁。这些并不意味是
所有的软件包配置总结。
说明:
-TUI = 以文本为基础的用户接口
-GUI = 图形界面用户接口
+
+- TUI = 以文本为基础的用户接口
+- GUI = 图形界面用户接口
Alpine (TUI)
-~~~~~~~~~~~~
+************
配置选项:
-在"Sending Preferences"部分:
-- "Do Not Send Flowed Text"必须开启
-- "Strip Whitespace Before Sending"必须关闭
+在 :menuselection:`Sending Preferences` 菜单:
+
+- :menuselection:`Do Not Send Flowed Text` 必须开启
+- :menuselection:`Strip Whitespace Before Sending` 必须关闭
+
+当写邮件时,光标应该放在补丁会出现的地方,然后按下 :kbd:`CTRL-R` 组合键,使指
+定的补丁文件嵌入到邮件中。
+
+Claws Mail (GUI)
+****************
+
+可以用,有人用它成功地发过补丁。
-当写邮件时,光标应该放在补丁会出现的地方,然后按下CTRL-R组合键,使指定的
-补丁文件嵌入到邮件中。
+用 :menuselection:`Message-->Insert File` (:kbd:`CTRL-I`) 或外置编辑器插入补丁。
+
+若要在Claws编辑窗口重修改插入的补丁,需关闭
+:menuselection:`Configuration-->Preferences-->Compose-->Wrapping`
+的 `Auto wrapping` 。
Evolution (GUI)
-~~~~~~~~~~~~~~~
+***************
-一些开发者成功的使用它发送补丁
+一些开发者成功的使用它发送补丁。
-当选择邮件选项:Preformat
- 从Format->Heading->Preformatted (Ctrl-7)或者工具栏
+撰写邮件时:
+从 :menuselection:`格式-->段落样式-->预格式化` (:kbd:`CTRL-7`)
+或工具栏选择 :menuselection:`预格式化` ;
然后使用:
- Insert->Text File... (Alt-n x)插入补丁文件。
+:menuselection:`插入-->文本文件...` (:kbd:`ALT-N x`) 插入补丁文件。
-你还可以"diff -Nru old.c new.c | xclip",选择Preformat,然后使用中间键进行粘帖。
+你还可以 ``diff -Nru old.c new.c | xclip`` ,选择 :menuselection:`预格式化` ,
+然后使用鼠标中键进行粘帖。
Kmail (GUI)
-~~~~~~~~~~~
+***********
一些开发者成功的使用它发送补丁。
-默认设置不为HTML格式是合适的;不要启用它。
+默认撰写设置禁用HTML格式是合适的;不要启用它。
-当书写一封邮件的时候,在选项下面不要选择自动换行。唯一的缺点就是你在邮件中输入的任何文本
-都不会被自动换行,因此你必须在发送补丁之前手动换行。最简单的方法就是启用自动换行来书写邮件,
-然后把它保存为草稿。一旦你在草稿中再次打开它,它已经全部自动换行了,那么你的邮件虽然没有
-选择自动换行,但是还不会失去已有的自动换行。
+当书写一封邮件的时候,在选项下面不要选择自动换行。唯一的缺点就是你在邮件中输
+入的任何文本都不会被自动换行,因此你必须在发送补丁之前手动换行。最简单的方法
+就是启用自动换行来书写邮件,然后把它保存为草稿。一旦你在草稿中再次打开它,它
+已经全部自动换行了,那么你的邮件虽然没有选择自动换行,但是还不会失去已有的自
+动换行。
-在邮件的底部,插入补丁之前,放上常用的补丁定界符:三个连字号(---)。
+在邮件的底部,插入补丁之前,放上常用的补丁定界符:三个连字符(``---``)。
-然后在"Message"菜单条目,选择插入文件,接着选取你的补丁文件。还有一个额外的选项,你可以
-通过它配置你的邮件建立工具栏菜单,还可以带上"insert file"图标。
+然后在 :menuselection:`信件` 菜单,选择 :menuselection:`插入文本文件` ,接
+着选取你的补丁文件。还有一个额外的选项,你可以通过它配置你的创建新邮件工具栏,
+加上 :menuselection:`插入文本文件` 图标。
-你可以安全地通过GPG标记附件,但是内嵌补丁最好不要使用GPG标记它们。作为内嵌文本的签发补丁,
-当从GPG中提取7位编码时会使他们变的更加复杂。
+将编辑器窗口拉到足够宽避免折行。对于KMail 1.13.5 (KDE 4.5.4),它会在发送邮件
+时对编辑器窗口中显示折行的地方自动换行。在选项菜单中取消自动换行仍不能解决。
+因此,如果你的补丁中有非常长的行,必须在发送之前把编辑器窗口拉得非常宽。
+参见:https://bugs.kde.org/show_bug.cgi?id=174034
-如果你非要以附件的形式发送补丁,那么就右键点击附件,然后选中属性,突出"Suggest automatic
-display",这样内嵌附件更容易让读者看到。
+你可以安全地用GPG签名附件,但是内嵌补丁最好不要使用GPG签名它们。作为内嵌文本
+插入的签名补丁将使其难以从7-bit编码中提取。
-当你要保存将要发送的内嵌文本补丁,你可以从消息列表窗格选择包含补丁的邮件,然后右击选择
-"save as"。你可以使用一个没有更改的包含补丁的邮件,如果它是以正确的形式组成。当你正真在它
-自己的窗口之下察看,那时没有选项可以保存邮件--已经有一个这样的bug被汇报到了kmail的bugzilla
-并且希望这将会被处理。邮件是以只针对某个用户可读写的权限被保存的,所以如果你想把邮件复制到其他地方,
-你不得不把他们的权限改为组或者整体可读。
+如果你非要以附件的形式发送补丁,那么就右键点击附件,然后选择
+:menuselection:`属性` ,打开 :menuselection:`建议自动显示` ,使附件内联更容
+易让读者看到。
+
+当你要保存将要发送的内嵌文本补丁,你可以从消息列表窗格选择包含补丁的邮件,然
+后右键选择 :menuselection:`另存为` 。如果整个电子邮件的组成正确,您可直接将
+其作为补丁使用。电子邮件以当前用户可读写权限保存,因此您必须 ``chmod`` ,以
+使其在复制到别处时用户组和其他人可读。
Lotus Notes (GUI)
-~~~~~~~~~~~~~~~~~
+*****************
不要使用它。
+IBM Verse (Web GUI)
+*******************
+
+同上条。
+
Mutt (TUI)
-~~~~~~~~~~
+**********
+
+很多Linux开发人员使用mutt客户端,这证明它肯定工作得非常漂亮。
-很多Linux开发人员使用mutt客户端,所以证明它肯定工作的非常漂亮。
+Mutt不自带编辑器,所以不管你使用什么编辑器,不自动断行就行。大多数编辑器都有
+:menuselection:`插入文件` 选项,它可以在不改变文件内容的情况下插入文件。
-Mutt不自带编辑器,所以不管你使用什么编辑器都不应该带有自动断行。大多数编辑器都带有
-一个"insert file"选项,它可以通过不改变文件内容的方式插入文件。
+用 ``vim`` 作为mutt的编辑器::
-'vim'作为mutt的编辑器:
set editor="vi"
- 如果使用xclip,敲入以下命令
+如果使用xclip,敲入以下命令::
+
:set paste
- 按中键之前或者shift-insert或者使用
+
+然后再按中键或者shift-insert或者使用::
+
:r filename
-如果想要把补丁作为内嵌文本。
-(a)ttach工作的很好,不带有"set paste"。
+把补丁插入为内嵌文本。
+在未设置 ``set paste`` 时(a)ttach工作的很好。
你可以通过 ``git format-patch`` 生成补丁,然后用 Mutt发送它们::
- $ mutt -H 0001-some-bug-fix.patch
+ $ mutt -H 0001-some-bug-fix.patch
配置选项:
+
它应该以默认设置的形式工作。
-然而,把"send_charset"设置为"us-ascii::utf-8"也是一个不错的主意。
+然而,把 ``send_charset`` 设置一下也是一个不错的主意::
+
+ set send_charset="us-ascii:utf-8"
Mutt 是高度可配置的。 这里是个使用mutt通过 Gmail 发送的补丁的最小配置::
@@ -178,71 +221,107 @@ Mutt 是高度可配置的。 这里是个使用mutt通过 Gmail 发送的补丁
set from = "[email protected]"
set use_from = yes
-Mutt文档含有更多信息:
+Mutt文档含有更多信息:
- http://dev.mutt.org/trac/wiki/UseCases/Gmail
+ https://gitlab.com/muttmua/mutt/-/wikis/UseCases/Gmail
- http://dev.mutt.org/doc/manual.html
+ http://www.mutt.org/doc/manual/
Pine (TUI)
-~~~~~~~~~~
+**********
Pine过去有一些空格删减问题,但是这些现在应该都被修复了。
-如果可以,请使用alpine(pine的继承者)
+如果可以,请使用alpine(pine的继承者)。
配置选项:
-- 最近的版本需要消除流程文本
-- "no-strip-whitespace-before-send"选项也是需要的。
+
+- 最近的版本需要 ``quell-flowed-text``
+- ``no-strip-whitespace-before-send`` 选项也是需要的。
Sylpheed (GUI)
-~~~~~~~~~~~~~~
+**************
- 内嵌文本可以很好的工作(或者使用附件)。
- 允许使用外部的编辑器。
-- 对于目录较多时非常慢。
+- 收件箱较多时非常慢。
- 如果通过non-SSL连接,无法使用TLS SMTP授权。
-- 在组成窗口中有一个很有用的ruler bar。
-- 给地址本中添加地址就不会正确的了解显示名。
+- 撰写窗口的标尺很有用。
+- 将地址添加到通讯簿时无法正确理解显示的名称。
Thunderbird (GUI)
-~~~~~~~~~~~~~~~~~
+*****************
+
+Thunderbird是Outlook的克隆版本,它很容易损坏文本,但也有一些方法强制修正。
+
+在完成修改后(包括安装扩展),您需要重新启动Thunderbird。
+
+- 允许使用外部编辑器:
+
+ 使用Thunderbird发补丁最简单的方法是使用扩展来打开您最喜欢的外部编辑器。
+
+ 下面是一些能够做到这一点的扩展样例。
+
+ - “External Editor Revived”
+
+ https://github.com/Frederick888/external-editor-revived
+
+ https://addons.thunderbird.net/en-GB/thunderbird/addon/external-editor-revived/
+
+ 它需要安装“本地消息主机(native messaging host)”。
+ 参见以下文档:
+ https://github.com/Frederick888/external-editor-revived/wiki
+
+ - “External Editor”
+
+ https://github.com/exteditor/exteditor
+
+ 下载并安装此扩展,然后打开 :menuselection:`新建消息` 窗口, 用
+ :menuselection:`查看-->工具栏-->自定义...` 给它增加一个按钮,直接点击此
+ 按钮即可使用外置编辑器。
+
+ 请注意,“External Editor”要求你的编辑器不能fork,换句话说,编辑器必须在
+ 关闭前不返回。你可能需要传递额外的参数或修改编辑器设置。最值得注意的是,
+ 如果您使用的是gvim,那么您必须将 :menuselection:`external editor` 设置的
+ 编辑器字段设置为 ``/usr/bin/gvim --nofork"`` (假设可执行文件在
+ ``/usr/bin`` ),以传递 ``-f`` 参数。如果您正在使用其他编辑器,请阅读其
+ 手册了解如何处理。
-默认情况下,thunderbird很容易损坏文本,但是还有一些方法可以强制它变得更好。
+若要修正内部编辑器,请执行以下操作:
-- 在用户帐号设置里,组成和寻址,不要选择"Compose messages in HTML format"。
+- 修改你的Thunderbird设置,不要使用 ``format=flowed`` !
+ 回到主窗口,按照
+ :menuselection:`主菜单-->首选项-->常规-->配置编辑器...`
+ 打开Thunderbird的配置编辑器。
-- 编辑你的Thunderbird配置设置来使它不要拆行使用:user_pref("mailnews.wraplength", 0);
+ - 将 ``mailnews.send_plaintext_flowed`` 设为 ``false``
-- 编辑你的Thunderbird配置设置,使它不要使用"format=flowed"格式:user_pref("mailnews.
- send_plaintext_flowed", false);
+ - 将 ``mailnews.wraplength`` 从 ``72`` 改为 ``0``
-- 你需要使Thunderbird变为预先格式方式:
- 如果默认情况下你书写的是HTML格式,那不是很难。仅仅从标题栏的下拉框中选择"Preformat"格式。
- 如果默认情况下你书写的是文本格式,你不得把它改为HTML格式(仅仅作为一次性的)来书写新的消息,
- 然后强制使它回到文本格式,否则它就会拆行。要实现它,在写信的图标上使用shift键来使它变为HTML
- 格式,然后标题栏的下拉框中选择"Preformat"格式。
+- 不要写HTML邮件!
+ 回到主窗口,打开
+ :menuselection:`主菜单-->账户设置-->你的@邮件.地址-->通讯录/编写&地址簿` ,
+ 关掉 ``以HTML格式编写消息`` 。
-- 允许使用外部的编辑器:
- 针对Thunderbird打补丁最简单的方法就是使用一个"external editor"扩展,然后使用你最喜欢的
- $EDITOR来读取或者合并补丁到文本中。要实现它,可以下载并且安装这个扩展,然后添加一个使用它的
- 按键View->Toolbars->Customize...最后当你书写信息的时候仅仅点击它就可以了。
+- 只用纯文本格式查看邮件!
+ 回到主窗口, :menuselection:`主菜单-->查看-->消息体为-->纯文本` !
TkRat (GUI)
-~~~~~~~~~~~
+***********
可以使用它。使用"Insert file..."或者外部的编辑器。
Gmail (Web GUI)
-~~~~~~~~~~~~~~~
+***************
不要使用它发送补丁。
Gmail网页客户端自动地把制表符转换为空格。
-虽然制表符转换为空格问题可以被外部编辑器解决,同时它还会使用回车换行把每行拆分为78个字符。
+虽然制表符转换为空格问题可以被外部编辑器解决,但它同时还会使用回车换行把每行
+拆分为78个字符。
-另一个问题是Gmail还会把任何不是ASCII的字符的信息改为base64编码。它把东西变的像欧洲人的名字。
+另一个问题是Gmail还会把任何含有非ASCII的字符的消息改用base64编码,如欧洲人的
+名字。
- ###
diff --git a/Documentation/translations/zh_CN/process/submit-checklist.rst b/Documentation/translations/zh_CN/process/submit-checklist.rst
index a64858d321fc..3d6ee21c74ae 100644
--- a/Documentation/translations/zh_CN/process/submit-checklist.rst
+++ b/Documentation/translations/zh_CN/process/submit-checklist.rst
@@ -1,105 +1,111 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :ref:`Documentation/process/submit-checklist.rst <submitchecklist>`
-:Translator: Alex Shi <[email protected]>
+:Original: Documentation/process/submit-checklist.rst
+:Translator:
+ - Alex Shi <[email protected]>
+ - Wu XiangCheng <[email protected]>
.. _cn_submitchecklist:
-Linux内核补丁提交清单
-~~~~~~~~~~~~~~~~~~~~~
+Linux内核补丁提交检查单
+~~~~~~~~~~~~~~~~~~~~~~~
如果开发人员希望看到他们的内核补丁提交更快地被接受,那么他们应该做一些基本
的事情。
-这些都是在
-:ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+这些都是在 Documentation/translations/zh_CN/process/submitting-patches.rst
和其他有关提交Linux内核补丁的文档中提供的。
-1) 如果使用工具,则包括定义/声明该工具的文件。不要依赖于其他头文件拉入您使用
+1) 如果使用工具,则包括定义/声明该工具的文件。不要依赖其他头文件来引入您使用
的头文件。
2) 干净的编译:
- a) 使用适用或修改的 ``CONFIG`` 选项 ``=y``、``=m`` 和 ``=n`` 。没有GCC
+ a) 使用合适的 ``CONFIG`` 选项 ``=y``、``=m`` 和 ``=n`` 。没有 ``gcc``
警告/错误,没有链接器警告/错误。
- b) 通过allnoconfig、allmodconfig
+ b) 通过 ``allnoconfig`` 、 ``allmodconfig``
c) 使用 ``O=builddir`` 时可以成功编译
-3) 通过使用本地交叉编译工具或其他一些构建场在多个CPU体系结构上构建。
+ d) 任何 Documentation/ 下的变更都能成功构建且不引入新警告/错误。
+ 用 ``make htmldocs`` 或 ``make pdfdocs`` 检验构建情况并修复问题。
+
+3) 通过使用本地交叉编译工具或其他一些构建设施在多个CPU体系结构上构建。
4) PPC64是一种很好的交叉编译检查体系结构,因为它倾向于对64位的数使用无符号
长整型。
-5) 如下所述 :ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`.
- 检查您的补丁是否为常规样式。在提交( ``scripts/check patch.pl`` )之前,
- 使用补丁样式检查器检查是否有轻微的冲突。您应该能够处理您的补丁中存在的所有
+5) 按 Documentation/translations/zh_CN/process/coding-style.rst 所述检查您的
+ 补丁是否为常规样式。在提交之前使用补丁样式检查器 ``scripts/checkpatch.pl``
+ 检查是否有轻微的冲突。您应该能够处理您的补丁中存在的所有
违规行为。
-6) 任何新的或修改过的 ``CONFIG`` 选项都不会弄脏配置菜单,并默认为关闭,除非
- 它们符合 ``Documentation/kbuild/kconfig-language.rst`` 中记录的异常条件,
- 菜单属性:默认值.
+6) 任何新的或修改过的 ``CONFIG`` 选项都不应搞乱配置菜单,并默认为关闭,除非
+ 它们符合 ``Documentation/kbuild/kconfig-language.rst`` 菜单属性:默认值中
+ 记录的例外条件。
7) 所有新的 ``kconfig`` 选项都有帮助文本。
8) 已仔细审查了相关的 ``Kconfig`` 组合。这很难用测试来纠正——脑力在这里是有
回报的。
-9) 用 sparse 检查干净。
+9) 通过 sparse 清查。
+ (参见 Documentation/translations/zh_CN/dev-tools/sparse.rst )
10) 使用 ``make checkstack`` 和 ``make namespacecheck`` 并修复他们发现的任何
问题。
.. note::
- ``checkstack`` 并没有明确指出问题,但是任何一个在堆栈上使用超过512
+ ``checkstack`` 并不会明确指出问题,但是任何一个在堆栈上使用超过512
字节的函数都可以进行更改。
-11) 包括 :ref:`kernel-doc <kernel_doc>` 内核文档以记录全局内核API。(静态函数
- 不需要,但也可以。)使用 ``make htmldocs`` 或 ``make pdfdocs`` 检查
- :ref:`kernel-doc <kernel_doc>` 并修复任何问题。
+11) 包括 :ref:`kernel-doc <kernel_doc_zh>` 内核文档以记录全局内核API。(静态
+ 函数不需要,但也可以。)使用 ``make htmldocs`` 或 ``make pdfdocs`` 检查
+ :ref:`kernel-doc <kernel_doc_zh>` 并修复任何问题。
-12) 通过以下选项同时启用的测试 ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``,
+12) 通过以下选项同时启用的测试: ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``,
``CONFIG_DEBUG_SLAB``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``,
``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``,
- ``CONFIG_PROVE_RCU`` and ``CONFIG_DEBUG_OBJECTS_RCU_HEAD``
-
-13) 已经过构建和运行时测试,包括有或没有 ``CONFIG_SMP``, ``CONFIG_PREEMPT``.
+ ``CONFIG_PROVE_RCU`` 和 ``CONFIG_DEBUG_OBJECTS_RCU_HEAD`` 。
-14) 如果补丁程序影响IO/磁盘等:使用或不使用 ``CONFIG_LBDAF`` 进行测试。
+13) 在 ``CONFIG_SMP``, ``CONFIG_PREEMPT`` 开启和关闭的情况下都进行构建和运行
+ 时测试。
-15) 所有代码路径都已在启用所有lockdep功能的情况下运行。
+14) 所有代码路径都已在启用所有死锁检测(lockdep)功能的情况下运行。
-16) 所有新的/proc条目都记录在 ``Documentation/``
+15) 所有新的 ``/proc`` 条目都记录在 ``Documentation/``
-17) 所有新的内核引导参数都记录在
+16) 所有新的内核引导参数都记录在
Documentation/admin-guide/kernel-parameters.rst 中。
-18) 所有新的模块参数都记录在 ``MODULE_PARM_DESC()``
+17) 所有新的模块参数都记录在 ``MODULE_PARM_DESC()``
-19) 所有新的用户空间接口都记录在 ``Documentation/ABI/`` 中。有关详细信息,
+18) 所有新的用户空间接口都记录在 ``Documentation/ABI/`` 中。有关详细信息,
请参阅 ``Documentation/ABI/README`` 。更改用户空间接口的补丁应该抄送
-20) 已通过至少注入slab和page分配失败进行检查。请参阅 ``Documentation/fault-injection/``
+19) 已通过至少注入slab和page分配失败进行检查。请参阅 ``Documentation/fault-injection/`` 。
如果新代码是实质性的,那么添加子系统特定的故障注入可能是合适的。
-21) 新添加的代码已经用 ``gcc -W`` 编译(使用 ``make EXTRA-CFLAGS=-W`` )。这
+20) 新添加的代码已经用 ``gcc -W`` 编译(使用 ``make EXTRA-CFLAGS=-W`` )。这
将产生大量噪声,但对于查找诸如“警告:有符号和无符号之间的比较”之类的错误
很有用。
-22) 在它被合并到-mm补丁集中之后进行测试,以确保它仍然与所有其他排队的补丁以
+21) 在它被合并到-mm补丁集中之后进行测试,以确保它仍然与所有其他排队的补丁以
及VM、VFS和其他子系统中的各种更改一起工作。
-23) 所有内存屏障例如 ``barrier()``, ``rmb()``, ``wmb()`` 都需要源代码中的注
+22) 所有内存屏障(例如 ``barrier()``, ``rmb()``, ``wmb()`` )都需要源代码注
释来解释它们正在执行的操作及其原因的逻辑。
-24) 如果补丁添加了任何ioctl,那么也要更新 ``Documentation/userspace-api/ioctl/ioctl-number.rst``
+23) 如果补丁添加了任何ioctl,那么也要更新
+ ``Documentation/userspace-api/ioctl/ioctl-number.rst`` 。
-25) 如果修改后的源代码依赖或使用与以下 ``Kconfig`` 符号相关的任何内核API或
+24) 如果修改后的源代码依赖或使用与以下 ``Kconfig`` 符号相关的任何内核API或
功能,则在禁用相关 ``Kconfig`` 符号和/或 ``=m`` (如果该选项可用)的情况
下测试以下多个构建[并非所有这些都同时存在,只是它们的各种/随机组合]:
- ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``, ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``,
- ``CONFIG_NET``, ``CONFIG_INET=n`` (但是后者伴随 ``CONFIG_NET=y``).
+ ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``,
+ ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``,
+ ``CONFIG_NET``, ``CONFIG_INET=n`` (但是最后一个需要 ``CONFIG_NET=y`` )。
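+
+上述第9条提到的 sparse 检查,常见的调用方式大致如下(仅为示意,具体请以上面
+引用的 sparse 文档为准)::
+
+  $ make C=1      # 仅检查将要重新编译的文件
+  $ make C=2      # 检查所有源文件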
diff --git a/Documentation/translations/zh_CN/process/submitting-patches.rst b/Documentation/translations/zh_CN/process/submitting-patches.rst
index ebb7f37575c1..f8978f02057c 100644
--- a/Documentation/translations/zh_CN/process/submitting-patches.rst
+++ b/Documentation/translations/zh_CN/process/submitting-patches.rst
@@ -1,142 +1,92 @@
-.. _cn_submittingpatches:
+.. SPDX-License-Identifier: GPL-2.0-or-later
.. include:: ../disclaimer-zh_CN.rst
-:Original: :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+.. _cn_submittingpatches:
+
+:Original: Documentation/process/submitting-patches.rst
-译者::
+:译者:
+ - 钟宇 TripleX Chung <[email protected]>
+ - 时奎亮 Alex Shi <[email protected]>
+ - 吴想成 Wu XiangCheng <[email protected]>
- 中文版维护者: 钟宇 TripleX Chung <[email protected]>
- 中文版翻译者: 钟宇 TripleX Chung <[email protected]>
- 时奎亮 Alex Shi <[email protected]>
- 中文版校译者: 李阳 Li Yang <[email protected]>
- 王聪 Wang Cong <[email protected]>
+:校译:
+ - 李阳 Li Yang <[email protected]>
+ - 王聪 Wang Cong <[email protected]>
-如何让你的改动进入内核
-======================
+提交补丁:如何让你的改动进入内核
+================================
对于想要将改动提交到 Linux 内核的个人或者公司来说,如果不熟悉“规矩”,
-提交的流程会让人畏惧。本文档收集了一系列建议,这些建议可以大大的提高你
+提交的流程会让人畏惧。本文档包含了一系列建议,可以大大提高你
的改动被接受的机会.
-以下文档含有大量简洁的建议, 具体请见:
-:ref:`Documentation/process <development_process_main>`
-同样,:ref:`Documentation/translations/zh_CN/process/submit-checklist.rst <cn_submitchecklist>`
-给出在提交代码前需要检查的项目的列表。
+本文档以较为简洁的行文给出了大量建议。关于内核开发流程如何进行的详细信息,
+参见: Documentation/translations/zh_CN/process/development-process.rst 。
+Documentation/translations/zh_CN/process/submit-checklist.rst 给出了一系列
+提交补丁之前要检查的事项。设备树相关的补丁,请参阅
+Documentation/devicetree/bindings/submitting-patches.rst 。
-其中许多步骤描述了Git版本控制系统的默认行为;如果您使用Git来准备补丁,
-您将发现它为您完成的大部分机械工作,尽管您仍然需要准备和记录一组合理的
-补丁。一般来说,使用git将使您作为内核开发人员的生活更轻松。
+本文档假设您正在使用 ``git`` 准备你的补丁。如果您不熟悉 ``git`` ,最好学习
+如何使用它,这将使您作为内核开发人员的生活变得更加轻松。
+部分子系统和维护人员的树有一些关于其工作流程和要求的额外信息,请参阅
+Documentation/process/maintainer-handbooks.rst 。
-0) 获取当前源码树
------------------
+获取当前源码树
+--------------
-如果您没有一个可以使用当前内核源代码的存储库,请使用git获取一个。您将要
-从主线存储库开始,它可以通过以下方式获取::
+如果您手头没有当前内核源代码的存储库,请使用 ``git`` 获取一份。您需要先获取
+主线存储库,它可以通过以下命令拉取::
- git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-但是,请注意,您可能不希望直接针对主线树进行开发。大多数子系统维护人员运
+但是,请注意,您可能不想直接针对主线树进行开发。大多数子系统维护人员运
行自己的树,并希望看到针对这些树准备的补丁。请参见MAINTAINERS文件中子系
-统的 **T:** 项以查找该树,或者简单地询问维护者该树是否未在其中列出。
-
-仍然可以通过tarballs下载内核版本(如下一节所述),但这是进行内核开发的
-一种困难的方式。
-
-1) "diff -up"
--------------
-
-使用 "diff -up" 或者 "diff -uprN" 来创建补丁。
-
-所有内核的改动,都是以补丁的形式呈现的,补丁由 diff(1) 生成。创建补丁的
-时候,要确认它是以 "unified diff" 格式创建的,这种格式由 diff(1) 的 '-u'
-参数生成。而且,请使用 '-p' 参数,那样会显示每个改动所在的C函数,使得
-产生的补丁容易读得多。补丁应该基于内核源代码树的根目录,而不是里边的任
-何子目录。
-
-为一个单独的文件创建补丁,一般来说这样做就够了::
-
- SRCTREE=linux
- MYFILE=drivers/net/mydriver.c
+统的 **T:** 项以查找该树,或者直接询问维护者该树是否未在其中列出。
- cd $SRCTREE
- cp $MYFILE $MYFILE.orig
- vi $MYFILE # make your change
- cd ..
- diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch
+.. _zh_describe_changes:
-为多个文件创建补丁,你可以解开一个没有修改过的内核源代码树,然后和你自
-己的代码树之间做 diff 。例如::
-
- MYSRC=/devel/linux
-
- tar xvfz linux-3.19.tar.gz
- mv linux-3.19 linux-3.19-vanilla
- diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \
- linux-3.19-vanilla $MYSRC > /tmp/patch
-
-"dontdiff" 是内核在编译的时候产生的文件的列表,列表中的文件在 diff(1)
-产生的补丁里会被跳过。
-
-确定你的补丁里没有包含任何不属于这次补丁提交的额外文件。记得在用diff(1)
-生成补丁之后,审阅一次补丁,以确保准确。
-
-如果你的改动很散乱,你应该研究一下如何将补丁分割成独立的部分,将改动分
-割成一系列合乎逻辑的步骤。这样更容易让其他内核开发者审核,如果你想你的
-补丁被接受,这是很重要的。请参阅:
-:ref:`cn_split_changes`
-
-如果你用 ``git`` , ``git rebase -i`` 可以帮助你这一点。如果你不用 ``git``,
-``quilt`` <https://savannah.nongnu.org/projects/quilt> 另外一个流行的选择。
-
-.. _cn_describe_changes:
-
-2) 描述你的改动
----------------
+描述你的改动
+------------
描述你的问题。无论您的补丁是一行错误修复还是5000行新功能,都必须有一个潜在
-的问题激励您完成这项工作。让审稿人相信有一个问题值得解决,让他们读完第一段
-是有意义的。
+的问题激励您完成这项工作。说服审阅者相信有一个问题值得解决,让他们读完第一段
+后就能明白这一点。
描述用户可见的影响。直接崩溃和锁定是相当有说服力的,但并不是所有的错误都那么
-明目张胆。即使在代码审查期间发现了这个问题,也要描述一下您认为它可能对用户产
+明目张胆。即使在代码审阅期间发现了这个问题,也要描述一下您认为它可能对用户产
生的影响。请记住,大多数Linux安装运行的内核来自二级稳定树或特定于供应商/产品
的树,只从上游精选特定的补丁,因此请包含任何可以帮助您将更改定位到下游的内容:
触发的场景、DMESG的摘录、崩溃描述、性能回归、延迟尖峰、锁定等。
-量化优化和权衡。如果您声称在性能、内存消耗、堆栈占用空间或二进制大小方面有所
-改进,请包括支持它们的数字。但也要描述不明显的成本。优化通常不是免费的,而是
-在CPU、内存和可读性之间进行权衡;或者,探索性的工作,在不同的工作负载之间进
+量化优化和权衡。如果您声称在性能、内存消耗、堆栈占用空间或二进制大小方面有所
+改进,请包括支持它们的数据。但也要描述不明显的成本。优化通常不是零成本的,而是
+在CPU、内存和可读性之间进行权衡;或者,做探索性的工作,在不同的工作负载之间进
行权衡。请描述优化的预期缺点,以便审阅者可以权衡成本和收益。
-一旦问题建立起来,就要详细地描述一下您实际在做什么。对于审阅者来说,用简单的
-英语描述代码的变化是很重要的,以验证代码的行为是否符合您的意愿。
+提出问题之后,就要详细地描述一下您实际在做的技术细节。对于审阅者来说,用简练的
+英语描述代码的变化是很重要的,以验证代码的行为是否符合您的意图。
-如果您将补丁描述写在一个表单中,这个表单可以很容易地作为“提交日志”放入Linux
-的源代码管理系统git中,那么维护人员将非常感谢您。见 :ref:`cn_explicit_in_reply_to`.
+如果您将补丁描述写成“标准格式”,可以很容易地作为“提交日志”放入Linux的源代
+码管理系统 ``git`` 中,那么维护人员将非常感谢您。
+参见 :ref:`zh_the_canonical_patch_format` 。
每个补丁只解决一个问题。如果你的描述开始变长,这就表明你可能需要拆分你的补丁。
-请见 :ref:`cn_split_changes`
+请见 :ref:`zh_split_changes` 。
-提交或重新提交修补程序或修补程序系列时,请包括完整的修补程序说明和理由。不要
+提交或重新提交补丁或补丁系列时,请包括完整的补丁说明和理由。不要
只说这是补丁(系列)的第几版。不要期望子系统维护人员引用更早的补丁版本或引用
URL来查找补丁描述并将其放入补丁中。也就是说,补丁(系列)及其描述应该是独立的。
-这对维护人员和审查人员都有好处。一些评审者可能甚至没有收到补丁的早期版本。
+这对维护人员和审阅者都有好处。一些审阅者可能甚至没有收到补丁的早期版本。
-描述你在命令语气中的变化,例如“make xyzzy do frotz”而不是“[This patch]make
+用祈使句描述你的变更,例如“make xyzzy do frotz”而不是“[This patch]make
xyzzy do frotz”或“[I]changed xyzzy to do frotz”,就好像你在命令代码库改变
它的行为一样。
-如果修补程序修复了一个记录的bug条目,请按编号和URL引用该bug条目。如果补丁来
-自邮件列表讨论,请给出邮件列表存档的URL;使用带有 ``Message-ID`` 的
-https://lore.kernel.org/ 重定向,以确保链接不会过时。
-
-但是,在没有外部资源的情况下,尽量让你的解释可理解。除了提供邮件列表存档或
-bug的URL之外,还要总结需要提交补丁的相关讨论要点。
-
-如果您想要引用一个特定的提交,不要只引用提交的 SHA-1 ID。还请包括提交的一行
+如果您想要引用一个特定的提交,不要只引用提交的SHA-1 ID。还请包括提交的一行
摘要,以便于审阅者了解它是关于什么的。例如::
Commit e21d2170f36602ae2708 ("video: remove unnecessary
@@ -144,82 +94,104 @@ bug的URL之外,还要总结需要提交补丁的相关讨论要点。
platform_set_drvdata(), but left the variable "dev" unused,
delete it.
-您还应该确保至少使用前12位 SHA-1 ID. 内核存储库包含*许多*对象,使与较短的ID
+您还应该确保至少使用前12位SHA-1 ID。内核存储库包含 *许多* 对象,使较短的ID
发生冲突的可能性很大。记住,即使现在不会与您的六个字符ID发生冲突,这种情况
-可能五年后改变。
+也可能在五年后改变。
+
+如果该变更的相关讨论或背景信息可以在网上查阅,请加上“Link:”标签指向它。例如
+你的补丁修复了一个缺陷,需要添加一个带有URL的标签指向邮件列表存档或缺陷跟踪器
+的相关报告;如果该补丁是由一些早先邮件列表讨论或网络上的记录引起的,请指向它。
+
+当链接到邮件列表存档时,请首选lore.kernel.org邮件存档服务。用邮件中的
+``Message-ID`` 头(去掉尖括号)可以创建链接URL。例如::
+
+ Link: https://lore.kernel.org/r/[email protected]/
+
+请检查该链接以确保可用且指向正确的邮件。
-如果修补程序修复了特定提交中的错误,例如,使用 ``git bisct`` ,请使用带有前
-12个字符SHA-1 ID 的"Fixes:"标记和单行摘要。为了简化不要将标记拆分为多个,
-行、标记不受分析脚本“75列换行”规则的限制。例如::
+不过,在没有外部资源的情况下,也要尽量让你的解释可理解。除了提供邮件列表存档或
+缺陷的URL之外,还需要总结该补丁的相关讨论要点。
- Fixes: 54a4f0239f2e ("KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed")
+如果补丁修复了特定提交中的错误,例如使用 ``git bisect`` 发现了一个问题,请使用
+带有前12个字符SHA-1 ID的“Fixes:”标签和单行摘要。为了简化解析脚本,不要将该
+标签拆分为多行,标签不受“75列换行”规则的限制。例如::
-下列 ``git config`` 设置可以添加让 ``git log``, ``git show`` 漂亮的显示格式::
+ Fixes: 54a4f0239f2e ("KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed")
+
+下列 ``git config`` 设置可以让 ``git log``, ``git show`` 增加上述风格的显示格式::
[core]
abbrev = 12
[pretty]
fixes = Fixes: %h (\"%s\")
-.. _cn_split_changes:
+使用示例::
+
+ $ git log -1 --pretty=fixes 54a4f0239f2e
+ Fixes: 54a4f0239f2e ("KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed")
-3) 拆分你的改动
----------------
+.. _zh_split_changes:
-将每个逻辑更改分隔成一个单独的补丁。
+拆分你的改动
+------------
+
+将每个 **逻辑更改** 拆分成一个单独的补丁。
例如,如果你的改动里同时有bug修正和性能优化,那么把这些改动拆分到两个或
-者更多的补丁文件中。如果你的改动包含对API的修改,并且修改了驱动程序来适
-应这些新的API,那么把这些修改分成两个补丁。
+者更多的补丁文件中。如果你的改动包含对API的修改,并且增加了一个使用该新API
+的驱动,那么把这些修改分成两个补丁。
另一方面,如果你将一个单独的改动做成多个补丁文件,那么将它们合并成一个
单独的补丁文件。这样一个逻辑上单独的改动只被包含在一个补丁文件里。
-如果有一个补丁依赖另外一个补丁来完成它的改动,那没问题。简单的在你的补
-丁描述里指出“这个补丁依赖某补丁”就好了。
+需要记住的一点是,每个补丁的更改都应易于理解,以便审阅者验证。每个补丁都应该
+对其价值进行阐述。
+
+如果有一个补丁依赖另外一个补丁来完成它的改动,那没问题。直接在你的补
+丁描述里指出 **“这个补丁依赖某补丁”** 就好了。
-在将您的更改划分为一系列补丁时,要特别注意确保内核在系列中的每个补丁之后
-都能正常构建和运行。使用 ``git bisect`` 来追踪问题的开发者可能会在任何时
-候分割你的补丁系列;如果你在中间引入错误,他们不会感谢你。
+在将您的更改划分为一系列补丁时,要特别注意确保内核在应用系列中的每个补丁之后
+都能正常构建和运行。使用 ``git bisect`` 来追踪问题的开发者可能会在任何地方分
+割你的补丁系列;如果你在中间引入错误,他们不会感谢你。
-如果你不能将补丁浓缩成更少的文件,那么每次大约发送出15个,然后等待审查
+如果你不能将补丁系列浓缩得更小,那么每次大约发送出15个补丁,然后等待审阅
和集成。
-4) 检查你的更改风格
--------------------
+检查你的更改风格
+----------------
-检查您的补丁是否存在基本样式冲突,详细信息可在
-:ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`
-中找到。如果不这样做,只会浪费审稿人的时间,并且会导致你的补丁被拒绝,甚至
+检查您的补丁是否违反了基本样式规定,详细信息可在
+Documentation/translations/zh_CN/process/coding-style.rst
+中找到。如果不这样做,只会浪费审阅者的时间,并且会导致你的补丁被拒绝,甚至
可能没有被阅读。
一个重要的例外是在将代码从一个文件移动到另一个文件时——在这种情况下,您不应
该在移动代码的同一个补丁中修改移动的代码。这清楚地描述了移动代码和您的更改
-的行为。这大大有助于审查实际差异,并允许工具更好地跟踪代码本身的历史。
+的行为。这大大有助于审阅实际差异,并允许工具更好地跟踪代码本身的历史。
在提交之前,使用补丁样式检查程序检查补丁(scripts/check patch.pl)。不过,
请注意,样式检查程序应该被视为一个指南,而不是作为人类判断的替代品。如果您
-的代码看起来更好,但有违规行为,那么最好不要使用它。
+的代码看起来更好,但有违规行为,那么最好别管它。
检查者报告三个级别:
- ERROR:很可能出错的事情
- - WARNING:需要仔细审查的事项
+ - WARNING:需要仔细审阅的事项
- CHECK:需要思考的事情
您应该能够判断您的补丁中存在的所有违规行为。
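+
+例如,可以在发送之前直接对生成的补丁文件运行检查(文件名仅为示意)::
+
+  $ ./scripts/checkpatch.pl --strict 0001-subsys-fix-something.patch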
-5) 选择补丁收件人
------------------
+选择补丁收件人
+--------------
-您应该总是在任何补丁上复制相应的子系统维护人员,以获得他们维护的代码;查看
+您应该总是知会任何补丁相应代码的子系统维护人员;查看
维护人员文件和源代码修订历史记录,以了解这些维护人员是谁。脚本
-scripts/get_Maintainer.pl在这个步骤中非常有用。如果您找不到正在工作的子系统
+scripts/get_maintainer.pl在这个步骤中非常有用。如果您找不到正在工作的子系统
的维护人员,那么Andrew Morton([email protected])将充当最后的维护
人员。
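+
+例如,可以直接对准备发送的补丁运行该脚本,得到建议的收件人列表(补丁文件名仅
+为示意)::
+
+  $ ./scripts/get_maintainer.pl 0001-subsys-fix-something.patch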
-您通常还应该选择至少一个邮件列表来接收补丁集的。[email protected]
-作为最后一个解决办法的列表,但是这个列表上的体积已经引起了许多开发人员的拒绝。
+您通常还应该选择至少一个邮件列表来接收补丁集的副本。[email protected]
+是所有补丁的默认列表,但是这个列表的流量已经导致了许多开发人员不再看它。
在MAINTAINERS文件中查找子系统特定的列表;您的补丁可能会在那里得到更多的关注。
不过,请不要发送垃圾邮件到无关的列表。
@@ -229,189 +201,170 @@ http://vger.kernel.org/vger-lists.html 上找到它们的列表。不过,也�
不要一次发送超过15个补丁到vger邮件列表!!!!
-Linus Torvalds 是决定改动能否进入 Linux 内核的最终裁决者。他的 e-mail
-地址是 <[email protected]> 。他收到的 e-mail 很多,所以一般
-的说,最好别给他发 e-mail。
-
-如果您有修复可利用安全漏洞的补丁,请将该补丁发送到 [email protected]。对于
-严重的bug,可以考虑短期暂停以允许分销商向用户发布补丁;在这种情况下,显然不应
-将补丁发送到任何公共列表。
+Linus Torvalds是决定改动能否进入 Linux 内核的最终裁决者。他的邮件地址是
[email protected] 。他收到的邮件很多,所以一般来说最好 **别**
+给他发邮件。
-修复已发布内核中严重错误的补丁程序应该指向稳定版维护人员,方法是放这样的一行::
+如果您有修复可利用安全漏洞的补丁,请将该补丁发送到 [email protected] 。对于
+严重的bug,可以考虑短期禁令以允许分销商(有时间)向用户发布补丁;在这种情况下,
+显然不应将补丁发送到任何公共列表。
+参见 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。
+修复已发布内核中严重错误的补丁程序应该抄送给稳定版维护人员,方法是把以下列行
+放进补丁的签准区(注意,不是电子邮件收件人)::
-进入补丁的签准区(注意,不是电子邮件收件人)。除了这个文件之外,您还应该阅读
-:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
-但是,请注意,一些子系统维护人员希望得出他们自己的结论,即哪些补丁应该被放到
-稳定的树上。尤其是网络维护人员,不希望看到单个开发人员在补丁中添加像上面这样
-的行。
+除了本文件之外,您还应该阅读
+Documentation/translations/zh_CN/process/stable-kernel-rules.rst 。
-如果更改影响到用户和内核接口,请向手册页维护人员(如维护人员文件中所列)发送
+如果更改影响到用户侧内核接口,请向手册页维护人员(如维护人员文件中所列)发送
手册页补丁,或至少发送更改通知,以便一些信息进入手册页。还应将用户空间API
-更改复制到 [email protected]
+更改抄送到 [email protected]
-6) 没有 MIME 编码,没有链接,没有压缩,没有附件,只有纯文本
------------------------------------------------------------
+不要MIME编码,不要链接,不要压缩,不要附件,只要纯文本
+------------------------------------------------------
Linus 和其他的内核开发者需要阅读和评论你提交的改动。对于内核开发者来说
-,可以“引用”你的改动很重要,使用一般的 e-mail 工具,他们就可以在你的
+,可以“引用”你的改动很重要,使用一般的邮件工具,他们就可以在你的
代码的任何位置添加评论。
-因为这个原因,所有的提交的补丁都是 e-mail 中“内嵌”的。
+因为这个原因,所有的提交的补丁都是邮件中“内嵌”的。最简单(和推荐)的方法就
+是使用 ``git send-email`` 。https://git-send-email.io 有 ``git send-email``
+的交互式教程。
+
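+一个大致的调用示意(收件人地址仅为占位,请替换为实际的维护者和邮件列表)::
+
+  $ git send-email --to='maintainer@example.com' \
+        --cc='subsystem-list@example.com' outgoing/*.patch
+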
+如果你选择不用 ``git send-email`` :
.. warning::
- 如果你使用剪切-粘贴你的补丁,小心你的编辑器的自动换行功能破坏你的补丁
-不要将补丁作为 MIME 编码的附件,不管是否压缩。很多流行的 e-mail 软件不
-是任何时候都将 MIME 编码的附件当作纯文本发送的,这会使得别人无法在你的
-代码中加评论。另外,MIME 编码的附件会让 Linus 多花一点时间来处理,这就
-降低了你的改动被接受的可能性。
+ 如果你使用剪切-粘贴你的补丁,小心你的编辑器的自动换行功能破坏你的补丁
-例外:如果你的邮递员弄坏了补丁,那么有人可能会要求你使用mime重新发送补丁
+不要将补丁作为MIME编码的附件,不管是否压缩。很多流行的邮件软件不
+是任何时候都将MIME编码的附件当作纯文本发送的,这会使得别人无法在你的
+代码中加评论。另外,MIME编码的附件会让Linus多花一点时间来处理,这就
+降低了你的改动被接受的可能性。
-请参阅 :ref:`Documentation/translations/zh_CN/process/email-clients.rst <cn_email_clients>`
-以获取有关配置电子邮件客户端以使其不受影响地发送修补程序的提示。
+例外:如果你的邮路损坏了补丁,那么有人可能会要求你使用MIME重新发送补丁。
-7) e-mail 的大小
-----------------
+请参阅 Documentation/translations/zh_CN/process/email-clients.rst
+以获取有关配置电子邮件客户端以使其不受影响地发送补丁的提示。
-大的改动对邮件列表不合适,对某些维护者也不合适。如果你的补丁,在不压缩
-的情况下,超过了300kB,那么你最好将补丁放在一个能通过 internet 访问的服
-务器上,然后用指向你的补丁的 URL 替代。但是请注意,如果您的补丁超过了
-300kb,那么它几乎肯定需要被破坏。
+回复审阅意见
+------------
-8)回复评审意见
----------------
+你的补丁几乎肯定会得到审阅者对补丁改进方法的评论(以回复邮件的形式)。您必须
+对这些评论作出回应;让补丁被忽略的一个好办法就是忽略审阅者的意见。直接回复邮
+件来回应意见即可。不会导致代码更改的意见或问题几乎肯定会带来注释或变更日志的
+改变,以便下一个审阅者更好地了解正在发生的事情。
-你的补丁几乎肯定会得到评审者对补丁改进方法的评论。您必须对这些评论作出
-回应;让补丁被忽略的一个好办法就是忽略审阅者的意见。不会导致代码更改的
-意见或问题几乎肯定会带来注释或变更日志的改变,以便下一个评审者更好地了解
-正在发生的事情。
+一定要告诉审阅者你在做什么改变,并感谢他们的时间。代码审阅是一个累人且耗时的
+过程,审阅者有时会变得暴躁。即使在这种情况下,也要礼貌地回应并解决他们指出的
+问题。当发送下一版时,在封面邮件或独立补丁里加上 ``patch changelog`` 说明与
+前一版本的不同之处(参见 :ref:`zh_the_canonical_patch_format` )。
-一定要告诉审稿人你在做什么改变,并感谢他们的时间。代码审查是一个累人且
-耗时的过程,审查人员有时会变得暴躁。即使在这种情况下,也要礼貌地回应并
-解决他们指出的问题。
+.. _zh_resend_reminders:
-9)不要泄气或不耐烦
--------------------
+不要泄气或不耐烦
+----------------
-提交更改后,请耐心等待。审阅者是忙碌的人,可能无法立即访问您的修补程序。
+提交更改后,请耐心等待。审阅者是大忙人,可能无法立即审阅您的补丁。
-曾几何时,补丁曾在没有评论的情况下消失在空白中,但开发过程比现在更加顺利。
+曾几何时,补丁曾在没收到评论的情况下消失在虚空中,但现在开发过程应该更加顺利了。
您应该在一周左右的时间内收到评论;如果没有收到评论,请确保您已将补丁发送
-到正确的位置。在重新提交或联系审阅者之前至少等待一周-在诸如合并窗口之类的
+到正确的位置。在重新提交或联系审阅者之前至少等待一周——在诸如合并窗口之类的
繁忙时间可能更长。
-10)主题中包含 PATCH
---------------------
+在等了几个星期后,用带RESEND的主题重发补丁也是可以的::
-由于到linus和linux内核的电子邮件流量很高,通常会在主题行前面加上[PATCH]
-前缀. 这使Linus和其他内核开发人员更容易将补丁与其他电子邮件讨论区分开。
+ [PATCH Vx RESEND] sub/sys: Condensed patch summary
-11)签署你的作品-开发者原始认证
--------------------------------
+当你发布补丁(系列)修改版的时候,不要加上“RESEND”——“RESEND”只适用于重
+新提交之前未经修改的补丁(系列)。
-为了加强对谁做了何事的追踪,尤其是对那些透过好几层的维护者的补丁,我们
-建议在发送出去的补丁上加一个 “sign-off” 的过程。
+主题中包含 PATCH
+----------------
-"sign-off" 是在补丁的注释的最后的简单的一行文字,认证你编写了它或者其他
+由于到Linus和linux-kernel的电子邮件流量很高,通常会在主题行前面加上[PATCH]
+前缀。这使Linus和其他内核开发人员更容易将补丁与其他电子邮件讨论区分开。
+
+``git send-email`` 会自动为你加上。
+
+签署你的作品——开发者来源认证
+------------------------------
+
+为了加强对谁做了何事的追踪,尤其是对那些透过好几层维护者才最终到达的补丁,我
+们在通过邮件发送的补丁上引入了“签署(sign-off)”流程。
+
+“签署”是在补丁注释最后的一行简单文字,认证你编写了它或者其他
人有权力将它作为开放源代码的补丁传递。规则很简单:如果你能认证如下信息:
-开发者来源证书 1.1
+开发者来源认证 1.1
^^^^^^^^^^^^^^^^^^
对于本项目的贡献,我认证如下信息:
- (a)这些贡献是完全或者部分的由我创建,我有权利以文件中指出
+ (a) 这些贡献是完全或者部分的由我创建,我有权利以文件中指出
的开放源代码许可证提交它;或者
- (b)这些贡献基于以前的工作,据我所知,这些以前的工作受恰当的开放
- 源代码许可证保护,而且,根据许可证,我有权提交修改后的贡献,
+
+ (b) 这些贡献基于以前的工作,据我所知,这些以前的工作受恰当的开放
+ 源代码许可证保护,而且,根据文件中指出的许可证,我有权提交修改后的贡献,
无论是完全还是部分由我创造,这些贡献都使用同一个开放源代码许可证
- (除非我被允许用其它的许可证),正如文件中指出的;或者
- (c)这些贡献由认证(a),(b)或者(c)的人直接提供给我,而
+ (除非我被允许用其它的许可证);或者
+
+ (c) 这些贡献由认证(a),(b)或者(c)的人直接提供给我,而
且我没有修改它。
- (d)我理解并同意这个项目和贡献是公开的,贡献的记录(包括我
- 一起提交的个人记录,包括 sign-off )被永久维护并且可以和这个项目
+
+ (d) 我理解并同意这个项目和贡献是公开的,贡献的记录(包括我
+ 一起提交的个人记录,包括sign-off)被永久维护并且可以和这个项目
或者开放源代码的许可证同步地再发行。
那么加入这样一行::
- Signed-off-by: Random J Developer <[email protected]>
-
-使用你的真名(抱歉,不能使用假名或者匿名。)
-
-有人在最后加上标签。现在这些东西会被忽略,但是你可以这样做,来标记公司
-内部的过程,或者只是指出关于 sign-off 的一些特殊细节。
-
-如果您是子系统或分支维护人员,有时需要稍微修改收到的补丁,以便合并它们,
-因为树和提交者中的代码不完全相同。如果你严格遵守规则(c),你应该要求提交者
-重新发布,但这完全是在浪费时间和精力。规则(b)允许您调整代码,但是更改一个
-提交者的代码并让他认可您的错误是非常不礼貌的。要解决此问题,建议在最后一个
-由签名行和您的行之间添加一行,指示更改的性质。虽然这并不是强制性的,但似乎
-在描述前加上您的邮件和/或姓名(全部用方括号括起来),这足以让人注意到您对最
-后一分钟的更改负有责任。例如::
-
- Signed-off-by: Random J Developer <[email protected]>
- [[email protected]: struct foo moved from foo.c to foo.h]
- Signed-off-by: Lucky K Maintainer <[email protected]>
-
-如果您维护一个稳定的分支机构,同时希望对作者进行致谢、跟踪更改、合并修复并
-保护提交者不受投诉,那么这种做法尤其有用。请注意,在任何情况下都不能更改作者
-的ID(From 头),因为它是出现在更改日志中的标识。
-
-对回合(back-porters)的特别说明:在提交消息的顶部(主题行之后)插入一个补丁
-的起源指示似乎是一种常见且有用的实践,以便于跟踪。例如,下面是我们在3.x稳定
-版本中看到的内容::
-
- Date: Tue Oct 7 07:26:38 2014 -0400
-
- libata: Un-break ATA blacklist
-
- commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
-
-还有, 这里是一个旧版内核中的一个回合补丁::
+ Signed-off-by: Random J Developer <[email protected]>
- Date: Tue May 13 22:12:27 2008 +0200
+使用你的真名(抱歉,不能使用假名或者匿名。)如果使用 ``git commit -s`` 的话
+将会自动完成。撤销也应当包含“Signed-off-by”, ``git revert -s`` 会帮你搞定。
- wireless, airo: waitbusy() won't delay
+有些人会在最后加上额外的标签。现在这些东西会被忽略,但是你可以这样做,来标记
+公司内部的过程,或者只是指出关于签署的一些特殊细节。
- [backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a]
+作者签署之后的任何其他签署(Signed-off-by:'s)均来自处理和传递补丁的人员,但
+未参与其开发。签署链应当反映补丁传播到维护者并最终传播到Linus所经过的 **真实**
+路径,首个签署指明单个作者的主要作者身份。
-12)何时使用Acked-by:,CC:,和Co-Developed by:
-----------------------------------------------
+何时使用Acked-by:,CC:,和Co-developed-by:
+------------------------------------------
-Singed-off-by: 标记表示签名者参与了补丁的开发,或者他/她在补丁的传递路径中。
+Signed-off-by: 标签表示签名者参与了补丁的开发,或者他/她在补丁的传递路径中。
-如果一个人没有直接参与补丁的准备或处理,但希望表示并记录他们对补丁的批准,
-那么他们可以要求在补丁的变更日志中添加一个 Acked-by:
+如果一个人没有直接参与补丁的准备或处理,但希望表示并记录他们对补丁的批准/赞成,
+那么他们可以要求在补丁的变更日志中添加一个Acked-by:。
-Acked-by:通常由受影响代码的维护者使用,当该维护者既没有贡献也没有转发补丁时。
+Acked-by: 通常由受影响代码的维护者使用,当该维护者既没有贡献也没有转发补丁时。
-Acked-by: 不像签字人那样正式。这是一个记录,确认人至少审查了补丁,并表示接受。
-因此,补丁合并有时会手动将Acker的“Yep,looks good to me”转换为 Acked-By:(但
+Acked-by: 不像签署那样正式。这是一个记录,确认人至少审阅了补丁,并表示接受。
+因此,补丁合并者有时会手动将Acker的“Yep,looks good to me”转换为 Acked-by:(但
请注意,通常最好要求一个明确的Ack)。
Acked-by:不一定表示对整个补丁的确认。例如,如果一个补丁影响多个子系统,并且
-有一个:来自一个子系统维护者,那么这通常表示只确认影响维护者代码的部分。这里
-应该仔细判断。如有疑问,应参考邮件列表档案中的原始讨论。
+有一个来自某个子系统维护者的Acked-By:,那么这通常表示只确认影响维护者代码的部
+分。这里应该仔细判断。如有疑问,应参考邮件列表存档中的原始讨论。
-如果某人有机会对补丁进行评论,但没有提供此类评论,您可以选择在补丁中添加 ``Cc:``
-这是唯一一个标签,它可以在没有被它命名的人显式操作的情况下添加,但它应该表明
-这个人是在补丁上抄送的。讨论中包含了潜在利益相关方。
+如果某人本应有机会对补丁进行评论,但没有提供此类评论,您可以选择在补丁中添加
+``Cc:`` 标签。这是唯一可以在没有被该人明确同意的情况下添加的标签——但它应该表明
+这个人是在补丁上抄送的。此标签记录了讨论中包含的潜在利益相关方。
Co-developed-by: 声明补丁是由多个开发人员共同创建的;当几个人在一个补丁上工
-作时,它用于将属性赋予共同作者(除了 From: 所赋予的作者之外)。因为
-Co-developed-by: 表示作者身份,所以每个共同开发人:必须紧跟在相关合作作者的
-签名之后。标准的签核程序要求:标记的签核顺序应尽可能反映补丁的时间历史,而不
-管作者是通过 From :还是由 Co-developed-by: 共同开发的。值得注意的是,最后一
-个签字人:必须始终是提交补丁的开发人员。
+作时,它用于给出共同作者(除了From:所给出的作者之外)。因为Co-developed-by:
+表示作者身份,所以每个Co-developed-by:必须紧跟在相关合作作者的签署之后。标准
+签署程序要求Signed-off-by:标签的顺序应尽可能反映补丁的时间历史,无论作者是通
+过From:还是Co-developed-by:表明。值得注意的是,最后一个Signed-off-by:必须是
+提交补丁的开发人员。
-注意,当作者也是电子邮件标题“发件人:”行中列出的人时,“From: ” 标记是可选的。
+注意,如果From:作者也是电子邮件标题的From:行中列出的人,则From:标签是可选的。
-作者提交的补丁程序示例::
+被From:作者提交的补丁示例::
<changelog>
@@ -421,7 +374,7 @@ Co-developed-by: 表示作者身份,所以每个共同开发人:必须紧跟
Signed-off-by: Second Co-Author <[email protected]>
Signed-off-by: From Author <[email protected]>
-合作开发者提交的补丁示例::
+被合作开发者提交的补丁示例::
From: From Author <[email protected]>
@@ -434,76 +387,85 @@ Co-developed-by: 表示作者身份,所以每个共同开发人:必须紧跟
Signed-off-by: Submitting Co-Author <[email protected]>
-13)使用报告人:、测试人:、审核人:、建议人:、修复人:
---------------------------------------------------------
+使用Reported-by:、Tested-by:、Reviewed-by:、Suggested-by:和Fixes:
+-----------------------------------------------------------------
Reported-by: 给那些发现错误并报告错误的人致谢,它希望激励他们在将来再次帮助
-我们。请注意,如果bug是以私有方式报告的,那么在使用Reported-by标记之前,请
-先请求权限。
+我们。请注意,如果bug是以私有方式报告的,那么在使用Reported-by标签之前,请
+先请求许可。此标签是为Bug设计的;请不要将其用于感谢功能请求。
-Tested-by: 标记表示补丁已由指定的人(在某些环境中)成功测试。这个标签通知
-维护人员已经执行了一些测试,为将来的补丁提供了一种定位测试人员的方法,并确
-保测试人员的信誉。
+Tested-by: 标签表示补丁已由指定的人(在某些环境中)成功测试。这个标签通知
+维护人员已经执行了一些测试,为将来的补丁提供了一种定位测试人员的方法,并彰显
+测试人员的功劳。
-Reviewed-by:相反,根据审查人的声明,表明该补丁已被审查并被认为是可接受的:
+Reviewed-by:根据审阅者的监督声明,表明该补丁已被审阅并被认为是可接受的:
-审查人的监督声明
+审阅者的监督声明
^^^^^^^^^^^^^^^^
-通过提供我的 Reviewed-by,我声明:
+通过提供我的Reviewed-by:标签,我声明:
- (a) 我已经对这个补丁进行了一次技术审查,以评估它是否适合被包含到
+ (a) 我已经对这个补丁进行了一次技术审阅,以评估它是否适合被包含到
主线内核中。
(b) 与补丁相关的任何问题、顾虑或问题都已反馈给提交者。我对提交者对
我的评论的回应感到满意。
- (c) 虽然这一提交可能会改进一些东西,但我相信,此时,(1)对内核
+ (c) 虽然这一提交可能仍可被改进,但我相信,此时,(1)对内核
进行了有价值的修改,(2)没有包含争论中涉及的已知问题。
- (d) 虽然我已经审查了补丁并认为它是健全的,但我不会(除非另有明确
- 说明)作出任何保证或保证它将在任何给定情况下实现其规定的目的
+ (d) 虽然我已经审阅了补丁并认为它是健全的,但我不会(除非另有明确
+ 说明)作出任何保证或担保它会在任何给定情况下实现其规定的目的
或正常运行。
-Reviewed-by 是一种观点声明,即补丁是对内核的适当修改,没有任何遗留的严重技术
-问题。任何感兴趣的审阅者(完成工作的人)都可以为一个补丁提供一个 Review-by
-标签。此标签用于向审阅者提供致谢,并通知维护者已在修补程序上完成的审阅程度。
-Reviewed-by: 当由已知了解主题区域并执行彻底检查的审阅者提供时,通常会增加
+Reviewed-by是一种观点声明,即补丁是对内核的适当修改,没有任何遗留的严重技术
+问题。任何感兴趣的审阅者(完成工作的人)都可以为一个补丁提供一个Reviewed-by
+标签。此标签用于向审阅者提供致谢,并通知维护者补丁的审阅进度。
+当Reviewed-by:标签由已知了解主题区域并执行彻底检查的审阅者提供时,通常会增加
补丁进入内核的可能性。
+一旦测试人员或审阅者的“Tested-by”和“Reviewed-by”标签出现在邮件列表中,
+作者应在发送下一个版本时将其添加到适用的补丁中。但是,如果补丁在后续版本中发
+生了实质性更改,这些标签可能不再适用,因此应该删除。通常,在补丁更改日志中
+(在 ``---`` 分隔符之后)应该提到删除某人的测试者或审阅者标签。
+
Suggested-by: 表示补丁的想法是由指定的人提出的,并确保将此想法归功于指定的
人。请注意,未经许可,不得添加此标签,特别是如果该想法未在公共论坛上发布。
-这就是说,如果我们勤快地致谢我们的创意者,他们很有希望在未来得到鼓舞,再次
+也就是说,如果我们勤快地致谢创意提供者,他们将受到鼓舞,很有希望在未来再次
帮助我们。
-Fixes: 指示补丁在以前的提交中修复了一个问题。它可以很容易地确定错误的来源,
-这有助于检查错误修复。这个标记还帮助稳定内核团队确定应该接收修复的稳定内核
-版本。这是指示补丁修复的错误的首选方法。请参阅 :ref:`cn_describe_changes`
-描述您的更改以了解更多详细信息。
+Fixes: 指示补丁修复了之前提交的一个问题。它可以便于确定错误的来源,这有助于
+检查错误修复。这个标签还帮助稳定内核团队确定应该接收修复的稳定内核版本。这是
+指示补丁修复的错误的首选方法。请参阅 :ref:`zh_describe_changes` 了解更多信息。
-.. _cn_the_canonical_patch_format:
+.. note::
-12)标准补丁格式
-----------------
+ 附加Fixes:标签不会改变稳定内核规则流程,也不改变所有稳定版补丁抄送
+ [email protected]的要求。有关更多信息,请阅读
+ Documentation/translations/zh_CN/process/stable-kernel-rules.rst 。
+
+.. _zh_the_canonical_patch_format:
+
+标准补丁格式
+------------
本节描述如何格式化补丁本身。请注意,如果您的补丁存储在 ``Git`` 存储库中,则
-可以使用 ``git format-patch`` 进行正确的补丁格式设置。但是,这些工具无法创建
+可以使用 ``git format-patch`` 进行正确的补丁格式化。但是,这些工具无法创建
必要的文本,因此请务必阅读下面的说明。
-标准的补丁,标题行是::
+标准的补丁标题行是::
Subject: [PATCH 001/123] 子系统:一句话概述
-标准补丁的信体存在如下部分:
+标准补丁的信体包含如下部分:
- - 一个 "from" 行指出补丁作者。后跟空行(仅当发送修补程序的人不是作者时才需要)。
+ - 一个 ``from`` 行指出补丁作者。后跟空行(仅当发送补丁的人不是作者时才需要)。
- - 解释的正文,行以75列包装,这将被复制到永久变更日志来描述这个补丁。
+ - 说明文字,每行最长75列,这将被复制到永久变更日志来描述这个补丁。
- 一个空行
- - 上面描述的“Signed-off-by” 行,也将出现在更改日志中。
+ - 上述的 ``Signed-off-by:`` 行,也将出现在更改日志中。
- 只包含 ``---`` 的标记线。
@@ -511,29 +473,29 @@ Fixes: 指示补丁在以前的提交中修复了一个问题。它可以很容�
- 实际补丁( ``diff`` 输出)。
-标题行的格式,使得对标题行按字母序排序非常的容易 - 很多 e-mail 客户端都
-可以支持 - 因为序列号是用零填充的,所以按数字排序和按字母排序是一样的。
+标题行的格式,使得对标题行按字母序排序非常的容易——很多邮件客户端都
+可以支持——因为序列号是用零填充的,所以按数字排序和按字母排序是一样的。
-e-mail 标题中的“子系统”标识哪个内核子系统将被打补丁。
+邮件标题中的“子系统”标识哪个内核子系统将被打补丁。
-e-mail 标题中的“一句话概述”扼要的描述 e-mail 中的补丁。“一句话概述”
+邮件标题中的“一句话概述”扼要的描述邮件中的补丁。“一句话概述”
不应该是一个文件名。对于一个补丁系列(“补丁系列”指一系列的多个相关补
丁),不要对每个补丁都使用同样的“一句话概述”。
-记住 e-mail 的“一句话概述”会成为该补丁的全局唯一标识。它会蔓延到 git
+记住邮件的“一句话概述”会成为该补丁的全局唯一标识。它会进入 ``git``
的改动记录里。然后“一句话概述”会被用在开发者的讨论里,用来指代这个补
-丁。用户将希望通过 google 来搜索"一句话概述"来找到那些讨论这个补丁的文
+丁。用户将希望通过搜索引擎搜索“一句话概述”来找到那些讨论这个补丁的文
章。当人们在两三个月后使用诸如 ``gitk`` 或 ``git log --oneline`` 之类
的工具查看数千个补丁时,也会很快看到它。
出于这些原因,概述必须不超过70-75个字符,并且必须描述补丁的更改以及为
-什么需要补丁。既要简洁又要描述性很有挑战性,但写得好的概述应该这样做。
+什么需要补丁。既要简洁又要描述性很有挑战性,但写得好的概述应该这样。
概述的前缀可以用方括号括起来:“Subject: [PATCH <tag>...] <概述>”。标记
不被视为概述的一部分,而是描述应该如何处理补丁。如果补丁的多个版本已发
-送出来以响应评审(即“v1,v2,v3”)或“rfc”,以指示评审请求,那么通用标记
-可能包括版本描述符。如果一个补丁系列中有四个补丁,那么各个补丁可以这样
-编号:1/4、2/4、3/4、4/4。这可以确保开发人员了解补丁应用的顺序,并且他们
+送出来以响应评审(即“v1,v2,v3”)则必须包含版本号,或包含“RFC”以指示
+评审请求。如果一个补丁系列中有四个补丁,那么各个补丁可以这样编号:1/4、2/4、
+3/4、4/4。这可以确保开发人员了解补丁应用的顺序,且
已经查看或应用了补丁系列中的所有补丁。
一些标题的例子::
@@ -541,95 +503,134 @@ e-mail 标题中的“一句话概述”扼要的描述 e-mail 中的补丁。�
Subject: [patch 2/5] ext2: improve scalability of bitmap searching
Subject: [PATCHv2 001/207] x86: fix eflags tracking
-"From" 行是信体里的最上面一行,具有如下格式:
+``From`` 行是信体里的最上面一行,具有如下格式::
+
From: Patch Author <[email protected]>
-"From" 行指明在永久改动日志里,谁会被确认为作者。如果没有 "From" 行,那
-么邮件头里的 "From: " 行会被用来决定改动日志中的作者。
+``From`` 行指明在永久改动日志里,谁会被确认为作者。如果没有 ``From`` 行,那
+么邮件头里的 ``From:`` 行会被用来决定改动日志中的作者。
-说明的主题将会被提交到永久的源代码改动日志里,因此对那些早已经不记得和
-这个补丁相关的讨论细节的有能力的读者来说,是有意义的。包括补丁程序定位
-错误的(内核日志消息、OOPS消息等)症状,对于搜索提交日志以寻找适用补丁的人
-尤其有用。如果一个补丁修复了一个编译失败,那么可能不需要包含所有编译失败;
+说明文字将会被提交到永久的源代码改动日志里,因此应针对那些早已经不记得和这
+个补丁相关的讨论细节的读者。包括补丁处理的故障症状(内核日志消息、oops消息
+等),这对于可能正在搜索提交日志以查找适用补丁的人特别有用。文本应该写得如
+此详细,以便在数周、数月甚至数年后阅读时,能够为读者提供所需的细节信息,以
+掌握创建补丁的 **原因** 。
+
+如果一个补丁修复了一个编译失败,那么可能不需要包含 *所有* 编译失败;
只要足够让搜索补丁的人能够找到它就行了。与概述一样,既要简洁又要描述性。
-"---" 标记行对于补丁处理工具要找到哪里是改动日志信息的结束,是不可缺少
+``---`` 标记行对于补丁处理工具要找到哪里是改动日志信息的结束,是不可缺少
的。
-对于 "---" 标记之后的额外注解,一个好的用途就是用来写 diffstat,用来显
-示修改了什么文件和每个文件都增加和删除了多少行。diffstat 对于比较大的补
-丁特别有用。其余那些只是和时刻或者开发者相关的注解,不合适放到永久的改
-动日志里的,也应该放这里。
-使用 diffstat的选项 "-p 1 -w 70" 这样文件名就会从内核源代码树的目录开始
+对于 ``---`` 标记之后的额外注解,一个好的用途就是用来写 ``diffstat`` ,用来显
+示修改了什么文件和每个文件都增加和删除了多少行。 ``diffstat`` 对于比较大的补
+丁特别有用。
+使用 ``diffstat`` 的选项 ``-p 1 -w 70`` 这样文件名就会从内核源代码树的目录开始
,不会占用太宽的空间(很容易适合80列的宽度,也许会有一些缩进。)
+( ``git`` 默认会生成合适的diffstat。)
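+
+例如,手工生成diffstat时大致可以这样做(比较范围仅为示意)::
+
+  $ git diff HEAD~1 | diffstat -p 1 -w 70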
+
+其余那些只适用于当时或者与维护者相关的注解,不合适放到永久的改动日志里的,也
+应该放这里。较好的例子就是 ``补丁更改记录`` ,记录了v1和v2版本补丁之间的差异。
+
+请将此信息放在将变更日志与补丁的其余部分分隔开的 ``---`` 行 **之后** 。版本
+信息不是提交到git树的变更日志的一部分。只是供审阅人员使用的附加信息。如果将
+其放置在提交标记上方,则需要手动交互才能将其删除。如果它位于分隔线以下,则在
+应用补丁时会自动剥离::
+
+ <commit message>
+ ...
+ Signed-off-by: Author <author@mail>
+ ---
+ V2 -> V3: Removed redundant helper function
+ V1 -> V2: Cleaned up coding style and addressed review comments
-在后面的参考资料中能看到适当的补丁格式的更多细节。
+ path/to/file | 5+++--
+ ...
-.. _cn_explicit_in_reply_to:
+在后面的参考资料中能看到正确补丁格式的更多细节。
-15) 明确回复邮件头(In-Reply-To)
--------------------------------
+.. _zh_backtraces:
-手动添加回复补丁的的标题头(In-Reply_To:) 是有帮助的(例如,使用 ``git send-email`` )
-将补丁与以前的相关讨论关联起来,例如,将bug修复程序链接到电子邮件和bug报告。
+提交消息中的回溯(Backtraces)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+回溯有助于记录导致问题的调用链。然而,并非所有回溯都有帮助。例如,早期引导调
+用链是独特而明显的。而逐字复制完整的dmesg输出则会增加时间戳、模块列表、寄存
+器和堆栈转储等分散注意力的信息。
+
+因此,最有用的回溯应该从转储中提取相关信息,以更容易集中在真实问题上。下面是
+一个剪裁良好的回溯示例::
+
+ unchecked MSR access error: WRMSR to 0xd51 (tried to write 0x0000000000000064)
+ at rIP: 0xffffffffae059994 (native_write_msr+0x4/0x20)
+ Call Trace:
+ mba_wrmsr
+ update_domains
+ rdtgroup_mkdir
+
+.. _zh_explicit_in_reply_to:
+
+明确回复邮件头(In-Reply-To)
+-----------------------------
+
+手动添加回复补丁的邮件头(In-Reply-To:)是有用的(例如,使用 ``git send-email`` ),
+可以将补丁与以前的相关讨论关联起来,例如,将bug补丁链接到电子邮件和bug报告。
但是,对于多补丁系列,最好避免在回复时使用链接到该系列的旧版本。这样,
-补丁的多个版本就不会成为电子邮件客户端中无法管理的引用序列。如果链接有用,
+补丁的多个版本就不会成为电子邮件客户端中无法管理的引用树。如果链接有用,
可以使用 https://lore.kernel.org/ 重定向器(例如,在封面电子邮件文本中)
链接到补丁系列的早期版本。
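+
+例如,把修复补丁挂到原始问题报告所在的讨论串下(Message-ID 仅为占位)::
+
+  $ git send-email --in-reply-to='<bug-report-message-id@example.com>' 0001-*.patch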
-16) 发送git pull请求
---------------------
-
-如果您有一系列补丁,那么让维护人员通过git pull操作将它们直接拉入子系统存储
-库可能是最方便的。但是,请注意,从开发人员那里获取补丁比从邮件列表中获取补
-丁需要更高的信任度。因此,许多子系统维护人员不愿意接受请求,特别是来自新的
-未知开发人员的请求。如果有疑问,您可以在封面邮件中使用pull 请求作为补丁系列
-正常发布的一个选项,让维护人员可以选择使用其中之一。
-
-pull 请求的主题行中应该有[Git Pull]。请求本身应该在一行中包含存储库名称和
-感兴趣的分支;它应该看起来像::
+给出基础树信息
+--------------
- Please pull from
+当其他开发人员收到您的补丁并开始审阅时,知道应该将您的工作放到代码树历史记录
+中的什么位置通常很有用。这对于自动化持续集成流水线(CI)特别有用,这些流水线试
+图运行一系列测试,以便在维护人员开始审阅之前确定提交的质量。
- git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
+如果您使用 ``git format-patch`` 生成补丁,则可以通过 ``--base`` 标志在提交中
+自动包含基础树信息。使用此选项最简单、最方便的方法是配合主题分支::
- to get these changes:
+ $ git checkout -t -b my-topical-branch master
+ Branch 'my-topical-branch' set up to track local branch 'master'.
+ Switched to a new branch 'my-topical-branch'
+ [perform your edits and commits]
-pull 请求还应该包含一条整体消息,说明请求中将包含什么,一个补丁本身的 ``Git shortlog``
-以及一个显示补丁系列整体效果的 ``diffstat`` 。当然,将所有这些信息收集在一起
-的最简单方法是让 ``git`` 使用 ``git request-pull`` 命令为您完成这些工作。
+ $ git format-patch --base=auto --cover-letter -o outgoing/ master
+ outgoing/0000-cover-letter.patch
+ outgoing/0001-First-Commit.patch
+ outgoing/...
-一些维护人员(包括Linus)希望看到来自已签名提交的请求;这增加了他们对你的
-请求信心。特别是,在没有签名标签的情况下,Linus 不会从像 Github 这样的公共
-托管站点拉请求。
+当你编辑 ``outgoing/0000-cover-letter.patch`` 时,您会注意到在它的最底部有一
+行 ``base-commit:`` 尾注,它为审阅者和CI工具提供了足够的信息以正确执行
+``git am`` 而不必担心冲突::
-创建此类签名的第一步是生成一个 GNRPG 密钥,并由一个或多个核心内核开发人员对
-其进行签名。这一步对新开发人员来说可能很困难,但没有办法绕过它。参加会议是
-找到可以签署您的密钥的开发人员的好方法。
+ $ git checkout -b patch-review [base-commit-id]
+ Switched to a new branch 'patch-review'
+ $ git am patches.mbox
+ Applying: First Commit
+ Applying: ...
-一旦您在Git 中准备了一个您希望有人拉的补丁系列,就用 ``git tag -s`` 创建一
-个签名标记。这将创建一个新标记,标识该系列中的最后一次提交,并包含用您的私
-钥创建的签名。您还可以将changelog样式的消息添加到标记中;这是一个描述拉请求
-整体效果的理想位置。
+有关此选项的更多信息,请参阅 ``man git-format-patch`` 。
-如果维护人员将要从中提取的树不是您正在使用的存储库,请不要忘记将已签名的标记
-显式推送到公共树。
+.. note::
-生成拉请求时,请使用已签名的标记作为目标。这样的命令可以实现::
+ ``--base`` 功能是在2.9.0版git中引入的。
- git request-pull master git://my.public.tree/linux.git my-signed-tag
+如果您不使用git格式化补丁,仍然可以包含相同的 ``base-commit`` 尾注,以指示您
+的工作所基于的树的提交哈希。你应该在封面邮件或系列的第一个补丁中添加它,它应
+该放在 ``---`` 行的下面或所有其他内容之后,即只在你的电子邮件签名之前。
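+
+如果手工添加,大致是在 ``---`` 行之后加上这样一行(哈希值仅为占位)::
+
+  ---
+  base-commit: 0123456789abcdef0123456789abcdef01234567
+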
参考文献
--------
-Andrew Morton, "The perfect patch" (tpp).
+Andrew Morton,“完美的补丁”(tpp)
<https://www.ozlabs.org/~akpm/stuff/tpp.txt>
-Jeff Garzik, "Linux kernel patch submission format".
+Jeff Garzik,“Linux内核补丁提交格式”
<https://web.archive.org/web/20180829112450/http://linux.yyz.us/patch-format.html>
-Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
+Greg Kroah-Hartman,“如何惹恼内核子系统维护人员”
<http://www.kroah.com/log/linux/maintainer.html>
<http://www.kroah.com/log/linux/maintainer-02.html>
@@ -642,16 +643,15 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
<http://www.kroah.com/log/linux/maintainer-06.html>
-NO!!!! No more huge patch bombs to [email protected] people!
+不!!!别再发巨型补丁炸弹给[email protected]的人们了!
<https://lore.kernel.org/r/[email protected]>
-Kernel Documentation/process/coding-style.rst:
- :ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`
+内核 Documentation/translations/zh_CN/process/coding-style.rst
-Linus Torvalds's mail on the canonical patch format:
+Linus Torvalds关于标准补丁格式的邮件
<https://lore.kernel.org/r/[email protected]>
-Andi Kleen, "On submitting kernel patches"
- Some strategies to get difficult or controversial changes in.
+Andi Kleen,“提交补丁之路”
+ 一些帮助合入困难或有争议的变更的策略。
http://halobates.de/on-submitting-patches.pdf
diff --git a/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst b/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst
index 26b0f36f793d..3076402406c4 100644
--- a/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst
+++ b/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst
@@ -80,7 +80,7 @@ p->se.vruntime。一旦p->se.vruntime变得足够大,其它的任务将成为�
CFS使用纳秒粒度的计时,不依赖于任何jiffies或HZ的细节。因此CFS并不像之前的调度器那样
有“时间片”的概念,也没有任何启发式的设计。唯一可调的参数(你需要打开CONFIG_SCHED_DEBUG)是:
- /proc/sys/kernel/sched_min_granularity_ns
+ /sys/kernel/debug/sched/min_granularity_ns
它可以用来将调度器从“桌面”模式(也就是低时延)调节为“服务器”(也就是高批处理)模式。
它的默认设置是适合桌面的工作负载。SCHED_BATCH也被CFS调度器模块处理。
diff --git a/Documentation/translations/zh_TW/oops-tracing.txt b/Documentation/translations/zh_TW/oops-tracing.txt
deleted file mode 100644
index be8e59f2abaf..000000000000
--- a/Documentation/translations/zh_TW/oops-tracing.txt
+++ /dev/null
@@ -1,212 +0,0 @@
-Chinese translated version of Documentation/admin-guide/bug-hunting.rst
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Traditional Chinese maintainer: Hu Haowen <[email protected]>
----------------------------------------------------------------------
-Documentation/admin-guide/bug-hunting.rst 的繁體中文版翻譯
-
-如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
-交流有困難的話,也可以向繁體中文版維護者求助。如果本翻譯更新不及時或
-者翻譯存在問題,請聯繫繁體中文版維護者。
-
-繁體中文版維護者: 胡皓文 Hu Haowen <[email protected]>
-繁體中文版翻譯者: 胡皓文 Hu Haowen <[email protected]>
-繁體中文版校譯者: 胡皓文 Hu Haowen <[email protected]>
-
-以下爲正文
----------------------------------------------------------------------
-
-注意: ksymoops 在2.6中是沒有用的。 請以原有格式使用Oops(來自dmesg,等等)。
-忽略任何這樣那樣關於「解碼Oops」或者「通過ksymoops運行」的文檔。 如果你貼出運行過
-ksymoops的來自2.6的Oops,人們只會讓你重貼一次。
-
-快速總結
--------------
-
-發現Oops並發送給看似相關的內核領域的維護者。別太擔心對不上號。如果你不確定就發給
-和你所做的事情相關的代碼的負責人。 如果可重現試著描述怎樣重構。 那甚至比oops更有
-價值。
-
-如果你對於發送給誰一無所知, 發給[email protected]。感謝你幫助Linux
-儘可能地穩定。
-
-Oops在哪裡?
-----------------------
-
-通常Oops文本由klogd從內核緩衝區里讀取並傳給syslogd,由syslogd寫到syslog文件中,
-典型地是/var/log/messages(依賴於/etc/syslog.conf)。有時klogd崩潰了,這種情況下你
-能夠運行dmesg > file來從內核緩衝區中讀取數據並保存下來。 否則你可以
-cat /proc/kmsg > file, 然而你必須介入中止傳輸, kmsg是一個「永不結束的文件」。如
-果機器崩潰壞到你不能輸入命令或者磁碟不可用那麼你有三種選擇:-
-
-(1) 手抄屏幕上的文本待機器重啓後再輸入計算機。 麻煩但如果沒有針對崩潰的準備,
-這是僅有的選擇。 另外,你可以用數位相機把屏幕拍下來-不太好,但比沒有強。 如果信
-息滾動到了終端的上面,你會發現以高分辯率啓動(比如,vga=791)會讓你讀到更多的文
-本。(注意:這需要vesafb,所以對『早期』的oops沒有幫助)
-
-(2)用串口終端啓動(請參看Documentation/admin-guide/serial-console.rst),運行一個null
-modem到另一台機器並用你喜歡的通訊工具獲取輸出。Minicom工作地很好。
-
-(3)使用Kdump(請參看Documentation/admin-guide/kdump/kdump.rst),
-使用在Documentation/admin-guide/kdump/gdbmacros.txt中定義的dmesg gdb宏,從舊的內存中提取內核
-環形緩衝區。
-
-完整信息
-----------------
-
-注意:以下來自於Linus的郵件適用於2.4內核。 我因爲歷史原因保留了它,並且因爲其中
-一些信息仍然適用。 特別注意的是,請忽略任何ksymoops的引用。
-
-From: Linus Torvalds <[email protected]>
-
-怎樣跟蹤Oops.. [原發到linux-kernel的一封郵件]
-
-主要的竅門是有五年和這些煩人的oops消息打交道的經驗;-)
-
-實際上,你有辦法使它更簡單。我有兩個不同的方法:
-
- gdb /usr/src/linux/vmlinux
- gdb> disassemble <offending_function>
-
-那是發現問題的簡單辦法,至少如果bug報告做的好的情況下(象這個一樣-運行ksymoops
-得到oops發生的函數及函數內的偏移)。
-
-哦,如果報告發生的內核以相同的編譯器和相似的配置編譯它會有幫助的。
-
-另一件要做的事是反彙編bug報告的「Code」部分:ksymoops也會用正確的工具來做這件事,
-但如果沒有那些工具你可以寫一個傻程序:
-
- char str[] = "\xXX\xXX\xXX...";
- main(){}
-
-並用gcc -g編譯它然後執行「disassemble str」(XX部分是由Oops報告的值-你可以僅剪切
-粘貼並用「\x」替換空格-我就是這麼做的,因爲我懶得寫程序自動做這一切)。
-
-另外,你可以用scripts/decodecode這個shell腳本。它的使用方法是:
-decodecode < oops.txt
-
-「Code」之後的十六進位字節可能(在某些架構上)有一些當前指令之前的指令字節以及
-當前和之後的指令字節
-
-Code: f9 0f 8d f9 00 00 00 8d 42 0c e8 dd 26 11 c7 a1 60 ea 2b f9 8b 50 08 a1
-64 ea 2b f9 8d 34 82 8b 1e 85 db 74 6d 8b 15 60 ea 2b f9 <8b> 43 04 39 42 54
-7e 04 40 89 42 54 8b 43 04 3b 05 00 f6 52 c0
-
-最後,如果你想知道代碼來自哪裡,你可以:
-
- cd /usr/src/linux
- make fs/buffer.s # 或任何產生BUG的文件
-
-然後你會比gdb反彙編更清楚的知道發生了什麼。
-
-現在,問題是把你所擁有的所有數據結合起來:C源碼(關於它應該怎樣的一般知識),
-彙編代碼及其反彙編得到的代碼(另外還有從「oops」消息得到的寄存器狀態-對了解毀壞的
-指針有用,而且當你有了彙編代碼你也能拿其它的寄存器和任何它們對應的C表達式做匹配
-)。
-
-實際上,你僅需看看哪裡不匹配(這個例子是「Code」反彙編和編譯器生成的代碼不匹配)。
-然後你須要找出爲什麼不匹配。通常很簡單-你看到代碼使用了空指針然後你看代碼想知道
-空指針是怎麼出現的,還有檢查它是否合法..
-
-現在,如果明白這是一項耗時的工作而且需要一丁點兒的專心,沒錯。這就是我爲什麼大多
-只是忽略那些沒有符號表信息的崩潰報告的原因:簡單的說太難查找了(我有一些
-程序用於在內核代碼段中搜索特定的模式,而且有時我也已經能找出那些崩潰的地方,但是
-僅僅是找出正確的序列也確實需要相當紮實的內核知識)
-
-_有時_會發生這種情況,我僅看到崩潰中的反彙編代碼序列, 然後我馬上就明白問題出在
-哪裡。這時我才意識到自己幹這個工作已經太長時間了;-)
-
- Linus
-
-
----------------------------------------------------------------------------
-關於Oops跟蹤的註解:
-
-爲了幫助Linus和其它內核開發者,klogd納入了大量的支持來處理保護錯誤。爲了擁有對
-地址解析的完整支持至少應該使用1.3-pl3的sysklogd包。
-
-當保護錯誤發生時,klogd守護進程自動把內核日誌信息中的重要地址翻譯成它們相應的符
-號。
-
-klogd執行兩種類型的地址解析。首先是靜態翻譯其次是動態翻譯。靜態翻譯和ksymoops
-一樣使用System.map文件。爲了做靜態翻譯klogd守護進程必須在初始化時能找到system
-map文件。關於klogd怎樣搜索map文件請參看klogd手冊頁。
-
-動態地址翻譯在使用內核可裝載模塊時很重要。 因爲內核模塊的內存是從內核動態內存池
-里分配的,所以不管是模塊開始位置還是模塊中函數和符號的位置都不是固定的。
-
-內核支持允許程序決定裝載哪些模塊和它們在內存中位置的系統調用。使用這些系統調用
-klogd守護進程生成一張符號表用於調試發生在可裝載模塊中的保護錯誤。
-
-至少klogd會提供產生保護錯誤的模塊名。還可有額外的符號信息供可裝載模塊開發者選擇
-以從模塊中輸出符號信息。
-
-因爲內核模塊環境可能是動態的,所以必須有一種機制當模塊環境發生改變時來通知klogd
-守護進程。 有一些可用的命令行選項允許klogd向當前執行中的守護進程發送信號,告知符
-號信息應該被刷新了。 更多信息請參看klogd手冊頁。
-
-sysklogd發布時包含一個補丁修改了modules-2.0.0包,無論何時一個模塊裝載或者卸載都
-會自動向klogd發送信號。打上這個補丁提供了必要的對調試發生於內核可裝載模塊的保護
-錯誤的無縫支持。
-
-以下是被klogd處理過的發生在可裝載模塊中的一個保護錯誤例子:
----------------------------------------------------------------------------
-Aug 29 09:51:01 blizard kernel: Unable to handle kernel paging request at virtual address f15e97cc
-Aug 29 09:51:01 blizard kernel: current->tss.cr3 = 0062d000, %cr3 = 0062d000
-Aug 29 09:51:01 blizard kernel: *pde = 00000000
-Aug 29 09:51:01 blizard kernel: Oops: 0002
-Aug 29 09:51:01 blizard kernel: CPU: 0
-Aug 29 09:51:01 blizard kernel: EIP: 0010:[oops:_oops+16/3868]
-Aug 29 09:51:01 blizard kernel: EFLAGS: 00010212
-Aug 29 09:51:01 blizard kernel: eax: 315e97cc ebx: 003a6f80 ecx: 001be77b edx: 00237c0c
-Aug 29 09:51:01 blizard kernel: esi: 00000000 edi: bffffdb3 ebp: 00589f90 esp: 00589f8c
-Aug 29 09:51:01 blizard kernel: ds: 0018 es: 0018 fs: 002b gs: 002b ss: 0018
-Aug 29 09:51:01 blizard kernel: Process oops_test (pid: 3374, process nr: 21, stackpage=00589000)
-Aug 29 09:51:01 blizard kernel: Stack: 315e97cc 00589f98 0100b0b4 bffffed4 0012e38e 00240c64 003a6f80 00000001
-Aug 29 09:51:01 blizard kernel: 00000000 00237810 bfffff00 0010a7fa 00000003 00000001 00000000 bfffff00
-Aug 29 09:51:01 blizard kernel: bffffdb3 bffffed4 ffffffda 0000002b 0007002b 0000002b 0000002b 00000036
-Aug 29 09:51:01 blizard kernel: Call Trace: [oops:_oops_ioctl+48/80] [_sys_ioctl+254/272] [_system_call+82/128]
-Aug 29 09:51:01 blizard kernel: Code: c7 00 05 00 00 00 eb 08 90 90 90 90 90 90 90 90 89 ec 5d c3
----------------------------------------------------------------------------
-
-Dr. G.W. Wettstein Oncology Research Div. Computing Facility
-Roger Maris Cancer Center INTERNET: [email protected]
-820 4th St. N.
-Fargo, ND 58122
-Phone: 701-234-7556
-
-
----------------------------------------------------------------------------
-受汙染的內核
-
-一些oops報告在程序記數器之後包含字符串'Tainted: '。這表明內核已經被一些東西給汙
-染了。 該字符串之後緊跟著一系列的位置敏感的字符,每個代表一個特定的汙染值。
-
- 1:'G'如果所有裝載的模塊都有GPL或相容的許可證,'P'如果裝載了任何的專有模塊。
-沒有模塊MODULE_LICENSE或者帶有insmod認爲是與GPL不相容的的MODULE_LICENSE的模塊被
-認定是專有的。
-
- 2:'F'如果有任何通過「insmod -f」被強制裝載的模塊,' '如果所有模塊都被正常裝載。
-
- 3:'S'如果oops發生在SMP內核中,運行於沒有證明安全運行多處理器的硬體。 當前這種
-情況僅限於幾種不支持SMP的速龍處理器。
-
- 4:'R'如果模塊通過「insmod -f」被強制裝載,' '如果所有模塊都被正常裝載。
-
- 5:'M'如果任何處理器報告了機器檢查異常,' '如果沒有發生機器檢查異常。
-
- 6:'B'如果頁釋放函數發現了一個錯誤的頁引用或者一些非預期的頁標誌。
-
- 7:'U'如果用戶或者用戶應用程式特別請求設置汙染標誌,否則' '。
-
- 8:'D'如果內核剛剛死掉,比如有OOPS或者BUG。
-
-使用'Tainted: '字符串的主要原因是要告訴內核調試者,這是否是一個乾淨的內核亦或發
-生了任何的不正常的事。汙染是永久的:即使出錯的模塊已經被卸載了,汙染值仍然存在,
-以表明內核不再值得信任。
-
diff --git a/Documentation/virt/kvm/x86/mmu.rst b/Documentation/virt/kvm/x86/mmu.rst
index 8739120f4300..8364afa228ec 100644
--- a/Documentation/virt/kvm/x86/mmu.rst
+++ b/Documentation/virt/kvm/x86/mmu.rst
@@ -377,7 +377,7 @@ Emulating cr0.wp
================
If tdp is not enabled, the host must keep cr0.wp=1 so page write protection
-works for the guest kernel, not guest guest userspace. When the guest
+works for the guest kernel, not guest userspace. When the guest
cr0.wp=1, this does not present a problem. However when the guest cr0.wp=0,
we cannot map the permissions for gpte.u=1, gpte.w=0 to any spte (the
semantics require allowing any guest kernel access plus user read access).
diff --git a/Documentation/w1/masters/ds2490.rst b/Documentation/w1/masters/ds2490.rst
index 7e5b50f9c0f5..842e7ae80424 100644
--- a/Documentation/w1/masters/ds2490.rst
+++ b/Documentation/w1/masters/ds2490.rst
@@ -52,7 +52,7 @@ Notes and limitations.
clear the entire bulk in buffer. It would be possible to read the
maximum buffer size to not run into this error condition, only extra
bytes in the buffer is a logic error in the driver. The code should
- should match reads and writes as well as data sizes. Reads and
+ match reads and writes as well as data sizes. Reads and
writes are serialized and the status verifies that the chip is idle
(and data is available) before the read is executed, so it should
not happen.
diff --git a/Documentation/w1/w1-generic.rst b/Documentation/w1/w1-generic.rst
index da4e8b4e9b01..99255b6d0e53 100644
--- a/Documentation/w1/w1-generic.rst
+++ b/Documentation/w1/w1-generic.rst
@@ -113,7 +113,7 @@ generally only make sense when searching is disabled, as a search will
redetect manually removed devices that are present and timeout manually
added devices that aren't on the bus.
-Bus searches occur at an interval, specified as a summ of timeout and
+Bus searches occur at an interval, specified as a sum of timeout and
timeout_us module parameters (either of which may be 0) for as long as
w1_master_search remains greater than 0 or is -1. Each search attempt
decrements w1_master_search by 1 (down to 0) and increments
diff --git a/Documentation/x86/entry_64.rst b/Documentation/x86/entry_64.rst
index e433e08f7018..0afdce3c06f4 100644
--- a/Documentation/x86/entry_64.rst
+++ b/Documentation/x86/entry_64.rst
@@ -33,8 +33,8 @@ Some of these entries are:
- interrupt: An array of entries. Every IDT vector that doesn't
explicitly point somewhere else gets set to the corresponding
value in interrupts. These point to a whole array of
- magically-generated functions that make their way to do_IRQ with
- the interrupt number as a parameter.
+ magically-generated functions that make their way to common_interrupt()
+ with the interrupt number as a parameter.
- APIC interrupts: Various special-purpose interrupts for things
like TLB shootdown.
diff --git a/MAINTAINERS b/MAINTAINERS
index fd46e8efa14b..dcf675bbfbe9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -669,7 +669,8 @@ F: fs/afs/
F: include/trace/events/afs.h
AGPGART DRIVER
-M: David Airlie <[email protected]>
+M: David Airlie <[email protected]>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm
F: drivers/char/agp/
@@ -1008,7 +1009,6 @@ F: drivers/spi/spi-amd.c
AMD MP2 I2C DRIVER
M: Elie Morisse <[email protected]>
-M: Nehal Shah <[email protected]>
M: Shyam Sundar S K <[email protected]>
S: Maintained
@@ -2577,7 +2577,7 @@ W: http://www.armlinux.org.uk/
ARM/QUALCOMM SUPPORT
M: Andy Gross <[email protected]>
-M: Bjorn Andersson <[email protected]>
+M: Bjorn Andersson <[email protected]>
R: Konrad Dybcio <[email protected]>
S: Maintained
@@ -2668,7 +2668,6 @@ M: Russell King <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
W: http://www.armlinux.org.uk/
-F: arch/arm/include/asm/hardware/entry-macro-iomd.S
F: arch/arm/include/asm/hardware/ioc.h
F: arch/arm/include/asm/hardware/iomd.h
F: arch/arm/include/asm/hardware/memc.h
@@ -5243,6 +5242,7 @@ F: block/blk-throttle.c
F: include/linux/blk-cgroup.h
CONTROL GROUP - CPUSET
+M: Waiman Long <[email protected]>
M: Zefan Li <[email protected]>
S: Maintained
@@ -6751,7 +6751,7 @@ F: Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
F: drivers/gpu/drm/panel/panel-widechips-ws2401.c
DRM DRIVERS
-M: David Airlie <[email protected]>
+M: David Airlie <[email protected]>
M: Daniel Vetter <[email protected]>
S: Maintained
@@ -8650,8 +8650,8 @@ F: drivers/input/touchscreen/goodix*
GOOGLE ETHERNET DRIVERS
M: Jeroen de Borst <[email protected]>
-R: Catherine Sullivan <[email protected]>
-R: David Awogbemila <[email protected]>
+M: Catherine Sullivan <[email protected]>
+R: Shailend Chand <[email protected]>
S: Supported
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -8941,7 +8941,7 @@ F: include/linux/hw_random.h
HARDWARE SPINLOCK CORE
M: Ohad Ben-Cohen <[email protected]>
-M: Bjorn Andersson <[email protected]>
+M: Bjorn Andersson <[email protected]>
R: Baolin Wang <[email protected]>
S: Maintained
@@ -14446,6 +14446,7 @@ M: Willy Tarreau <[email protected]>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
F: tools/include/nolibc/
+F: tools/testing/selftests/nolibc/
NSDEPS
M: Matthias Maennich <[email protected]>
@@ -16122,7 +16123,7 @@ F: drivers/gpio/gpio-sama5d2-piobu.c
F: drivers/pinctrl/pinctrl-at91*
PIN CONTROLLER - QUALCOMM
-M: Bjorn Andersson <[email protected]>
+M: Bjorn Andersson <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/qcom,*.txt
@@ -16815,7 +16816,7 @@ F: Documentation/devicetree/bindings/media/*camss*
F: drivers/media/platform/qcom/camss/
QUALCOMM CLOCK DRIVERS
-M: Bjorn Andersson <[email protected]>
+M: Bjorn Andersson <[email protected]>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
@@ -16854,6 +16855,7 @@ F: drivers/net/ethernet/qualcomm/emac/
QUALCOMM ETHQOS ETHERNET DRIVER
M: Vinod Koul <[email protected]>
+R: Bhupesh Sharma <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
@@ -17304,7 +17306,7 @@ S: Supported
F: fs/reiserfs/
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
-M: Bjorn Andersson <[email protected]>
+M: Bjorn Andersson <[email protected]>
M: Mathieu Poirier <[email protected]>
S: Maintained
@@ -17317,7 +17319,7 @@ F: include/linux/remoteproc.h
F: include/linux/remoteproc/
REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
-M: Bjorn Andersson <[email protected]>
+M: Bjorn Andersson <[email protected]>
M: Mathieu Poirier <[email protected]>
S: Maintained
@@ -19956,6 +19958,7 @@ S: Supported
F: drivers/net/team/
F: include/linux/if_team.h
F: include/uapi/linux/if_team.h
+F: tools/testing/selftests/drivers/net/team/
TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT
M: "Savoir-faire Linux Inc." <[email protected]>
@@ -21562,7 +21565,7 @@ F: drivers/gpio/gpio-virtio.c
F: include/uapi/linux/virtio_gpio.h
VIRTIO GPU DRIVER
-M: David Airlie <[email protected]>
+M: David Airlie <[email protected]>
M: Gerd Hoffmann <[email protected]>
R: Gurchetan Singh <[email protected]>
R: Chia-I Wu <[email protected]>
diff --git a/Makefile b/Makefile
index 298f69060f10..8478e13e9424 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index cb2e069dc73f..abfed1aa2baa 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -23,7 +23,9 @@ unsigned int __machine_arch_type;
#include <linux/types.h>
#include <linux/linkage.h>
#include "misc.h"
+#ifdef CONFIG_ARCH_EP93XX
#include "misc-ep93xx.h"
+#endif
static void putstr(const char *ptr);
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 7da42a5b959c..7e50fe633d8a 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -1502,8 +1502,7 @@
mmc1: mmc@0 {
compatible = "ti,am335-sdhci";
ti,needs-special-reset;
- dmas = <&edma_xbar 24 0 0
- &edma_xbar 25 0 0>;
+ dmas = <&edma 24 0>, <&edma 25 0>;
dma-names = "tx", "rx";
interrupts = <64>;
reg = <0x0 0x1000>;
diff --git a/arch/arm/boot/dts/am5748.dtsi b/arch/arm/boot/dts/am5748.dtsi
index c260aa1a85bd..a1f029e9d1f3 100644
--- a/arch/arm/boot/dts/am5748.dtsi
+++ b/arch/arm/boot/dts/am5748.dtsi
@@ -25,6 +25,10 @@
status = "disabled";
};
+&usb4_tm {
+ status = "disabled";
+};
+
&atl_tm {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/integratorap-im-pd1.dts b/arch/arm/boot/dts/integratorap-im-pd1.dts
index 4c22e4436271..cc514cf07bff 100644
--- a/arch/arm/boot/dts/integratorap-im-pd1.dts
+++ b/arch/arm/boot/dts/integratorap-im-pd1.dts
@@ -249,6 +249,7 @@
/* 640x480 16bpp @ 25.175MHz is 36827428 bytes/s */
max-memory-bandwidth = <40000000>;
memory-region = <&impd1_ram>;
+ dma-ranges;
port@0 {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index 9b652cc27b14..9148287fa0a9 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -160,6 +160,7 @@
pci: pciv3@62000000 {
compatible = "arm,integrator-ap-pci", "v3,v360epc-pci";
+ device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
@@ -261,7 +262,7 @@
lm0: bus@c0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xc0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xc0000000 0x10000000>;
reg = <0xc0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -269,7 +270,7 @@
lm1: bus@d0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xd0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xd0000000 0x10000000>;
reg = <0xd0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -277,7 +278,7 @@
lm2: bus@e0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xe0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xe0000000 0x10000000>;
reg = <0xe0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -285,7 +286,7 @@
lm3: bus@f0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xf0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xf0000000 0x10000000>;
reg = <0xf0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/lan966x.dtsi b/arch/arm/boot/dts/lan966x.dtsi
index 894bf9da19a4..0bf818713422 100644
--- a/arch/arm/boot/dts/lan966x.dtsi
+++ b/arch/arm/boot/dts/lan966x.dtsi
@@ -541,13 +541,13 @@
phy0: ethernet-phy@1 {
reg = <1>;
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
phy1: ethernet-phy@2 {
reg = <2>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts
index eb5291b0ee3a..e07b807b4cec 100644
--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
@@ -79,7 +79,7 @@
clocks = <&ref12>;
};
-&sdhci {
+&mmc {
status = "okay";
};
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index f5f070a87482..764832ddfa78 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -93,8 +93,8 @@
clock-names = "PCLK";
};
- sdhci: sdhci@98e00000 {
- compatible = "moxa,moxart-sdhci";
+ mmc: mmc@98e00000 {
+ compatible = "moxa,moxart-mmc";
reg = <0x98e00000 0x5C>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_apb>;
diff --git a/arch/arm/mach-sunplus/Kconfig b/arch/arm/mach-sunplus/Kconfig
index 926cde5e3cd9..d0c2416e6f24 100644
--- a/arch/arm/mach-sunplus/Kconfig
+++ b/arch/arm/mach-sunplus/Kconfig
@@ -18,8 +18,8 @@ config SOC_SP7021
select ARM_PSCI
select PINCTRL
select PINCTRL_SPPCTL
- select SERIAL_SUNPLUS
- select SERIAL_SUNPLUS_CONSOLE
+ select SERIAL_SUNPLUS if TTY
+ select SERIAL_SUNPLUS_CONSOLE if TTY
help
Support for Sunplus SP7021 SoC. It is based on ARM 4-core
Cortex-A7 with various peripherals (e.g.: I2C, SPI, SDIO,
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index fb688003d156..712da6a81b23 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -346,7 +346,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
addr = start + i * PMD_SIZE;
domain = get_domain_name(pmd);
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
- note_page(st, addr, 3, pmd_val(*pmd), domain);
+ note_page(st, addr, 4, pmd_val(*pmd), domain);
else
walk_pte(st, pmd, addr, domain);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a49f0b9c0f75..463fc2a8448f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -300,7 +300,11 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
+#ifdef CONFIG_ARM_LPAE
+ .prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+#else
.prot_sect = PMD_TYPE_SECT,
+#endif
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
index c97f4e06ae5f..32f6f2f50c10 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
@@ -152,11 +152,11 @@
* CPLD_reset is RESET_SOFT in schematic
*/
gpio-line-names =
- "CPLD_D[1]", "CPLD_int", "CPLD_reset", "",
- "", "CPLD_D[0]", "", "",
- "", "", "", "CPLD_D[2]",
- "CPLD_D[3]", "CPLD_D[4]", "CPLD_D[5]", "CPLD_D[6]",
- "CPLD_D[7]", "", "", "",
+ "CPLD_D[6]", "CPLD_int", "CPLD_reset", "",
+ "", "CPLD_D[7]", "", "",
+ "", "", "", "CPLD_D[5]",
+ "CPLD_D[4]", "CPLD_D[3]", "CPLD_D[2]", "CPLD_D[1]",
+ "CPLD_D[0]", "", "", "",
"", "", "", "",
"", "", "", "KBD_intK",
"", "", "", "";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
index 286d2df01cfa..7e0aeb2db305 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
@@ -5,7 +5,6 @@
/dts-v1/;
-#include <dt-bindings/phy/phy-imx8-pcie.h>
#include "imx8mm-tqma8mqml.dtsi"
#include "mba8mx.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
index 16ee9b5179e6..f649dfacb4b6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
@@ -3,6 +3,7 @@
* Copyright 2020-2021 TQ-Systems GmbH
*/
+#include <dt-bindings/phy/phy-imx8-pcie.h>
#include "imx8mm.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index b379c461aa13..3ec0c9ac3170 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -367,8 +367,8 @@
nxp,dvs-standby-voltage = <850000>;
regulator-always-on;
regulator-boot-on;
- regulator-max-microvolt = <950000>;
- regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-min-microvolt = <805000>;
regulator-name = "On-module +VDD_ARM (BUCK2)";
regulator-ramp-delay = <3125>;
};
@@ -376,8 +376,8 @@
reg_vdd_dram: BUCK3 {
regulator-always-on;
regulator-boot-on;
- regulator-max-microvolt = <950000>;
- regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <805000>;
regulator-name = "On-module +VDD_GPU_VPU_DDR (BUCK3)";
};
@@ -416,7 +416,7 @@
reg_vdd_snvs: LDO2 {
regulator-always-on;
regulator-boot-on;
- regulator-max-microvolt = <900000>;
+ regulator-max-microvolt = <800000>;
regulator-min-microvolt = <800000>;
regulator-name = "On-module +V0.8_SNVS (LDO2)";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 0c71b740a316..cb2836bfbd95 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -672,7 +672,6 @@
<&clk IMX8MN_CLK_GPU_SHADER>,
<&clk IMX8MN_CLK_GPU_BUS_ROOT>,
<&clk IMX8MN_CLK_GPU_AHB>;
- resets = <&src IMX8MQ_RESET_GPU_RESET>;
};
pgc_dispmix: power-domain@3 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
index d8ca52976170..0e237b2f9541 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
@@ -57,13 +57,13 @@
switch-1 {
label = "S12";
linux,code = <BTN_0>;
- gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
};
switch-2 {
label = "S13";
linux,code = <BTN_1>;
- gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
};
};
@@ -394,6 +394,8 @@
&pcf85063 {
/* RTC_EVENT# is connected on MBa8MPxL */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcf85063>;
interrupt-parent = <&gpio4>;
interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
};
@@ -630,6 +632,10 @@
fsl,pins = <MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x10>; /* Power enable */
};
+ pinctrl_pcf85063: pcf85063grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x80>;
+ };
+
/* LVDS Backlight */
pinctrl_pwm2: pwm2grp {
fsl,pins = <MX8MP_IOMUXC_SAI5_RXD0__PWM2_OUT 0x14>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index 6630ec561dc2..211e6a1b296e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -123,8 +123,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_can>;
regulator-name = "can2_stby";
- gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>;
- enable-active-high;
+ gpio = <&gpio3 19 GPIO_ACTIVE_LOW>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
@@ -484,35 +483,40 @@
lan1: port@0 {
reg = <0>;
label = "lan1";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
lan2: port@1 {
reg = <1>;
label = "lan2";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
lan3: port@2 {
reg = <2>;
label = "lan3";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
lan4: port@3 {
reg = <3>;
label = "lan4";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
lan5: port@4 {
reg = <4>;
label = "lan5";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
- port@6 {
- reg = <6>;
+ port@5 {
+ reg = <5>;
label = "cpu";
ethernet = <&fec>;
phy-mode = "rgmii-id";
diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
index 60c1b018bf03..bb56390b8f54 100644
--- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
@@ -172,6 +172,7 @@
compatible = "fsl,imx8ulp-pcc3";
reg = <0x292d0000 0x10000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
tpm5: tpm@29340000 {
@@ -270,6 +271,7 @@
compatible = "fsl,imx8ulp-pcc4";
reg = <0x29800000 0x10000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
lpi2c6: i2c@29840000 {
@@ -414,6 +416,7 @@
compatible = "fsl,imx8ulp-pcc5";
reg = <0x2da70000 0x10000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 13d7f267b289..dac3b69e314f 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -3374,6 +3374,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc2 SLAVE_USB3_0 0>;
interconnect-names = "usb-ddr", "apps-usb";
+ wakeup-source;
+
usb_1_dwc3: usb@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
@@ -3384,7 +3386,6 @@
phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
- wakeup-source;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index 84dc92dda0b8..4c404e2eafba 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -235,13 +235,13 @@
};
&remoteproc_adsp {
- firmware-name = "qcom/sc8280xp/qcadsp8280.mbn";
+ firmware-name = "qcom/sc8280xp/LENOVO/21BX/qcadsp8280.mbn";
status = "okay";
};
&remoteproc_nsp0 {
- firmware-name = "qcom/sc8280xp/qccdsp8280.mbn";
+ firmware-name = "qcom/sc8280xp/LENOVO/21BX/qccdsp8280.mbn";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 7d509ecd44da..916f12b799b7 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -3394,57 +3394,49 @@
compute-cb@1 {
compatible = "qcom,fastrpc-compute-cb";
reg = <1>;
- iommus = <&apps_smmu 0x1401 0x2040>,
- <&apps_smmu 0x1421 0x0>,
- <&apps_smmu 0x2001 0x420>,
- <&apps_smmu 0x2041 0x0>;
+ iommus = <&apps_smmu 0x1001 0x0460>;
};
compute-cb@2 {
compatible = "qcom,fastrpc-compute-cb";
reg = <2>;
- iommus = <&apps_smmu 0x2 0x3440>,
- <&apps_smmu 0x22 0x3400>;
+ iommus = <&apps_smmu 0x1002 0x0460>;
};
compute-cb@3 {
compatible = "qcom,fastrpc-compute-cb";
reg = <3>;
- iommus = <&apps_smmu 0x3 0x3440>,
- <&apps_smmu 0x1423 0x0>,
- <&apps_smmu 0x2023 0x0>;
+ iommus = <&apps_smmu 0x1003 0x0460>;
};
compute-cb@4 {
compatible = "qcom,fastrpc-compute-cb";
reg = <4>;
- iommus = <&apps_smmu 0x4 0x3440>,
- <&apps_smmu 0x24 0x3400>;
+ iommus = <&apps_smmu 0x1004 0x0460>;
};
compute-cb@5 {
compatible = "qcom,fastrpc-compute-cb";
reg = <5>;
- iommus = <&apps_smmu 0x5 0x3440>,
- <&apps_smmu 0x25 0x3400>;
+ iommus = <&apps_smmu 0x1005 0x0460>;
};
compute-cb@6 {
compatible = "qcom,fastrpc-compute-cb";
reg = <6>;
- iommus = <&apps_smmu 0x6 0x3460>;
+ iommus = <&apps_smmu 0x1006 0x0460>;
};
compute-cb@7 {
compatible = "qcom,fastrpc-compute-cb";
reg = <7>;
- iommus = <&apps_smmu 0x7 0x3460>;
+ iommus = <&apps_smmu 0x1007 0x0460>;
};
compute-cb@8 {
compatible = "qcom,fastrpc-compute-cb";
reg = <8>;
- iommus = <&apps_smmu 0x8 0x3460>;
+ iommus = <&apps_smmu 0x1008 0x0460>;
};
/* note: secure cb9 in downstream */
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index e72a04411888..d9b08dfc2980 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -2128,7 +2128,7 @@
ufs_mem_phy: phy@1d87000 {
compatible = "qcom,sm8350-qmp-ufs-phy";
- reg = <0 0x01d87000 0 0xe10>;
+ reg = <0 0x01d87000 0 0x1c4>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
index 7249871530ab..5eecbefa8a33 100644
--- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
@@ -2,8 +2,8 @@
/*
* Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
* Copyright (c) 2020 Engicam srl
- * Copyright (c) 2020 Amarula Solutons
- * Copyright (c) 2020 Amarula Solutons(India)
+ * Copyright (c) 2020 Amarula Solutions
+ * Copyright (c) 2020 Amarula Solutions(India)
*/
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 31ebb4e5fd33..0f9cc042d9bf 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -88,3 +88,8 @@
};
};
};
+
+&wlan_host_wake_l {
+ /* Kevin has an external pull up, but Bob does not. */
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
index cd074641884b..ee6095baba4d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
@@ -244,6 +244,14 @@
&edp {
status = "okay";
+ /*
+ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
+ * set this here, because rk3399-gru.dtsi ensures we can generate this
+ * off GPLL=600MHz, whereas some other RK3399 boards may not.
+ */
+ assigned-clocks = <&cru PCLK_EDP>;
+ assigned-clock-rates = <24000000>;
+
ports {
edp_out: port@1 {
reg = <1>;
@@ -578,6 +586,7 @@ ap_i2c_tp: &i2c5 {
};
wlan_host_wake_l: wlan-host-wake-l {
+ /* Kevin has an external pull up, but Bob does not */
rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index b1ac3a89f259..aa3e21bd6c8f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -62,7 +62,6 @@
vcc5v0_host: vcc5v0-host-regulator {
compatible = "regulator-fixed";
gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
- enable-active-low;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_host_en>;
regulator-name = "vcc5v0_host";
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
index d943559b157c..a05460b92415 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
@@ -189,7 +189,6 @@
vcc3v3_sd: vcc3v3_sd {
compatible = "regulator-fixed";
- enable-active-low;
gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&vcc_sd_h>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
index 02d5f5a8ca03..528bb4e8ac77 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
@@ -506,7 +506,7 @@
disable-wp;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
- sd-uhs-sdr104;
+ sd-uhs-sdr50;
vmmc-supply = <&vcc3v3_sd>;
vqmmc-supply = <&vccio_sd>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index 5e34bd0b214d..93d383b8be87 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -678,7 +678,7 @@
};
&usb_host0_xhci {
- extcon = <&usb2phy0>;
+ dr_mode = "host";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
index 6ff89ff95ad1..674792567fa6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
@@ -656,7 +656,7 @@
};
&usb2phy0_otg {
- vbus-supply = <&vcc5v0_usb_otg>;
+ phy-supply = <&vcc5v0_usb_otg>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
index 6b5093a1a6cf..b2e040dffb59 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
@@ -581,7 +581,7 @@
};
&usb2phy0_otg {
- vbus-supply = <&vcc5v0_usb_otg>;
+ phy-supply = <&vcc5v0_usb_otg>;
status = "okay";
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index d5b2d2dd4904..5b167649097e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -48,6 +48,7 @@ CONFIG_ARCH_KEEMBAY=y
CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_MESON=y
CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_NXP=y
CONFIG_ARCH_MXC=y
CONFIG_ARCH_NPCM=y
CONFIG_ARCH_QCOM=y
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index ad2bfc794257..44ebf5b2fc4b 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -237,7 +237,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
for_each_cpu(cpu, cpus) {
if (!freq_counters_valid(cpu) ||
freq_inv_set_max_ratio(cpu,
- cpufreq_get_hw_max_freq(cpu) * 1000,
+ cpufreq_get_hw_max_freq(cpu) * 1000ULL,
arch_timer_get_rate()))
return;
}
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 2ff0ef62abad..917086be5c6b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2114,7 +2114,7 @@ static int finalize_hyp_mode(void)
* at, which would end badly once inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
- kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
+ kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
return pkvm_drop_host_privileges();
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e7ad44585f40..eb489302c28a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -331,12 +331,6 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
}
BUG_ON(p4d_bad(p4d));
- /*
- * No need for locking during early boot. And it doesn't work as
- * expected with KASLR enabled.
- */
- if (system_state != SYSTEM_BOOTING)
- mutex_lock(&fixmap_lock);
pudp = pud_set_fixmap_offset(p4dp, addr);
do {
pud_t old_pud = READ_ONCE(*pudp);
@@ -368,15 +362,13 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
} while (pudp++, addr = next, addr != end);
pud_clear_fixmap();
- if (system_state != SYSTEM_BOOTING)
- mutex_unlock(&fixmap_lock);
}
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
- int flags)
+static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags)
{
unsigned long addr, end, next;
pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
@@ -400,8 +392,20 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
} while (pgdp++, addr = next, addr != end);
}
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags)
+{
+ mutex_lock(&fixmap_lock);
+ __create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
+ pgtable_alloc, flags);
+ mutex_unlock(&fixmap_lock);
+}
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-extern __alias(__create_pgd_mapping)
+extern __alias(__create_pgd_mapping_locked)
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int), int flags);
diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h
index 6e8f6972ceb6..00db93edae1b 100644
--- a/arch/loongarch/include/asm/loongson.h
+++ b/arch/loongarch/include/asm/loongson.h
@@ -14,8 +14,6 @@
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
-extern const struct plat_smp_ops loongson3_smp_ops;
-
#define LOONGSON_REG(x) \
(*(volatile u32 *)((char *)TO_UNCACHE(LOONGSON_REG_BASE) + (x)))
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index c60eb66793e3..331864369e49 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -14,6 +14,8 @@
__REF
+ .align 12
+
SYM_CODE_START(kernel_entry) # kernel entry point
/* Config direct window and set PG */
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index aa1c95aaf595..5010e95cef84 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -461,11 +461,9 @@ asmlinkage void noinstr do_watch(struct pt_regs *regs)
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
- int status = -1;
+ int status = SIGILL;
unsigned int opcode = 0;
unsigned int __user *era = (unsigned int __user *)exception_era(regs);
- unsigned long old_era = regs->csr_era;
- unsigned long old_ra = regs->regs[1];
irqentry_state_t state = irqentry_enter(regs);
local_irq_enable();
@@ -477,21 +475,12 @@ asmlinkage void noinstr do_ri(struct pt_regs *regs)
die_if_kernel("Reserved instruction in kernel code", regs);
- compute_return_era(regs);
-
if (unlikely(get_user(opcode, era) < 0)) {
status = SIGSEGV;
current->thread.error_code = 1;
}
- if (status < 0)
- status = SIGILL;
-
- if (unlikely(status > 0)) {
- regs->csr_era = old_era; /* Undo skip-over. */
- regs->regs[1] = old_ra;
- force_sig(status);
- }
+ force_sig(status);
out:
local_irq_disable();
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b06faf6c0b27..7bff88118507 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -87,7 +87,7 @@ config MMU_SUN3
config KEXEC
bool "kexec system call"
- depends on M68KCLASSIC
+ depends on M68KCLASSIC && MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index a6a886a89be2..e2038d9499e4 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -84,7 +84,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -573,9 +572,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -594,6 +593,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index bffd24c2755e..ddd201259e43 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -80,7 +80,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -530,9 +529,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -551,6 +550,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 0013425b1e08..d9f783707387 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -87,7 +87,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -550,9 +549,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -571,6 +570,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 42d969697f7f..68957c6bcff1 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -77,7 +77,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -522,9 +521,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -543,6 +542,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 97d6d9acb395..825c6a02fa9d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -79,7 +79,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -532,9 +531,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -553,6 +552,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 8cbfc1c659a3..17f64c562bf1 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -78,7 +78,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -552,9 +551,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -573,6 +572,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 9f45fe60757f..f5f4c572b694 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -98,7 +98,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -638,9 +637,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -659,6 +658,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 4736cfacf6a2..b4a0bbef7e39 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -76,7 +76,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -521,9 +520,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -542,6 +541,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 638cd38aa7d2..c6a6d5926793 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -77,7 +77,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -522,9 +521,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -543,6 +542,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index ec8b6bb70ebd..49c9c89f0caf 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -78,7 +78,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -539,9 +538,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -560,6 +559,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 7d8dc578d59c..9b44eeb9c07f 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -74,7 +74,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -521,9 +520,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -542,6 +541,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 96290aee5302..d2ffb0a65b44 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -74,7 +74,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
@@ -520,9 +519,9 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -541,6 +540,7 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/m68k/include/uapi/asm/bootinfo-virt.h b/arch/m68k/include/uapi/asm/bootinfo-virt.h
index b091ee9b06e0..7dbcd7bec103 100644
--- a/arch/m68k/include/uapi/asm/bootinfo-virt.h
+++ b/arch/m68k/include/uapi/asm/bootinfo-virt.h
@@ -13,13 +13,8 @@
#define BI_VIRT_VIRTIO_BASE 0x8004
#define BI_VIRT_CTRL_BASE 0x8005
-/*
- * A random seed used to initialize the RNG. Record format:
- *
- * - length [ 2 bytes, 16-bit big endian ]
- * - seed data [ `length` bytes, padded to preserve 2-byte alignment ]
- */
-#define BI_VIRT_RNG_SEED 0x8006
+/* No longer used -- replaced with BI_RNG_SEED -- but don't reuse this index:
+ * #define BI_VIRT_RNG_SEED 0x8006 */
#define VIRT_BOOTI_VERSION MK_BI_VERSION(2, 0)
diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h
index 95ecf3ae4c49..024e87d7095f 100644
--- a/arch/m68k/include/uapi/asm/bootinfo.h
+++ b/arch/m68k/include/uapi/asm/bootinfo.h
@@ -64,6 +64,13 @@ struct mem_info {
/* (struct mem_info) */
#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */
/* (string) */
+/*
+ * A random seed used to initialize the RNG. Record format:
+ *
+ * - length [ 2 bytes, 16-bit big endian ]
+ * - seed data [ `length` bytes, padded to preserve 4-byte struct alignment ]
+ */
+#define BI_RNG_SEED 0x0008
/*
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index e62fa8f2149b..3a2bb2e8fdad 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/nvram.h>
#include <linux/initrd.h>
+#include <linux/random.h>
#include <asm/bootinfo.h>
#include <asm/byteorder.h>
@@ -109,10 +110,9 @@ extern void paging_init(void);
static void __init m68k_parse_bootinfo(const struct bi_record *record)
{
+ const struct bi_record *first_record = record;
uint16_t tag;
- save_bootinfo(record);
-
while ((tag = be16_to_cpu(record->tag)) != BI_LAST) {
int unknown = 0;
const void *data = record->data;
@@ -148,10 +148,21 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
break;
case BI_COMMAND_LINE:
- strlcpy(m68k_command_line, data,
+ strscpy(m68k_command_line, data,
sizeof(m68k_command_line));
break;
+ case BI_RNG_SEED: {
+ u16 len = be16_to_cpup(data);
+ add_bootloader_randomness(data + 2, len);
+ /*
+ * Zero the data to preserve forward secrecy, and zero the
+ * length to prevent kexec from using it.
+ */
+ memzero_explicit((void *)data, len + 2);
+ break;
+ }
+
default:
if (MACH_IS_AMIGA)
unknown = amiga_parse_bootinfo(record);
@@ -182,6 +193,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
record = (struct bi_record *)((unsigned long)record + size);
}
+ save_bootinfo(first_record);
+
m68k_realnum_memory = m68k_num_memory;
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
if (m68k_num_memory > 1) {
diff --git a/arch/m68k/virt/config.c b/arch/m68k/virt/config.c
index 4ab22946ff68..632ba200ad42 100644
--- a/arch/m68k/virt/config.c
+++ b/arch/m68k/virt/config.c
@@ -2,7 +2,6 @@
#include <linux/reboot.h>
#include <linux/serial_core.h>
-#include <linux/random.h>
#include <clocksource/timer-goldfish.h>
#include <asm/bootinfo.h>
@@ -93,16 +92,6 @@ int __init virt_parse_bootinfo(const struct bi_record *record)
data += 4;
virt_bi_data.virtio.irq = be32_to_cpup(data);
break;
- case BI_VIRT_RNG_SEED: {
- u16 len = be16_to_cpup(data);
- add_bootloader_randomness(data + 2, len);
- /*
- * Zero the data to preserve forward secrecy, and zero the
- * length to prevent kexec from using it.
- */
- memzero_explicit((void *)data, len + 2);
- break;
- }
default:
unknown = 1;
break;
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index ab203e66ba0d..a9bea411d928 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -86,7 +86,7 @@ static __init void prom_init_mem(void)
pr_debug("Assume 128MB RAM\n");
break;
}
- if (!memcmp(prom_init, prom_init + mem, 32))
+ if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32))
break;
}
lowmem = mem;
@@ -159,7 +159,7 @@ void __init bcm47xx_prom_highmem_init(void)
off = EXTVBASE + __pa(off);
for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) {
- if (!memcmp(prom_init, (void *)(off + extmem), 16))
+ if (!memcmp((void *)prom_init, (void *)(off + extmem), 16))
break;
}
extmem -= lowmem;
diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
index c3ce49ec675f..8926417a8fbc 100644
--- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
@@ -105,14 +105,20 @@
interrupts = <2>, <3>;
};
- wdt: watchdog@1000009c {
- compatible = "brcm,bcm7038-wdt";
- reg = <0x1000009c 0xc>;
+ timer-mfd@10000080 {
+ compatible = "brcm,bcm7038-twd", "simple-mfd", "syscon";
+ reg = <0x10000080 0x30>;
+ ranges = <0x0 0x10000080 0x30>;
- clocks = <&periph_osc>;
- clock-names = "refclk";
+ wdt: watchdog@1c {
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x1c 0xc>;
- timeout-sec = <30>;
+ clocks = <&periph_osc>;
+ clock-names = "refclk";
+
+ timeout-sec = <30>;
+ };
};
uart0: serial@10000180 {
diff --git a/arch/mips/boot/dts/lantiq/Makefile b/arch/mips/boot/dts/lantiq/Makefile
index f5dfc06242b9..ae6e3e21ebeb 100644
--- a/arch/mips/boot/dts/lantiq/Makefile
+++ b/arch/mips/boot/dts/lantiq/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
+dtb-$(CONFIG_DT_EASY50712) += danube_easy50712.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/lantiq/easy50712.dts b/arch/mips/boot/dts/lantiq/danube_easy50712.dts
index 1ce20b7d05cb..1ce20b7d05cb 100644
--- a/arch/mips/boot/dts/lantiq/easy50712.dts
+++ b/arch/mips/boot/dts/lantiq/danube_easy50712.dts
diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c
index 6a4694538bb6..dc05262e85ff 100644
--- a/arch/mips/cavium-octeon/oct_ilm.c
+++ b/arch/mips/cavium-octeon/oct_ilm.c
@@ -28,7 +28,7 @@ struct latency_info {
static struct latency_info li;
static struct dentry *dir;
-static int show_latency(struct seq_file *m, void *v)
+static int oct_ilm_show(struct seq_file *m, void *v)
{
u64 cpuclk, avg, max, min;
struct latency_info curr_li = li;
@@ -43,18 +43,7 @@ static int show_latency(struct seq_file *m, void *v)
curr_li.interrupt_cnt, avg, max, min);
return 0;
}
-
-static int oct_ilm_open(struct inode *inode, struct file *file)
-{
- return single_open(file, show_latency, NULL);
-}
-
-static const struct file_operations oct_ilm_ops = {
- .open = oct_ilm_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(oct_ilm);
static int reset_statistics(void *data, u64 value)
{
@@ -67,7 +56,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n")
static void init_debugfs(void)
{
dir = debugfs_create_dir("oct_ilm", 0);
- debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_ops);
+ debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_fops);
debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops);
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index e7f994393ae8..a71727f7a608 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -530,7 +530,7 @@ void octeon_user_io_init(void)
/* Get the current settings for CP0_CVMMEMCTL_REG */
cvmmemctl.u64 = read_c0_cvmmemctl();
/* R/W If set, marked write-buffer entries time out the same
- * as as other entries; if clear, marked write-buffer entries
+ * as other entries; if clear, marked write-buffer entries
* use the maximum timeout. */
cvmmemctl.s.dismarkwblongto = 1;
/* R/W If set, a merged store does not clear the write-buffer
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index cf9c6329b807..ed4a6388791e 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -32,9 +32,6 @@ CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
@@ -117,7 +114,6 @@ CONFIG_JFFS2_SUMMARY=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
# CONFIG_CRYPTO_HW is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_CMDLINE_BOOL=y
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig
index 7143441f5476..afd1c16242e9 100644
--- a/arch/mips/configs/ath25_defconfig
+++ b/arch/mips/configs/ath25_defconfig
@@ -29,9 +29,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CFG80211=m
CONFIG_MAC80211=m
@@ -108,7 +105,6 @@ CONFIG_SQUASHFS_XZ=y
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 96622a2ad333..0b741716c852 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -10,12 +10,6 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_ATH79=y
-CONFIG_ATH79_MACH_AP121=y
-CONFIG_ATH79_MACH_AP136=y
-CONFIG_ATH79_MACH_AP81=y
-CONFIG_ATH79_MACH_DB120=y
-CONFIG_ATH79_MACH_PB44=y
-CONFIG_ATH79_MACH_UBNT_XM=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
@@ -29,9 +23,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CFG80211=m
CONFIG_MAC80211=m
@@ -92,7 +83,6 @@ CONFIG_LEDS_GPIO=y
# CONFIG_DNOTIFY is not set
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_CRC_ITU_T=m
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index 861f680184b9..34d0ca638ef0 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -24,9 +24,6 @@ CONFIG_PCMCIA_BCM63XX=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CFG80211=y
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index d83e7d600b0a..d15961f00ece 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -49,8 +49,6 @@ CONFIG_IP_PIMSM_V2=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -59,7 +57,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT_6RD=y
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
@@ -101,7 +98,6 @@ CONFIG_BAYCOM_SER_HDX=m
CONFIG_YAM=m
CONFIG_FW_LOADER=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_EEPROM_LEGACY=y
CONFIG_EEPROM_MAX6875=y
@@ -230,12 +226,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -243,7 +235,6 @@ CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/mips/configs/bmips_be_defconfig b/arch/mips/configs/bmips_be_defconfig
index 032bb51defe8..daef132d000b 100644
--- a/arch/mips/configs/bmips_be_defconfig
+++ b/arch/mips/configs/bmips_be_defconfig
@@ -17,9 +17,6 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
CONFIG_UNIX=y
CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig
index 5956fb95c19f..cd0dc37c3d84 100644
--- a/arch/mips/configs/bmips_stb_defconfig
+++ b/arch/mips/configs/bmips_stb_defconfig
@@ -12,7 +12,6 @@ CONFIG_HIGHMEM=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
-CONFIG_CC_STACKPROTECTOR_STRONG=y
# CONFIG_SECCOMP is not set
CONFIG_MIPS_O32_FP64_SUPPORT=y
# CONFIG_RD_GZIP is not set
@@ -21,8 +20,6 @@ CONFIG_MIPS_O32_FP64_SUPPORT=y
CONFIG_RD_XZ=y
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_PCIEASPM_POWERSAVE=y
@@ -30,7 +27,6 @@ CONFIG_PCIEPORTBUS=y
CONFIG_PCIE_BRCMSTB=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -44,15 +40,11 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
CONFIG_UNIX=y
CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
CONFIG_WIRELESS=y
CONFIG_MAC80211=y
-CONFIG_NL80211=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
@@ -70,10 +62,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BIC=y
@@ -93,7 +81,6 @@ CONFIG_NET_SWITCHDEV=y
CONFIG_DMA_CMA=y
CONFIG_CMA_ALIGNMENT=12
CONFIG_SPI=y
-CONFIG_SPI_BRCMSTB=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
@@ -105,14 +92,11 @@ CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_ROM=y
CONFIG_MTD_ABSENT=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_BRCMNAND=y
CONFIG_MTD_SPI_NOR=y
# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -120,7 +104,6 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_VLAN_8021Q=y
@@ -135,7 +118,6 @@ CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO is not set
CONFIG_VT=y
CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
@@ -203,17 +185,14 @@ CONFIG_CMDLINE="earlycon"
CONFIG_MIPS_CMDLINE_DTB_EXTEND=y
# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_DT_BCM974XX=y
CONFIG_FW_CFE=y
CONFIG_ATA=y
CONFIG_SATA_AHCI_PLATFORM=y
-CONFIG_AHCI_BRCMSTB=y
CONFIG_GENERIC_PHY=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_PHY_BRCM_USB=y
CONFIG_PHY_BRCM_SATA=y
-CONFIG_PM_RUNTIME=y
CONFIG_PM_DEBUG=y
CONFIG_SYSVIPC=y
CONFIG_FUNCTION_GRAPH_TRACER=y
@@ -227,3 +206,5 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
+CONFIG_AHCI_BRCM=y
+CONFIG_MTD_RAW_NAND=y
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index 97ceaf080c0c..a2311495af79 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -71,7 +71,6 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index b8bd66300996..a8b62df3c021 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -61,7 +61,6 @@ CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_SIT_6RD=y
CONFIG_IPV6_GRE=y
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index 0021427a1bbe..4f74c4bde9f6 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -37,9 +37,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -47,7 +44,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
@@ -79,7 +75,6 @@ CONFIG_NETDEVICES=y
CONFIG_DECLANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
@@ -193,12 +188,8 @@ CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
@@ -208,7 +199,6 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 7a97a0818ce4..f0d0546ea1e5 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -33,9 +33,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -43,7 +40,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
@@ -75,7 +71,6 @@ CONFIG_NETDEVICES=y
CONFIG_DECLANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
@@ -188,12 +183,8 @@ CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
@@ -203,7 +194,6 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index a0643363526d..df5ff9ddf8f4 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -32,9 +32,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -42,7 +39,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
@@ -74,7 +70,6 @@ CONFIG_NETDEVICES=y
CONFIG_DECLANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
@@ -188,12 +183,8 @@ CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
@@ -203,7 +194,6 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index ba47c5e929b7..843d6a5a4f61 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -35,8 +35,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
@@ -92,7 +90,6 @@ CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
CONFIG_MTD_PHYSMAP=m
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_RAM=m
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
@@ -159,7 +156,6 @@ CONFIG_USB_MOUSE=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_OTG_PRODUCTLIST=y
-CONFIG_USB_WUSB_CBAF=m
CONFIG_USB_C67X00_HCD=m
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
@@ -219,15 +215,10 @@ CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC7=m
-# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
index 510709565404..8cfbafa532e0 100644
--- a/arch/mips/configs/generic/board-ocelot.config
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -25,7 +25,6 @@ CONFIG_NETDEVICES=y
CONFIG_NET_SWITCHDEV=y
CONFIG_NET_DSA=y
CONFIG_MSCC_OCELOT_SWITCH=y
-CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
CONFIG_MDIO_MSCC_MIIM=y
CONFIG_MICROSEMI_PHY=y
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index d82f4ebf687f..7cd321b47d01 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -29,9 +29,6 @@ CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
@@ -220,9 +217,6 @@ CONFIG_HDLC_X25=m
CONFIG_PCI200SYN=m
CONFIG_WANXL=m
CONFIG_FARSYNC=m
-CONFIG_DSCC4=m
-CONFIG_DSCC4_PCISYNC=y
-CONFIG_DSCC4_PCI_RST=y
CONFIG_LAPBETHER=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -288,7 +282,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -299,7 +292,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs rw ip=auto"
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 70a4ba90f491..13df29901237 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -36,9 +36,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -47,7 +44,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
@@ -226,7 +222,6 @@ CONFIG_SERIO_RAW=m
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_IP22_ZILOG=m
# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=m
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
@@ -320,11 +315,7 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -333,7 +324,6 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 821630ac1be7..3e86f8106ba0 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -33,9 +33,6 @@ CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -44,10 +41,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_SIT_6RD=y
CONFIG_IPV6_TUNNEL=m
@@ -92,7 +85,6 @@ CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_RFKILL=m
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_SCSI=y
@@ -115,7 +107,6 @@ CONFIG_SCSI_AIC94XX=m
# CONFIG_AIC94XX_DEBUG is not set
CONFIG_SCSI_MVSAS=m
# CONFIG_SCSI_MVSAS_DEBUG is not set
-CONFIG_SCSI_DPT_I2O=m
CONFIG_SCSI_MPT2SAS=m
CONFIG_LIBFC=m
CONFIG_SCSI_QLOGIC_1280=y
@@ -126,8 +117,6 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
@@ -166,7 +155,6 @@ CONFIG_JME=m
CONFIG_MLX4_EN=m
# CONFIG_MLX4_DEBUG is not set
CONFIG_KS8851_MLL=m
-CONFIG_VXGE=m
CONFIG_AX88796=m
CONFIG_AX88796_93CX6=y
CONFIG_ETHOC=m
@@ -264,7 +252,6 @@ CONFIG_I2C_VIAPRO=m
CONFIG_I2C_OCORES=m
CONFIG_I2C_PCA_PLATFORM=m
CONFIG_I2C_SIMTEC=m
-CONFIG_I2C_PARPORT_LIGHT=m
CONFIG_I2C_TAOS_EVM=m
CONFIG_I2C_STUB=m
# CONFIG_HWMON is not set
@@ -309,7 +296,6 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_SQUASHFS=m
CONFIG_OMFS_FS=m
-CONFIG_EXOFS_FS=m
CONFIG_NFS_FS=y
CONFIG_SECURITYFS=y
CONFIG_CRYPTO_CRYPTD=m
@@ -321,12 +307,8 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -335,7 +317,6 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index 0921ef38e9fb..ba13eea0509f 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -29,9 +29,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_TCP_MD5SIG=y
# CONFIG_IPV6 is not set
CONFIG_SCSI=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 74020aa3440b..8ced2224c328 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -43,7 +43,6 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NETWORK_SECMARK=y
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_RAID_ATTRS=y
CONFIG_SCSI=y
@@ -165,7 +164,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_TGR192=y
CONFIG_CRYPTO_WP512=y
CONFIG_CRYPTO_ANUBIS=y
CONFIG_CRYPTO_ARC4=y
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 843f360da5f2..106b21cb677f 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -32,7 +32,6 @@ CONFIG_PARPORT_1284=y
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_FD=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
CONFIG_CDROM_PKTCDVD=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 791894c4d8fb..7e598d338979 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -43,9 +43,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BIC=y
CONFIG_DEFAULT_BIC=y
@@ -77,7 +74,6 @@ CONFIG_MAC80211_LEDS=y
CONFIG_RFKILL=m
CONFIG_RFKILL_INPUT=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_SD=y
@@ -312,12 +308,8 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -326,7 +318,6 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/mips/configs/loongson1b_defconfig b/arch/mips/configs/loongson1b_defconfig
index 25e70423e17d..68207b31dc20 100644
--- a/arch/mips/configs/loongson1b_defconfig
+++ b/arch/mips/configs/loongson1b_defconfig
@@ -28,9 +28,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
@@ -115,7 +112,6 @@ CONFIG_NLS_ISO8859_1=m
# CONFIG_CRYPTO_ECHAINIV is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_DYNAMIC_DEBUG=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig
index 3a158d4d2fab..c3910a9dee9e 100644
--- a/arch/mips/configs/loongson1c_defconfig
+++ b/arch/mips/configs/loongson1c_defconfig
@@ -29,9 +29,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
@@ -116,7 +113,6 @@ CONFIG_NLS_ISO8859_1=m
# CONFIG_CRYPTO_ECHAINIV is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_DYNAMIC_DEBUG=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/loongson2k_defconfig b/arch/mips/configs/loongson2k_defconfig
index e948ca487e2d..728bef666f7a 100644
--- a/arch/mips/configs/loongson2k_defconfig
+++ b/arch/mips/configs/loongson2k_defconfig
@@ -95,7 +95,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=m
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_RAID_ATTRS=m
@@ -229,7 +228,6 @@ CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_HW_RANDOM=y
-CONFIG_RAW_DRIVER=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_GPIO_LOONGSON=y
@@ -336,7 +334,6 @@ CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_SEQIV=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 25ecd15bc952..aca66a5f330d 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -143,7 +143,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=m
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
@@ -268,7 +267,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_RAW_DRIVER=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_GPIO_LOONGSON=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 7a5bdd236a2a..265d38dffbf6 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -42,8 +42,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -229,7 +227,6 @@ CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
@@ -237,7 +234,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
@@ -408,7 +404,6 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index b5ba08d7ab57..1d2b248c7cd3 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -46,8 +46,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -233,7 +231,6 @@ CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
@@ -241,7 +238,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
@@ -415,7 +411,6 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 6fb9bc29f4a0..fd63a2b152f6 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -76,7 +76,6 @@ CONFIG_NET_ACT_POLICE=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -98,7 +97,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
@@ -172,7 +170,6 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index eb72df528243..1f07e354c954 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -78,7 +78,6 @@ CONFIG_NET_ACT_POLICE=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -100,7 +99,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
@@ -173,7 +171,6 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 1fb40d310f49..5cd3eca236de 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -79,7 +79,6 @@ CONFIG_NET_ACT_POLICE=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -99,7 +98,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
@@ -174,7 +172,6 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 75cb778c6149..45688e742a15 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -80,7 +80,6 @@ CONFIG_NET_ACT_POLICE=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -102,7 +101,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
@@ -176,7 +174,6 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 7b4f247dc60c..136f965784db 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -77,7 +77,6 @@ CONFIG_NET_ACT_POLICE=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -99,7 +98,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
@@ -172,7 +170,6 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 8d58653f1b4e..75b8da8d9927 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -45,8 +45,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -231,7 +229,6 @@ CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
@@ -239,7 +236,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
@@ -414,7 +410,6 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 4194e79b435c..efbfaa539938 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -58,13 +58,9 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
@@ -284,7 +280,6 @@ CONFIG_PCMCIA_XIRCOM=m
CONFIG_DL2K=m
CONFIG_SUNDANCE=m
CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_HP100=m
CONFIG_E100=m
CONFIG_E1000=m
CONFIG_IXGB=m
@@ -368,9 +363,6 @@ CONFIG_HDLC_X25=m
CONFIG_PCI200SYN=m
CONFIG_WANXL=m
CONFIG_FARSYNC=m
-CONFIG_DSCC4=m
-CONFIG_DSCC4_PCISYNC=y
-CONFIG_DSCC4_PCI_RST=y
CONFIG_LAPBETHER=m
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -683,7 +675,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -692,5 +683,4 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig
index fc39ddf610a9..9c34daf83563 100644
--- a/arch/mips/configs/omega2p_defconfig
+++ b/arch/mips/configs/omega2p_defconfig
@@ -35,9 +35,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
index fd567247adc7..48dd02d01ac1 100644
--- a/arch/mips/configs/pic32mzda_defconfig
+++ b/arch/mips/configs/pic32mzda_defconfig
@@ -45,7 +45,6 @@ CONFIG_KEYBOARD_GPIO_POLLED=m
CONFIG_SERIAL_PIC32=y
CONFIG_SERIAL_PIC32_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_RAW_DRIVER=m
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
CONFIG_HIDRAW=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 252d472387aa..93306f5e045b 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -33,9 +33,6 @@ CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_CUBIC=m
@@ -162,5 +159,4 @@ CONFIG_SQUASHFS=y
CONFIG_CRYPTO_TEST=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC16=m
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index f8212a813be7..30c195f28278 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -21,9 +21,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_MTD=y
@@ -51,7 +48,6 @@ CONFIG_TC35815=y
CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_TXX9_STDSERIAL=y
CONFIG_SPI=y
-CONFIG_SPI_TXX9=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_TXX9_WDT=m
@@ -65,8 +61,6 @@ CONFIG_SND=m
# CONFIG_SND_SPI is not set
# CONFIG_SND_MIPS is not set
CONFIG_SND_SOC=m
-CONFIG_SND_SOC_TXX9ACLC=m
-CONFIG_SND_SOC_TXX9ACLC_GENERIC=m
# CONFIG_USB_SUPPORT is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -78,7 +72,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_RS5C348=y
CONFIG_RTC_DRV_DS1742=y
-CONFIG_RTC_DRV_TX4939=y
CONFIG_DMADEVICES=y
CONFIG_TXX9_DMAC=m
# CONFIG_DNOTIFY is not set
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 7d6f235e8ccb..d6981855221b 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -29,9 +29,6 @@ CONFIG_NET_IPIP=m
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -39,7 +36,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
@@ -192,9 +188,7 @@ CONFIG_PARIDE_KTTI=m
CONFIG_PARIDE_ON20=m
CONFIG_PARIDE_ON26=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
CONFIG_BLK_DEV_RAM=m
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
@@ -400,7 +394,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index eb359db15dba..bf017d493002 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -35,9 +35,6 @@ CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
@@ -140,7 +137,6 @@ CONFIG_CRC32_SARWATE=y
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
index de94bf756a93..030186f89501 100644
--- a/arch/mips/configs/sb1250_swarm_defconfig
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -88,7 +88,6 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -96,7 +95,6 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig
index a14f8ea5c386..0722a3bf03c0 100644
--- a/arch/mips/configs/vocore2_defconfig
+++ b/arch/mips/configs/vocore2_defconfig
@@ -35,9 +35,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index eeb689f715cb..eb5acf1f24ae 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -37,9 +37,6 @@ CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
@@ -146,7 +143,6 @@ CONFIG_CRYPTO_ARC4=m
CONFIG_CRC_ITU_T=m
CONFIG_CRC32_SARWATE=y
CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 57561e0e6e8d..44f9824c1d8c 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -63,10 +63,6 @@ extern void do_domain_IRQ(struct irq_domain *domain, unsigned int irq);
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
-extern int allocate_irqno(void);
-extern void alloc_legacy_irqno(void);
-extern void free_irqno(unsigned int irq);
-
/*
* Before R2 the timer and performance counter interrupts were both fixed to
* IE7. Since R2 their number has to be read from the c0_intctl register.
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index cbe75ade3277..1e8621a6afa3 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -104,8 +104,6 @@ struct plat_dsl_data {
int reset_bit_sar;
};
-extern int ar7_cpu_clock, ar7_bus_clock, ar7_dsp_clock;
-
static inline int ar7_is_titan(void)
{
return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x24)) & 0xffff) ==
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa.h b/arch/mips/include/asm/octeon/cvmx-fpa.h
index 29ae63606ab4..f6dfcca97f19 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa.h
@@ -264,26 +264,6 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
}
/**
- * Setup a FPA pool to control a new block of memory.
- * This can only be called once per pool. Make sure proper
- * locking enforces this.
- *
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
- * @block_size: Size for each block controlled by the FPA
- * @num_blocks: Number of blocks
- *
- * Returns 0 on Success,
- * -1 on failure
- */
-extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
- uint64_t block_size, uint64_t num_blocks);
-
-/**
* Shutdown a Memory pool and validate that it had all of
* the buffers originally placed in it. This should only be
* called by one processor after all hardware has finished
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 7e714aefc76d..5c1d726c702f 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -43,7 +43,6 @@ extern int octeon_get_southbridge_interrupt(void);
extern int octeon_get_boot_coremask(void);
extern int octeon_get_boot_num_arguments(void);
extern const char *octeon_get_boot_argument(int arg);
-extern void octeon_hal_setup_reserved32(void);
extern void octeon_user_io_init(void);
extern void octeon_init_cvmcount(void);
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index b12d9a3fbfb6..2f46f6c6e3d0 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -64,6 +64,4 @@ enum octeon_dma_bar_type {
extern enum octeon_dma_bar_type octeon_dma_bar_type;
void octeon_pci_dma_init(void);
-extern char *octeon_swiotlb;
-
#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250.h b/arch/mips/include/asm/sibyte/sb1250.h
index dbde5f93f0dd..495b31925ed7 100644
--- a/arch/mips/include/asm/sibyte/sb1250.h
+++ b/arch/mips/include/asm/sibyte/sb1250.h
@@ -32,7 +32,6 @@ extern unsigned int soc_type;
extern unsigned int periph_rev;
extern unsigned int zbbus_mhz;
-extern void sb1250_time_init(void);
extern void sb1250_mask_irq(int cpu, int irq);
extern void sb1250_unmask_irq(int cpu, int irq);
diff --git a/arch/mips/include/asm/sni.h b/arch/mips/include/asm/sni.h
index 7dfa297ce597..7fb6656a6bfd 100644
--- a/arch/mips/include/asm/sni.h
+++ b/arch/mips/include/asm/sni.h
@@ -226,9 +226,6 @@ extern void sni_pcit_cplus_irq_init(void);
extern void sni_rm200_irq_init(void);
extern void sni_pcimt_irq_init(void);
-/* timer inits */
-extern void sni_cpu_time_init(void);
-
/* eisa init for RM200/400 */
#ifdef CONFIG_EISA
extern int sni_eisa_root_init(void);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 7db6ff9aed7d..f88ce78e13e3 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -26,7 +26,7 @@ __init void mips_set_machine_name(const char *name)
if (name == NULL)
return;
- strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
+ strscpy(mips_machine_name, name, sizeof(mips_machine_name));
pr_info("MIPS: machine is %s\n", mips_get_machine_name());
}
@@ -52,9 +52,9 @@ int __init __dt_register_buses(const char *bus0, const char *bus1)
if (!of_have_populated_dt())
panic("device tree not present");
- strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
+ strscpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
if (bus1) {
- strlcpy(of_ids[1].compatible, bus1,
+ strscpy(of_ids[1].compatible, bus1,
sizeof(of_ids[1].compatible));
}
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 56b51de2dc51..58fc8d089402 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -340,7 +340,7 @@ void *__init relocate_kernel(void)
early_init_dt_scan(fdt);
if (boot_command_line[0]) {
/* Boot command line was passed in device tree */
- strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
}
#endif /* CONFIG_USE_OF */
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
index 0a9bd7b0983b..24560501c70d 100644
--- a/arch/mips/kernel/segment.c
+++ b/arch/mips/kernel/segment.c
@@ -46,7 +46,7 @@ static void build_segment_config(char *str, unsigned int cfg)
((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
}
-static int show_segments(struct seq_file *m, void *v)
+static int segments_show(struct seq_file *m, void *v)
{
unsigned int segcfg;
char str[42];
@@ -80,18 +80,7 @@ static int show_segments(struct seq_file *m, void *v)
return 0;
}
-
-static int segments_open(struct inode *inode, struct file *file)
-{
- return single_open(file, show_segments, NULL);
-}
-
-static const struct file_operations segments_fops = {
- .open = segments_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(segments);
static int __init segments_info(void)
{
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2ca156a5b231..39c79f67c7a3 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -42,6 +42,7 @@
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>
+#include <asm/fw/fw.h>
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
char __section(".appended_dtb") __appended_dtb[0x100000];
@@ -756,6 +757,24 @@ static void __init prefill_possible_map(void)
static inline void prefill_possible_map(void) {}
#endif
+static void __init setup_rng_seed(void)
+{
+ char *rng_seed_hex = fw_getenv("rngseed");
+ u8 rng_seed[512];
+ size_t len;
+
+ if (!rng_seed_hex)
+ return;
+
+ len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
+ if (hex2bin(rng_seed, rng_seed_hex, len))
+ return;
+
+ add_bootloader_randomness(rng_seed, len);
+ memzero_explicit(rng_seed, len);
+ memzero_explicit(rng_seed_hex, len * 2);
+}
+
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
@@ -786,6 +805,8 @@ void __init setup_arch(char **cmdline_p)
paging_init();
memblock_dump_all();
+
+ setup_rng_seed();
}
unsigned long kernelsp[NR_CPUS];
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 7a623684d9b5..2d5a0bcb0cec 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
{
return &cpu_clk_generic[2];
}
+EXPORT_SYMBOL_GPL(clk_get_io);
struct clk *clk_get_ppe(void)
{
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index c731082a0c42..be4829cc7a3a 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -34,6 +34,14 @@ unsigned long physical_memsize = 0L;
*/
static struct ltq_soc_info soc_info;
+/*
+ * These structs are used to override vsmp_init_secondary()
+ */
+#if defined(CONFIG_MIPS_MT_SMP)
+extern const struct plat_smp_ops vsmp_smp_ops;
+static struct plat_smp_ops lantiq_smp_ops;
+#endif
+
const char *get_system_type(void)
{
return soc_info.sys_type;
@@ -84,6 +92,17 @@ void __init plat_mem_setup(void)
__dt_setup_arch(dtb);
}
+#if defined(CONFIG_MIPS_MT_SMP)
+static void lantiq_init_secondary(void)
+{
+ /*
+ * MIPS CPU startup function vsmp_init_secondary() will only
+ * enable some of the interrupts for the second CPU/VPE.
+ */
+ set_c0_status(ST0_IM);
+}
+#endif
+
void __init prom_init(void)
{
/* call the soc specific detection code and get it to fill soc_info */
@@ -95,7 +114,10 @@ void __init prom_init(void)
prom_init_cmdline();
#if defined(CONFIG_MIPS_MT_SMP)
- if (register_vsmp_smp_ops())
- panic("failed to register_vsmp_smp_ops()");
+ if (cpu_has_mipsmt) {
+ lantiq_smp_ops = vsmp_smp_ops;
+ lantiq_smp_ops.init_secondary = lantiq_init_secondary;
+ register_smp_ops(&lantiq_smp_ops);
+ }
#endif
}
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 7a14da8d9d15..2796e87dfcae 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -4,9 +4,10 @@
* Copyright (C) 2012 John Crispin <[email protected]>
*/
+#include <linux/err.h>
#include <linux/export.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <lantiq_soc.h>
@@ -25,23 +26,28 @@ EXPORT_SYMBOL(ltq_get_cp1_base);
static int vmmc_probe(struct platform_device *pdev)
{
#define CP1_SIZE (1 << 20)
+ struct gpio_desc *gpio;
int gpio_count;
dma_addr_t dma;
+ int error;
cp1_base =
(void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
&dma, GFP_KERNEL));
- gpio_count = of_gpio_count(pdev->dev.of_node);
+ gpio_count = gpiod_count(&pdev->dev, NULL);
while (gpio_count > 0) {
- enum of_gpio_flags flags;
- int gpio = of_get_gpio_flags(pdev->dev.of_node,
- --gpio_count, &flags);
- if (gpio_request(gpio, "vmmc-relay"))
+ gpio = devm_gpiod_get_index(&pdev->dev,
+ NULL, --gpio_count, GPIOD_OUT_HIGH);
+ error = PTR_ERR_OR_ZERO(gpio);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to request GPIO idx %d: %d\n",
+ gpio_count, error);
continue;
- dev_info(&pdev->dev, "requested GPIO %d\n", gpio);
- gpio_direction_output(gpio,
- (flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
+ }
+
+ gpiod_set_consumer_name(gpio, "vmmc-relay");
}
dev_info(&pdev->dev, "reserved %dMB at 0x%p", CP1_SIZE >> 20, cp1_base);
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index fcef74084492..88242dc7de17 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,17 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned long long __bswapdi2(unsigned long long u);
unsigned long long notrace __bswapdi2(unsigned long long u)
{
- return (((u) & 0xff00000000000000ull) >> 56) |
- (((u) & 0x00ff000000000000ull) >> 40) |
- (((u) & 0x0000ff0000000000ull) >> 24) |
- (((u) & 0x000000ff00000000ull) >> 8) |
- (((u) & 0x00000000ff000000ull) << 8) |
- (((u) & 0x0000000000ff0000ull) << 24) |
- (((u) & 0x000000000000ff00ull) << 40) |
- (((u) & 0x00000000000000ffull) << 56);
+ return ___constant_swab64(u);
}
-
EXPORT_SYMBOL(__bswapdi2);
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 22d8e4f6d66e..2ed655497de5 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned int __bswapsi2(unsigned int u);
unsigned int notrace __bswapsi2(unsigned int u)
{
- return (((u) & 0xff000000) >> 24) |
- (((u) & 0x00ff0000) >> 8) |
- (((u) & 0x0000ff00) << 8) |
- (((u) & 0x000000ff) << 24);
+ return ___constant_swab32(u);
}
-
EXPORT_SYMBOL(__bswapsi2);
diff --git a/arch/mips/loongson2ef/common/pci.c b/arch/mips/loongson2ef/common/pci.c
index 200916925e95..7d9ea51e8c01 100644
--- a/arch/mips/loongson2ef/common/pci.c
+++ b/arch/mips/loongson2ef/common/pci.c
@@ -73,8 +73,6 @@ static void __init setup_pcimap(void)
#endif
}
-extern int sbx00_acpi_init(void);
-
static int __init pcibios_init(void)
{
setup_pcimap();
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 794c96c2a4cd..311dc1580bbd 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
if (plat_dat->bus_id) {
__raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
GMAC1_USE_UART0, LS1X_MUX_CTRL0);
- switch (plat_dat->interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
break;
default:
pr_err("unsupported mii mode %d\n",
- plat_dat->interface);
+ plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC1_SHUT;
} else {
- switch (plat_dat->interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
break;
default:
pr_err("unsupported mii mode %d\n",
- plat_dat->interface);
+ plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC0_SHUT;
@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
plat_dat = dev_get_platdata(&pdev->dev);
val &= ~PHY_INTF_SELI;
- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
val |= 0x4 << PHY_INTF_SELI_SHIFT;
__raw_writel(val, LS1X_MUX_CTRL1);
@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
.bus_id = 0,
.phy_addr = -1,
#if defined(CONFIG_LOONGSON1_LS1B)
- .interface = PHY_INTERFACE_MODE_MII,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
#elif defined(CONFIG_LOONGSON1_LS1C)
- .interface = PHY_INTERFACE_MODE_RMII,
+ .phy_interface = PHY_INTERFACE_MODE_RMII,
#endif
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
.bus_id = 1,
.phy_addr = -1,
- .interface = PHY_INTERFACE_MODE_MII,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 587cf1d115e8..265bc57819df 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1032,7 +1032,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
*/
if (dec_insn.micro_mips_mode) {
/*
- * If next instruction is a 16-bit instruction, then it
+ * If next instruction is a 16-bit instruction, then
* it cannot be a FPU instruction. This could happen
* since we can be called for non-FPU instructions.
*/
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index 30e0922f4cea..e17d862cfa4c 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -2,7 +2,7 @@
/*
*/
-/**
+/*
* Both AR2315 and AR2316 chips have PCI interface unit, which supports DMA
* and interrupt. PCI interface supports MMIO access method, but does not
* seem to support I/O ports.
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 1ca42f482130..8d16cd021f60 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -9,11 +9,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/clk.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
@@ -62,7 +62,7 @@
__iomem void *ltq_pci_mapped_cfg;
static __iomem void *ltq_pci_membase;
-static int reset_gpio;
+static struct gpio_desc *reset_gpio;
static struct clk *clk_pci, *clk_external;
static struct resource pci_io_resource;
static struct resource pci_mem_resource;
@@ -95,6 +95,7 @@ static int ltq_pci_startup(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const __be32 *req_mask, *bus_clk;
u32 temp_buffer;
+ int error;
/* get our clocks */
clk_pci = clk_get(&pdev->dev, NULL);
@@ -123,17 +124,14 @@ static int ltq_pci_startup(struct platform_device *pdev)
clk_disable(clk_external);
/* setup reset gpio used by pci */
- reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
- if (gpio_is_valid(reset_gpio)) {
- int ret = devm_gpio_request(&pdev->dev,
- reset_gpio, "pci-reset");
- if (ret) {
- dev_err(&pdev->dev,
- "failed to request gpio %d\n", reset_gpio);
- return ret;
- }
- gpio_direction_output(reset_gpio, 1);
+ reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ error = PTR_ERR_OR_ZERO(reset_gpio);
+ if (error) {
+ dev_err(&pdev->dev, "failed to request gpio: %d\n", error);
+ return error;
}
+ gpiod_set_consumer_name(reset_gpio, "pci_reset");
/* enable auto-switching between PCI and EBU */
ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
@@ -195,11 +193,11 @@ static int ltq_pci_startup(struct platform_device *pdev)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
/* toggle reset pin */
- if (gpio_is_valid(reset_gpio)) {
- __gpio_set_value(reset_gpio, 0);
+ if (reset_gpio) {
+ gpiod_set_value_cansleep(reset_gpio, 1);
wmb();
mdelay(1);
- __gpio_set_value(reset_gpio, 1);
+ gpiod_set_value_cansleep(reset_gpio, 0);
}
return 0;
}
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
index d9c8c4e46aff..08c46cf122d7 100644
--- a/arch/mips/pic32/pic32mzda/init.c
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -44,7 +44,7 @@ void __init plat_mem_setup(void)
pr_info(" builtin_cmdline : %s\n", CONFIG_CMDLINE);
#endif
if (dtb != __dtb_start)
- strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
#ifdef CONFIG_EARLY_PRINTK
fw_init_early_console(-1);
diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c
index 94ca8379b83c..8c8cc0a81ed8 100644
--- a/arch/mips/ralink/bootrom.c
+++ b/arch/mips/ralink/bootrom.c
@@ -18,22 +18,11 @@ static int bootrom_show(struct seq_file *s, void *unused)
return 0;
}
-
-static int bootrom_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bootrom_show, NULL);
-}
-
-static const struct file_operations bootrom_file_ops = {
- .open = bootrom_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(bootrom);
static int __init bootrom_setup(void)
{
- debugfs_create_file("bootrom", 0444, NULL, NULL, &bootrom_file_ops);
+ debugfs_create_file("bootrom", 0444, NULL, NULL, &bootrom_fops);
return 0;
}
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index e762886d1dda..5143d1cf8984 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -27,15 +27,18 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
{
struct xtalk_bridge_platform_data *bd;
struct sgi_w1_platform_data *wd;
- struct platform_device *pdev;
+ struct platform_device *pdev_wd;
+ struct platform_device *pdev_bd;
struct resource w1_res;
unsigned long offset;
offset = NODE_OFFSET(nasid);
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
- if (!wd)
- goto no_mem;
+ if (!wd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ return;
+ }
snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
offset + (widget << SWIN_SIZE_BITS));
@@ -46,24 +49,35 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
w1_res.end = w1_res.start + 3;
w1_res.flags = IORESOURCE_MEM;
- pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
- if (!pdev) {
- kfree(wd);
- goto no_mem;
+ pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
+ if (!pdev_wd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_kfree_wd;
+ }
+ if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add(pdev_wd)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
+ goto err_put_pdev_wd;
}
- platform_device_add_resources(pdev, &w1_res, 1);
- platform_device_add_data(pdev, wd, sizeof(*wd));
/* platform_device_add_data() duplicates the data */
kfree(wd);
- platform_device_add(pdev);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
- if (!bd)
- goto no_mem;
- pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
- if (!pdev) {
- kfree(bd);
- goto no_mem;
+ if (!bd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_unregister_pdev_wd;
+ }
+ pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev_bd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_kfree_bd;
}
@@ -84,15 +98,31 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
bd->io.flags = IORESOURCE_IO;
bd->io_offset = offset;
- platform_device_add_data(pdev, bd, sizeof(*bd));
+ if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
+ goto err_put_pdev_bd;
+ }
+ if (platform_device_add(pdev_bd)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
+ goto err_put_pdev_bd;
+ }
/* platform_device_add_data() duplicates the data */
kfree(bd);
- platform_device_add(pdev);
pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
return;
-no_mem:
- pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+err_put_pdev_bd:
+ platform_device_put(pdev_bd);
+err_kfree_bd:
+ kfree(bd);
+err_unregister_pdev_wd:
+ platform_device_unregister(pdev_wd);
+ return;
+err_put_pdev_wd:
+ platform_device_put(pdev_wd);
+err_kfree_wd:
+ kfree(wd);
+ return;
}
static int probe_one_port(nasid_t nasid, int widget, int masterwid)
diff --git a/arch/mips/sgi-ip30/ip30-xtalk.c b/arch/mips/sgi-ip30/ip30-xtalk.c
index 8129524421cb..7ceb2b23ea1c 100644
--- a/arch/mips/sgi-ip30/ip30-xtalk.c
+++ b/arch/mips/sgi-ip30/ip30-xtalk.c
@@ -40,12 +40,15 @@ static void bridge_platform_create(int widget, int masterwid)
{
struct xtalk_bridge_platform_data *bd;
struct sgi_w1_platform_data *wd;
- struct platform_device *pdev;
+ struct platform_device *pdev_wd;
+ struct platform_device *pdev_bd;
struct resource w1_res;
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
- if (!wd)
- goto no_mem;
+ if (!wd) {
+ pr_warn("xtalk:%x bridge create out of memory\n", widget);
+ return;
+ }
snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
IP30_SWIN_BASE(widget));
@@ -56,24 +59,35 @@ static void bridge_platform_create(int widget, int masterwid)
w1_res.end = w1_res.start + 3;
w1_res.flags = IORESOURCE_MEM;
- pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
- if (!pdev) {
- kfree(wd);
- goto no_mem;
+ pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
+ if (!pdev_wd) {
+ pr_warn("xtalk:%x bridge create out of memory\n", widget);
+ goto err_kfree_wd;
+ }
+ if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
+ pr_warn("xtalk:%x bridge failed to add platform resources.\n", widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
+ pr_warn("xtalk:%x bridge failed to add platform data.\n", widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add(pdev_wd)) {
+ pr_warn("xtalk:%x bridge failed to add platform device.\n", widget);
+ goto err_put_pdev_wd;
}
- platform_device_add_resources(pdev, &w1_res, 1);
- platform_device_add_data(pdev, wd, sizeof(*wd));
/* platform_device_add_data() duplicates the data */
kfree(wd);
- platform_device_add(pdev);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
- if (!bd)
- goto no_mem;
- pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
- if (!pdev) {
- kfree(bd);
- goto no_mem;
+ if (!bd) {
+ pr_warn("xtalk:%x bridge create out of memory\n", widget);
+ goto err_unregister_pdev_wd;
+ }
+ pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev_bd) {
+ pr_warn("xtalk:%x bridge create out of memory\n", widget);
+ goto err_kfree_bd;
}
bd->bridge_addr = IP30_RAW_SWIN_BASE(widget);
@@ -93,15 +107,31 @@ static void bridge_platform_create(int widget, int masterwid)
bd->io.flags = IORESOURCE_IO;
bd->io_offset = IP30_SWIN_BASE(widget);
- platform_device_add_data(pdev, bd, sizeof(*bd));
+ if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
+ pr_warn("xtalk:%x bridge failed to add platform data.\n", widget);
+ goto err_put_pdev_bd;
+ }
+ if (platform_device_add(pdev_bd)) {
+ pr_warn("xtalk:%x bridge failed to add platform device.\n", widget);
+ goto err_put_pdev_bd;
+ }
/* platform_device_add_data() duplicates the data */
kfree(bd);
- platform_device_add(pdev);
pr_info("xtalk:%x bridge widget\n", widget);
return;
-no_mem:
- pr_warn("xtalk:%x bridge create out of memory\n", widget);
+err_put_pdev_bd:
+ platform_device_put(pdev_bd);
+err_kfree_bd:
+ kfree(bd);
+err_unregister_pdev_wd:
+ platform_device_unregister(pdev_wd);
+ return;
+err_put_pdev_wd:
+ platform_device_put(pdev_wd);
+err_kfree_wd:
+ kfree(wd);
+ return;
}
static unsigned int __init xbow_widget_active(s8 wid)
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 86f49c48fc34..2f08ad267a11 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -262,12 +262,6 @@ void __init arch_init_irq(void)
__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));
- /*
- * Note that the timer interrupts are also mapped, but this is
- * done in sb1250_time_init(). Also, the profiling driver
- * does its own management of IP7.
- */
-
/* Enable necessary IPs, disable the rest */
change_c0_status(ST0_IM, imask);
}
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 698274109c91..e712f80fe189 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -937,15 +937,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
pmd = *pmdp;
pmd_clear(pmdp);
- /*
- * pmdp collapse_flush need to ensure that there are no parallel gup
- * walk after this call. This is needed so that we can have stable
- * page ref count when collapsing a page. We don't allow a collapse page
- * if we have gup taken on the page. We can ensure that by sending IPI
- * because gup walk happens with IRQ disabled.
- */
- serialize_against_pte_lookup(vma->vm_mm);
-
radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
return pmd;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index ed66c31e4655..59d18881f35b 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -386,6 +386,7 @@ config RISCV_ISA_C
config RISCV_ISA_SVPBMT
bool "SVPBMT extension support"
depends on 64BIT && MMU
+ depends on !XIP_KERNEL
select RISCV_ALTERNATIVE
default y
help
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
index 6850e9389930..f3623df23b5f 100644
--- a/arch/riscv/Kconfig.erratas
+++ b/arch/riscv/Kconfig.erratas
@@ -46,7 +46,7 @@ config ERRATA_THEAD
config ERRATA_THEAD_PBMT
bool "Apply T-Head memory type errata"
- depends on ERRATA_THEAD && 64BIT
+ depends on ERRATA_THEAD && 64BIT && MMU
select RISCV_ALTERNATIVE_EARLY
default y
help
@@ -57,7 +57,7 @@ config ERRATA_THEAD_PBMT
config ERRATA_THEAD_CMO
bool "Apply T-Head cache management errata"
- depends on ERRATA_THEAD
+ depends on ERRATA_THEAD && MMU
select RISCV_DMA_NONCOHERENT
default y
help
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index 202c83f677b2..96648c176f37 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -37,6 +37,7 @@ static bool errata_probe_cmo(unsigned int stage,
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return false;
+ riscv_cbom_block_size = L1_CACHE_BYTES;
riscv_noncoherent_supported();
return true;
#else
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index a60acaecfeda..273ece6b622f 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -42,6 +42,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
+/*
+ * The T-Head CMO errata internally probe the CBOM block size, but otherwise
+ * don't depend on Zicbom.
+ */
+extern unsigned int riscv_cbom_block_size;
#ifdef CONFIG_RISCV_ISA_ZICBOM
void riscv_init_cbom_blocksize(void);
#else
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 95ef6e2bf45c..2dfc463b86bb 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -296,8 +296,8 @@ void __init setup_arch(char **cmdline_p)
setup_smp();
#endif
- riscv_fill_hwcap();
riscv_init_cbom_blocksize();
+ riscv_fill_hwcap();
apply_boot_alternatives();
}
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 5a2de6b6f882..5c591123c440 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -124,6 +124,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
+ regs->cause = -1UL;
+
return regs->a0;
badframe:
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index cd2225304c82..e3f9bdf47c5f 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -12,7 +12,7 @@
#include <linux/of_device.h>
#include <asm/cacheflush.h>
-static unsigned int riscv_cbom_block_size = L1_CACHE_BYTES;
+unsigned int riscv_cbom_block_size;
static bool noncoherent_supported;
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
@@ -79,38 +79,41 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void riscv_init_cbom_blocksize(void)
{
struct device_node *node;
+ unsigned long cbom_hartid;
+ u32 val, probed_block_size;
int ret;
- u32 val;
+ probed_block_size = 0;
for_each_of_cpu_node(node) {
unsigned long hartid;
- int cbom_hartid;
ret = riscv_of_processor_hartid(node, &hartid);
if (ret)
continue;
- if (hartid < 0)
- continue;
-
/* set block-size for cbom extension if available */
ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
if (ret)
continue;
- if (!riscv_cbom_block_size) {
- riscv_cbom_block_size = val;
+ if (!probed_block_size) {
+ probed_block_size = val;
cbom_hartid = hartid;
} else {
- if (riscv_cbom_block_size != val)
- pr_warn("cbom-block-size mismatched between harts %d and %lu\n",
+ if (probed_block_size != val)
+ pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
cbom_hartid, hartid);
}
}
+
+ if (probed_block_size)
+ riscv_cbom_block_size = probed_block_size;
}
#endif
void riscv_noncoherent_supported(void)
{
+ WARN(!riscv_cbom_block_size,
+ "Non-coherent DMA support enabled without a block size\n");
noncoherent_supported = true;
}
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 082ec5f2c3a5..0243b6e38d36 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -489,6 +489,8 @@ enum prot_type {
PROT_TYPE_ALC = 2,
PROT_TYPE_DAT = 3,
PROT_TYPE_IEP = 4,
+ /* Dummy value for passing an initialized value when code != PGM_PROTECTION */
+ PROT_NONE,
};
static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
@@ -504,6 +506,10 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
switch (code) {
case PGM_PROTECTION:
switch (prot) {
+ case PROT_NONE:
+ /* We should never get here, acts like termination */
+ WARN_ON_ONCE(1);
+ break;
case PROT_TYPE_IEP:
tec->b61 = 1;
fallthrough;
@@ -968,8 +974,10 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
return rc;
} else {
gpa = kvm_s390_real_to_abs(vcpu, ga);
- if (kvm_is_error_gpa(vcpu->kvm, gpa))
+ if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
rc = PGM_ADDRESSING;
+ prot = PROT_NONE;
+ }
}
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, prot);
@@ -1112,8 +1120,6 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
if (rc == PGM_PROTECTION && try_storage_prot_override)
rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
data, fragment_len, PAGE_SPO_ACC);
- if (rc == PGM_PROTECTION)
- prot = PROT_TYPE_KEYC;
if (rc)
break;
len -= fragment_len;
@@ -1123,6 +1129,10 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
if (rc > 0) {
bool terminate = (mode == GACC_STORE) && (idx > 0);
+ if (rc == PGM_PROTECTION)
+ prot = PROT_TYPE_KEYC;
+ else
+ prot = PROT_NONE;
rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
}
out_unlock:
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b9c944b262c7..ab569faf0df2 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -3324,7 +3324,7 @@ static void aen_host_forward(unsigned long si)
if (gaite->count == 0)
return;
if (gaite->aisb != 0)
- set_bit_inv(gaite->aisbo, (unsigned long *)gaite->aisb);
+ set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));
kvm = kvm_s390_pci_si_to_kvm(aift, si);
if (!kvm)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index edfd4bbd0cba..b7ef0b71014d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -505,7 +505,7 @@ int kvm_arch_init(void *opaque)
goto out;
}
- if (kvm_s390_pci_interp_allowed()) {
+ if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
rc = kvm_s390_pci_init();
if (rc) {
pr_err("Unable to allocate AIFT for PCI\n");
@@ -527,7 +527,7 @@ out:
void kvm_arch_exit(void)
{
kvm_s390_gib_destroy();
- if (kvm_s390_pci_interp_allowed())
+ if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
kvm_s390_pci_exit();
debug_unregister(kvm_s390_dbf);
debug_unregister(kvm_s390_dbf_uv);
diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c
index bb8c335d17b9..c50c1645c0ae 100644
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/pci.c
@@ -58,7 +58,7 @@ static int zpci_setup_aipb(u8 nisc)
if (!zpci_aipb)
return -ENOMEM;
- aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, 0);
+ aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
if (!aift->sbv) {
rc = -ENOMEM;
goto free_aipb;
@@ -71,7 +71,7 @@ static int zpci_setup_aipb(u8 nisc)
rc = -ENOMEM;
goto free_sbv;
}
- aift->gait = (struct zpci_gaite *)page_to_phys(page);
+ aift->gait = (struct zpci_gaite *)page_to_virt(page);
zpci_aipb->aipb.faisb = virt_to_phys(aift->sbv->vector);
zpci_aipb->aipb.gait = virt_to_phys(aift->gait);
@@ -373,7 +373,7 @@ static int kvm_s390_pci_aif_disable(struct zpci_dev *zdev, bool force)
gaite->gisc = 0;
gaite->aisbo = 0;
gaite->gisa = 0;
- aift->kzdev[zdev->aisb] = 0;
+ aift->kzdev[zdev->aisb] = NULL;
/* Clear zdev info */
airq_iv_free_bit(aift->sbv, zdev->aisb);
airq_iv_release(zdev->aibv);
@@ -672,23 +672,31 @@ out:
int kvm_s390_pci_init(void)
{
+ zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
+ zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
+
+ if (!kvm_s390_pci_interp_allowed())
+ return 0;
+
aift = kzalloc(sizeof(struct zpci_aift), GFP_KERNEL);
if (!aift)
return -ENOMEM;
spin_lock_init(&aift->gait_lock);
mutex_init(&aift->aift_lock);
- zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
- zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
return 0;
}
void kvm_s390_pci_exit(void)
{
- mutex_destroy(&aift->aift_lock);
zpci_kvm_hook.kvm_register = NULL;
zpci_kvm_hook.kvm_unregister = NULL;
+ if (!kvm_s390_pci_interp_allowed())
+ return;
+
+ mutex_destroy(&aift->aift_lock);
+
kfree(aift);
}
diff --git a/arch/s390/kvm/pci.h b/arch/s390/kvm/pci.h
index 3a3606c3a0fe..486d06ef563f 100644
--- a/arch/s390/kvm/pci.h
+++ b/arch/s390/kvm/pci.h
@@ -46,9 +46,9 @@ extern struct zpci_aift *aift;
static inline struct kvm *kvm_s390_pci_si_to_kvm(struct zpci_aift *aift,
unsigned long si)
{
- if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || aift->kzdev == 0 ||
- aift->kzdev[si] == 0)
- return 0;
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || !aift->kzdev ||
+ !aift->kzdev[si])
+ return NULL;
return aift->kzdev[si]->kvm;
};
diff --git a/arch/um/Makefile b/arch/um/Makefile
index f2fe63bfd819..f1d4d67157be 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -132,10 +132,18 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
+# Avoid binutils 2.39+ warnings by marking the stack non-executable and
+# ignoring warnings for the kallsyms sections.
+LDFLAGS_EXECSTACK = -z noexecstack
+ifeq ($(CONFIG_LD_IS_BFD),y)
+LDFLAGS_EXECSTACK += $(call ld-option,--no-warn-rwx-segments)
+endif
+
LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt))
# Used by link-vmlinux.sh which has special support for um link
export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
+export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK)
# When cleaning we don't include .config, so we don't include
# TT or skas makefiles and don't clean skas_ptregs.h.
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 7452f70d50d0..746715379f12 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -48,7 +48,8 @@ void show_stack(struct task_struct *task, unsigned long *stack,
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
pr_cont("\n");
- pr_cont(" %08lx", *stack++);
+ pr_cont(" %08lx", READ_ONCE_NOCHECK(*stack));
+ stack++;
}
printk("%sCall Trace:\n", loglvl);
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index e0de60e503b9..d9e023c78f56 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -33,7 +33,7 @@
#include "um_arch.h"
#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
-#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c601939a74b1..c20d8cd47c48 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2102,6 +2102,15 @@ static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
+EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
+
+static struct attribute *grt_mem_attrs[] = {
+ EVENT_PTR(mem_ld_grt),
+ EVENT_PTR(mem_st_grt),
+ NULL
+};
+
static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
@@ -5975,6 +5984,36 @@ __init int intel_pmu_init(void)
name = "Tremont";
break;
+ case INTEL_FAM6_ALDERLAKE_N:
+ x86_pmu.mid_ack = true;
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+ hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_grt_extra_regs;
+
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.pebs_block = true;
+ x86_pmu.lbr_pt_coexist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+
+ intel_pmu_pebs_data_source_grt();
+ x86_pmu.pebs_latency_data = adl_latency_data_small;
+ x86_pmu.get_event_constraints = tnt_get_event_constraints;
+ x86_pmu.limit_period = spr_limit_period;
+ td_attr = tnt_events_attrs;
+ mem_attr = grt_mem_attrs;
+ extra_attr = nhm_format_attr;
+ pr_cont("Gracemont events, ");
+ name = "gracemont";
+ break;
+
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX:
@@ -6317,7 +6356,6 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ALDERLAKE_N:
case INTEL_FAM6_RAPTORLAKE:
case INTEL_FAM6_RAPTORLAKE_P:
/*
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index de1f55d51784..ac973c6f82ad 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -110,13 +110,18 @@ void __init intel_pmu_pebs_data_source_skl(bool pmem)
__intel_pmu_pebs_data_source_skl(pmem, pebs_data_source);
}
-static void __init intel_pmu_pebs_data_source_grt(u64 *data_source)
+static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source)
{
data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
}
+void __init intel_pmu_pebs_data_source_grt(void)
+{
+ __intel_pmu_pebs_data_source_grt(pebs_data_source);
+}
+
void __init intel_pmu_pebs_data_source_adl(void)
{
u64 *data_source;
@@ -127,7 +132,7 @@ void __init intel_pmu_pebs_data_source_adl(void)
data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
- intel_pmu_pebs_data_source_grt(data_source);
+ __intel_pmu_pebs_data_source_grt(data_source);
}
static u64 precise_store_data(u64 status)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ba3d24a6a4ec..266143abcbd8 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1516,6 +1516,8 @@ void intel_pmu_pebs_data_source_skl(bool pmem);
void intel_pmu_pebs_data_source_adl(void);
+void intel_pmu_pebs_data_source_grt(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index aeb38023a703..5d75fe229342 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -115,6 +115,9 @@
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_FAM6_RAPTORLAKE_S 0xBF
+#define INTEL_FAM6_METEORLAKE 0xAC
+#define INTEL_FAM6_METEORLAKE_L 0xAA
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2c96c43c313a..aa381ab69a19 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -729,6 +729,7 @@ struct kvm_vcpu_arch {
struct fpu_guest guest_fpu;
u64 xcr0;
+ u64 guest_supported_xcr0;
struct kvm_pio_request pio;
void *pio_data;
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 81a0211a372d..a73bced40e24 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,16 +21,6 @@ DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
-static inline struct cpumask *cpu_llc_shared_mask(int cpu)
-{
- return per_cpu(cpu_llc_shared_map, cpu);
-}
-
-static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
-{
- return per_cpu(cpu_l2c_shared_map, cpu);
-}
-
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
@@ -172,6 +162,16 @@ extern int safe_smp_processor_id(void);
# define safe_smp_processor_id() smp_processor_id()
#endif
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+ return per_cpu(cpu_llc_shared_map, cpu);
+}
+
+static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
+{
+ return per_cpu(cpu_l2c_shared_map, cpu);
+}
+
#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu) wbinvd()
static inline int wbinvd_on_all_cpus(void)
@@ -179,6 +179,11 @@ static inline int wbinvd_on_all_cpus(void)
wbinvd();
return 0;
}
+
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+ return (struct cpumask *)cpumask_of(0);
+}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 62f6b8b7c4a5..4f3204364caa 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1319,22 +1319,23 @@ struct bp_patching_desc {
atomic_t refs;
};
-static struct bp_patching_desc *bp_desc;
+static struct bp_patching_desc bp_desc;
static __always_inline
-struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
+struct bp_patching_desc *try_get_desc(void)
{
- /* rcu_dereference */
- struct bp_patching_desc *desc = __READ_ONCE(*descp);
+ struct bp_patching_desc *desc = &bp_desc;
- if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
+ if (!arch_atomic_inc_not_zero(&desc->refs))
return NULL;
return desc;
}
-static __always_inline void put_desc(struct bp_patching_desc *desc)
+static __always_inline void put_desc(void)
{
+ struct bp_patching_desc *desc = &bp_desc;
+
smp_mb__before_atomic();
arch_atomic_dec(&desc->refs);
}
@@ -1367,15 +1368,15 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
/*
* Having observed our INT3 instruction, we now must observe
- * bp_desc:
+ * bp_desc with non-zero refcount:
*
- * bp_desc = desc INT3
+ * bp_desc.refs = 1 INT3
* WMB RMB
- * write INT3 if (desc)
+ * write INT3 if (bp_desc.refs != 0)
*/
smp_rmb();
- desc = try_get_desc(&bp_desc);
+ desc = try_get_desc();
if (!desc)
return 0;
@@ -1429,7 +1430,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
ret = 1;
out_put:
- put_desc(desc);
+ put_desc();
return ret;
}
@@ -1460,18 +1461,20 @@ static int tp_vec_nr;
*/
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
- struct bp_patching_desc desc = {
- .vec = tp,
- .nr_entries = nr_entries,
- .refs = ATOMIC_INIT(1),
- };
unsigned char int3 = INT3_INSN_OPCODE;
unsigned int i;
int do_sync;
lockdep_assert_held(&text_mutex);
- smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
+ bp_desc.vec = tp;
+ bp_desc.nr_entries = nr_entries;
+
+ /*
+ * Corresponds to the implicit memory barrier in try_get_desc() to
+ * ensure reading a non-zero refcount provides up to date bp_desc data.
+ */
+ atomic_set_release(&bp_desc.refs, 1);
/*
* Corresponding read barrier in int3 notifier for making sure the
@@ -1559,12 +1562,10 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
text_poke_sync();
/*
- * Remove and synchronize_rcu(), except we have a very primitive
- * refcount based completion.
+ * Remove and wait for refs to be zero.
*/
- WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
- if (!atomic_dec_and_test(&desc.refs))
- atomic_cond_read_acquire(&desc.refs, !VAL);
+ if (!atomic_dec_and_test(&bp_desc.refs))
+ atomic_cond_read_acquire(&bp_desc.refs, !VAL);
}
static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
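
For reference, the refcount-based publish/complete scheme that replaces the RCU-style bp_desc pointer in the hunk above can be illustrated outside the kernel. The following is a minimal userspace sketch using C11 atomics in place of the kernel's arch_atomic_*() and atomic_cond_read_acquire() helpers; the publish()/retire() names and the struct layout are illustrative only, not kernel APIs.

        /* Sketch only: userspace analogue of the bp_desc scheme above. */
        #include <stdatomic.h>
        #include <stdbool.h>

        struct desc {
                atomic_int refs;        /* 0 == no patching in progress */
                const void *vec;        /* payload, published before refs != 0 */
                unsigned int nr_entries;
        };

        static struct desc g_desc;

        /* Reader (the INT3 handler in the patch): take a reference only if live. */
        static bool try_get_desc(void)
        {
                int refs = atomic_load_explicit(&g_desc.refs, memory_order_acquire);

                while (refs != 0)
                        if (atomic_compare_exchange_weak_explicit(&g_desc.refs, &refs,
                                                                  refs + 1,
                                                                  memory_order_acquire,
                                                                  memory_order_acquire))
                                return true;
                return false;
        }

        static void put_desc(void)
        {
                atomic_fetch_sub_explicit(&g_desc.refs, 1, memory_order_release);
        }

        /* Writer: fill in the payload, then make it live with a release store,
         * mirroring atomic_set_release(&bp_desc.refs, 1) in the patch. */
        static void publish(const void *vec, unsigned int nr)
        {
                g_desc.vec = vec;
                g_desc.nr_entries = nr;
                atomic_store_explicit(&g_desc.refs, 1, memory_order_release);
        }

        /* Writer teardown: drop the initial reference and, if readers are still
         * in flight, wait for refs to reach zero, which is what the kernel's
         * atomic_cond_read_acquire(&bp_desc.refs, !VAL) does without spinning. */
        static void retire(void)
        {
                if (atomic_fetch_sub_explicit(&g_desc.refs, 1, memory_order_acq_rel) != 1)
                        while (atomic_load_explicit(&g_desc.refs, memory_order_acquire))
                                ;
        }

A reader that observes refs == 0 simply backs off, exactly as poke_int3_handler() returns 0 when try_get_desc() fails.
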
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 24c1bb8eb196..8bdeae2fc309 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -344,8 +344,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
}
va_page = sgx_encl_grow(encl, false);
- if (IS_ERR(va_page))
+ if (IS_ERR(va_page)) {
+ if (PTR_ERR(va_page) == -EBUSY)
+ vmret = VM_FAULT_NOPAGE;
goto err_out_epc;
+ }
if (va_page)
list_add(&va_page->list, &encl->va_pages);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 515e2a5f25bb..0aad028f04d4 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -49,9 +49,13 @@ static LIST_HEAD(sgx_dirty_page_list);
* Reset post-kexec EPC pages to the uninitialized state. The pages are removed
* from the input list, and made available for the page allocator. SECS pages
* prepending their children in the input list are left intact.
+ *
+ * Return 0 when sanitization was successful or kthread was stopped, and the
+ * number of unsanitized pages otherwise.
*/
-static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
+static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list)
{
+ unsigned long left_dirty = 0;
struct sgx_epc_page *page;
LIST_HEAD(dirty);
int ret;
@@ -59,7 +63,7 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
/* dirty_page_list is thread-local, no need for a lock: */
while (!list_empty(dirty_page_list)) {
if (kthread_should_stop())
- return;
+ return 0;
page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);
@@ -92,12 +96,14 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
} else {
/* The page is not yet clean - move to the dirty list. */
list_move_tail(&page->list, &dirty);
+ left_dirty++;
}
cond_resched();
}
list_splice(&dirty, dirty_page_list);
+ return left_dirty;
}
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
@@ -395,10 +401,7 @@ static int ksgxd(void *p)
* required for SECS pages, whose child pages blocked EREMOVE.
*/
__sgx_sanitize_pages(&sgx_dirty_page_list);
- __sgx_sanitize_pages(&sgx_dirty_page_list);
-
- /* sanity check: */
- WARN_ON(!list_empty(&sgx_dirty_page_list));
+ WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));
while (!kthread_should_stop()) {
if (try_to_freeze())
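
The ksgxd() change above folds the old "run the pass twice, then sanity-check the list" sequence into a single warning on the second pass's return value. As a rough standalone sketch of that shape (plain C with a hypothetical item type, not the SGX EPC code):

        #include <stdio.h>

        struct item {
                struct item *next;
                int busy;                       /* stands in for "EREMOVE failed" */
        };

        /* One pass over the list; returns how many items are still dirty. */
        static unsigned long sanitize_pass(struct item **list)
        {
                struct item **pp = list;
                unsigned long left_dirty = 0;

                while (*pp) {
                        struct item *it = *pp;

                        if (!it->busy) {
                                *pp = it->next;         /* reclaimed: unlink it */
                        } else {
                                it->busy = 0;           /* assume a retry succeeds */
                                left_dirty++;
                                pp = &it->next;
                        }
                }
                return left_dirty;
        }

        int main(void)
        {
                struct item c = { 0, 0 }, b = { &c, 1 }, a = { &b, 0 };
                struct item *list = &a;

                sanitize_pass(&list);           /* first pass, as in ksgxd() */
                if (sanitize_pass(&list))       /* the WARN_ON() equivalent */
                        fprintf(stderr, "items left dirty after second pass\n");
                return 0;
        }
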
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 75dcf7a72605..2796dde06302 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -315,7 +315,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *best;
- u64 guest_supported_xcr0;
best = kvm_find_cpuid_entry(vcpu, 1);
if (best && apic) {
@@ -327,10 +326,16 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_apic_set_version(vcpu);
}
- guest_supported_xcr0 =
+ vcpu->arch.guest_supported_xcr0 =
cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
- vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
+ /*
+ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+ * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
+ * supported by the host.
+ */
+ vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
+ XFEATURE_MASK_FPSSE;
kvm_update_pv_runtime(vcpu);
@@ -897,8 +902,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0;
}
break;
- case 9:
- break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
union cpuid10_edx edx;
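
The user_xfeatures change in kvm_vcpu_after_set_cpuid() above amounts to always keeping the legacy FP and SSE bits set in the saveable-feature mask, regardless of what CPUID exposes. A trivial illustration of the mask arithmetic, using the architectural XCR0 bit positions (constants redefined here only for the example):

        #include <stdint.h>
        #include <stdio.h>

        #define XFEATURE_MASK_FP        (1ULL << 0)     /* x87 state */
        #define XFEATURE_MASK_SSE       (1ULL << 1)     /* SSE/XMM state */
        #define XFEATURE_MASK_FPSSE     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)

        int main(void)
        {
                /* Suppose the guest CPUID advertises no XSAVE features at all. */
                uint64_t guest_supported_xcr0 = 0;

                /* KVM_{G,S}ET_XSAVE must still be able to move FP+SSE state. */
                uint64_t user_xfeatures = guest_supported_xcr0 | XFEATURE_MASK_FPSSE;

                printf("user_xfeatures = %#llx\n",
                       (unsigned long long)user_xfeatures);    /* prints 0x3 */
                return 0;
        }
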
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d5ec3a2ed5a4..aacb28c83e43 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4132,6 +4132,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ecx, edx;
+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
+ return emulate_ud(ctxt);
+
eax = reg_read(ctxt, VCPU_REGS_RAX);
edx = reg_read(ctxt, VCPU_REGS_RDX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e418ef3ecfcb..3552e6af3684 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1596,6 +1596,8 @@ static void __rmap_add(struct kvm *kvm,
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
rmap_count = pte_list_add(cache, spte, rmap_head);
+ if (rmap_count > kvm->stat.max_mmu_rmap_size)
+ kvm->stat.max_mmu_rmap_size = rmap_count;
if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
kvm_zap_all_rmap_sptes(kvm, rmap_head);
kvm_flush_remote_tlbs_with_address(
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43a6a7efc6ec..b0c47b41c264 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1011,15 +1011,10 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
-static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
-}
-
#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
- return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
+ return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
}
#endif
@@ -1042,7 +1037,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
* saving. However, xcr0 bit 0 is always set, even if the
* emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
*/
- valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
+ valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
if (xcr0 & ~valid_bits)
return 1;
@@ -1070,6 +1065,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
+ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
__kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
kvm_inject_gp(vcpu, 0);
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ad0139d25401..f1bb18617156 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -44,7 +44,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
* called from other contexts.
*/
pagefault_disable();
- ret = __copy_from_user_inatomic(to, from, n);
+ ret = raw_copy_from_user(to, from, n);
pagefault_enable();
return ret;
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index f8220fd2c169..829c1409ffbd 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -4,10 +4,12 @@ KCOV_INSTRUMENT_tlb.o := n
KCOV_INSTRUMENT_mem_encrypt.o := n
KCOV_INSTRUMENT_mem_encrypt_amd.o := n
KCOV_INSTRUMENT_mem_encrypt_identity.o := n
+KCOV_INSTRUMENT_pgprot.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_amd.o := n
KASAN_SANITIZE_mem_encrypt_identity.o := n
+KASAN_SANITIZE_pgprot.o := n
# Disable KCSAN entirely, because otherwise we get warnings that some functions
# reference __initdata sections.
@@ -17,6 +19,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_amd.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
+CFLAGS_REMOVE_pgprot.o = -pg
endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
diff --git a/arch/x86/um/shared/sysdep/syscalls_32.h b/arch/x86/um/shared/sysdep/syscalls_32.h
index 68fd2cf526fd..f6e9f84397e7 100644
--- a/arch/x86/um/shared/sysdep/syscalls_32.h
+++ b/arch/x86/um/shared/sysdep/syscalls_32.h
@@ -6,10 +6,9 @@
#include <asm/unistd.h>
#include <sysdep/ptrace.h>
-typedef long syscall_handler_t(struct pt_regs);
+typedef long syscall_handler_t(struct syscall_args);
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
- ((long (*)(struct syscall_args)) \
- (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
+ ((*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
index ac8eee093f9c..66162eafd8e8 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
@@ -65,9 +65,6 @@ static int get_free_idx(struct task_struct* task)
struct thread_struct *t = &task->thread;
int idx;
- if (!t->arch.tls_array)
- return GDT_ENTRY_TLS_MIN;
-
for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
if (!t->arch.tls_array[idx].present)
return idx + GDT_ENTRY_TLS_MIN;
@@ -240,9 +237,6 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info,
{
struct thread_struct *t = &task->thread;
- if (!t->arch.tls_array)
- goto clear;
-
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index 8c0396fd0e6f..6fbe97c52c99 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -65,7 +65,7 @@ quiet_cmd_vdso = VDSO $@
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv
+VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack
GCOV_PROFILE := n
#
diff --git a/block/genhd.c b/block/genhd.c
index d36fabf0abc1..988ba52fd331 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -602,7 +602,6 @@ void del_gendisk(struct gendisk *disk)
* Prevent new I/O from crossing bio_queue_enter().
*/
blk_queue_start_drain(q);
- blk_mq_freeze_queue_wait(q);
if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -626,6 +625,8 @@ void del_gendisk(struct gendisk *disk)
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
+ blk_mq_freeze_queue_wait(q);
+
blk_throtl_cancel_bios(disk->queue);
blk_sync_queue(q);
diff --git a/certs/Kconfig b/certs/Kconfig
index bf9b511573d7..1f109b070877 100644
--- a/certs/Kconfig
+++ b/certs/Kconfig
@@ -43,7 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
bool "Provide system-wide ring of trusted keys"
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
- depends on X509_CERTIFICATE_PARSER
+ depends on X509_CERTIFICATE_PARSER = y
help
Provide a system keyring to which trusted keys can be added. Keys in
the keyring are considered to be trusted. Keys may be added at will
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 1778016ea895..acfabfe07c4f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -531,10 +531,27 @@ static void wait_for_freeze(void)
/* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
+ /*
+ * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
+ * not this code. Assume that any Intel systems using this
+ * are ancient and may need the dummy wait. This also assumes
+ * that the motivating chipset issue was Intel-only.
+ */
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
#endif
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
+ /*
+ * Dummy wait op - must do something useless after P_LVL2 read
+ * because chipsets cannot guarantee that STPCLK# signal gets
+ * asserted in time to freeze execution properly
+ *
+ * This workaround has been in place since the original ACPI
+ * implementation was merged, circa 2002.
+ *
+ * If a profile is pointing to this instruction, please first
+ * consider moving your system to a more modern idle
+ * mechanism.
+ */
inl(acpi_gbl_FADT.xpm_timer_block.address);
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 826d41f341e4..c9a9aa607b62 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3988,6 +3988,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* These specific Pioneer models have LPM issues */
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+
/* Crucial BX100 SSD 500GB has broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 7a5fe41aa5ae..13b9d0fdd42c 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1018,26 +1018,25 @@ DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
/**
- * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
- * @ap: ATA port to which the device change the queue depth
+ * ata_change_queue_depth - Set a device maximum queue depth
+ * @ap: ATA port of the target device
+ * @dev: target ATA device
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
*
- * libsas and libata have different approaches for associating a sdev to
- * its ata_port.
+ * Helper to set a device maximum queue depth, usable with both libsas
+ * and libata.
*
*/
-int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth)
+int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth)
{
- struct ata_device *dev;
unsigned long flags;
- if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+ if (!dev || !ata_dev_enabled(dev))
return sdev->queue_depth;
- dev = ata_scsi_find_dev(ap, sdev);
- if (!dev || !ata_dev_enabled(dev))
+ if (queue_depth < 1 || queue_depth == sdev->queue_depth)
return sdev->queue_depth;
/* NCQ enabled? */
@@ -1059,7 +1058,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
return scsi_change_queue_depth(sdev, queue_depth);
}
-EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
+EXPORT_SYMBOL_GPL(ata_change_queue_depth);
/**
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
@@ -1080,7 +1079,8 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- return __ata_change_queue_depth(ap, sdev, queue_depth);
+ return ata_change_queue_depth(ap, ata_scsi_find_dev(ap, sdev),
+ sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 29e2f55c6faa..ff9602a0e54e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1055,6 +1055,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
+ int depth = 1;
if (!ata_id_has_unload(dev->id))
dev->flags |= ATA_DFLAG_NO_UNLOAD;
@@ -1100,13 +1101,10 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
- if (dev->flags & ATA_DFLAG_NCQ) {
- int depth;
-
+ if (dev->flags & ATA_DFLAG_NCQ)
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
- depth = min(ATA_MAX_QUEUE, depth);
- scsi_change_queue_depth(sdev, depth);
- }
+ depth = min(ATA_MAX_QUEUE, depth);
+ scsi_change_queue_depth(sdev, depth);
if (dev->flags & ATA_DFLAG_TRUSTED)
sdev->security_supported = 1;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 753e7cca0f40..5fb4bc51dd8b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1625,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg)
}
early_param("fw_devlink", fw_devlink_setup);
-static bool fw_devlink_strict = true;
+static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
return strtobool(arg, &fw_devlink_strict);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 30255fcaf181..dd9a05174726 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -322,14 +322,14 @@ static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(status))
return status;
- blk_mq_start_request(req);
-
vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
if (unlikely(vbr->sg_table.nents < 0)) {
virtblk_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
+ blk_mq_start_request(req);
+
return BLK_STS_OK;
}
@@ -391,8 +391,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
}
static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist,
- struct request **requeue_list)
+ struct request **rqlist)
{
unsigned long flags;
int err;
@@ -408,7 +407,7 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
if (err) {
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
- rq_list_add(requeue_list, req);
+ blk_mq_requeue_request(req, true);
}
}
@@ -436,7 +435,7 @@ static void virtio_queue_rqs(struct request **rqlist)
if (!next || req->mq_hctx != next->mq_hctx) {
req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ kick = virtblk_add_req_batch(vq, rqlist);
if (kick)
virtqueue_notify(vq->vq);
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 1a098db12062..680f9d8d357c 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -726,6 +726,7 @@ void iproc_pll_clk_setup(struct device_node *node,
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
+ const char *clk_name;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@@ -773,7 +774,12 @@ void iproc_pll_clk_setup(struct device_node *node,
iclk = &iclk_array[0];
iclk->pll = pll;
- init.name = node->name;
+ ret = of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ init.name = clk_name;
init.ops = &iproc_pll_ops;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
@@ -793,13 +799,11 @@ void iproc_pll_clk_setup(struct device_node *node,
goto err_pll_register;
clk_data->hws[0] = &iclk->hw;
+ parent_name = clk_name;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
- const char *clk_name;
-
memset(&init, 0, sizeof(init));
- parent_name = node->name;
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index fc1bd23d4583..598f3cf4eba4 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
- hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels));
hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
- hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels));
hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels));
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index f5c9fa40491c..dcc41d178238 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -332,7 +332,7 @@ static struct platform_driver imx93_clk_driver = {
.driver = {
.name = "imx93-ccm",
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx93_clk_of_match),
+ .of_match_table = imx93_clk_of_match,
},
};
module_platform_driver(imx93_clk_driver);
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 201bf6e6b6e0..d5544cbc5c48 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -101,15 +101,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw)
bool enabled = false;
/*
- * If the SoC has no global TCU clock, we must ungate the channel's
- * clock to be able to access its registers.
- * If we have a TCU clock, it will be enabled automatically as it has
- * been attached to the regmap.
+ * According to the programming manual, a timer channel's registers can
+ * only be accessed when the channel's stop bit is clear.
*/
- if (!tcu->clk) {
- enabled = !!ingenic_tcu_is_enabled(hw);
- regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
- }
+ enabled = !!ingenic_tcu_is_enabled(hw);
+ regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
return enabled;
}
@@ -120,8 +116,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw)
const struct ingenic_tcu_clk_info *info = tcu_clk->info;
struct ingenic_tcu *tcu = tcu_clk->tcu;
- if (!tcu->clk)
- regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
+ regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
}
static u8 ingenic_tcu_get_parent(struct clk_hw *hw)
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 070c3b896559..b6b89413e090 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -239,6 +239,11 @@ static const struct clk_ops mpfs_clk_cfg_ops = {
.hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
}
+#define CLK_CPU_OFFSET 0u
+#define CLK_AXI_OFFSET 1u
+#define CLK_AHB_OFFSET 2u
+#define CLK_RTCREF_OFFSET 3u
+
static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0,
REG_CLOCK_CONFIG_CR),
@@ -362,7 +367,7 @@ static const struct clk_ops mpfs_periph_clk_ops = {
_flags), \
}
-#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT].hw)
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw)
/*
* Critical clocks:
@@ -370,6 +375,8 @@ static const struct clk_ops mpfs_periph_clk_ops = {
* trap handler
* - CLK_MMUART0: reserved by the hss
* - CLK_DDRC: provides clock to the ddr subsystem
+ * - CLK_RTC: the onboard RTC's AHB bus clock must be kept running as the rtc will stop
+ * if the AHB interface clock is disabled
* - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect)
* clock domain crossers which provide the interface to the FPGA fabric. Disabling them
* causes the FPGA fabric to go into reset.
@@ -394,7 +401,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_CAN0, "clk_periph_can0", PARENT_CLK(AHB), 14, 0),
CLK_PERIPH(CLK_CAN1, "clk_periph_can1", PARENT_CLK(AHB), 15, 0),
CLK_PERIPH(CLK_USB, "clk_periph_usb", PARENT_CLK(AHB), 16, 0),
- CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, 0),
+ CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_QSPI, "clk_periph_qspi", PARENT_CLK(AHB), 19, 0),
CLK_PERIPH(CLK_GPIO0, "clk_periph_gpio0", PARENT_CLK(AHB), 20, 0),
CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0),
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 30056da3e0af..42568c616181 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1191,9 +1191,13 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- /* Force PLL_GPU output divider bits to 0 */
+ /*
+ * Force PLL_GPU output divider bits to 0 and adjust
+ * multiplier to sensible default value of 432 MHz.
+ */
val = readl(reg + SUN50I_H6_PLL_GPU_REG);
- val &= ~BIT(0);
+ val &= ~(GENMASK(15, 8) | BIT(0));
+ val |= 17 << 8;
writel(val, reg + SUN50I_H6_PLL_GPU_REG);
/* Force GPU_CLK divider bits to 0 */
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 62c2b7ac4339..4407203e0c9b 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -449,6 +449,9 @@ static int quad8_events_configure(struct counter_device *counter)
return -EINVAL;
}
+ /* Enable IRQ line */
+ irq_enabled |= BIT(event_node->channel);
+
/* Skip configuration if it is the same as previously set */
if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
continue;
@@ -462,9 +465,6 @@ static int quad8_events_configure(struct counter_device *counter)
priv->irq_trigger[event_node->channel] << 3;
iowrite8(QUAD8_CTR_IOR | ior_cfg,
&priv->reg->channel[event_node->channel].control);
-
- /* Enable IRQ line */
- irq_enabled |= BIT(event_node->channel);
}
iowrite8(irq_enabled, &priv->reg->index_interrupt);
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2a60d0525cde..168195672e2e 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -56,6 +56,10 @@ static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
+ kfree(vc_akcipher_req->src_buf);
+ kfree(vc_akcipher_req->dst_buf);
+ vc_akcipher_req->src_buf = NULL;
+ vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);
crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index cb6401c9e9a4..acf31cc1dbcc 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
.start = r->start,
.end = r->end,
.flags = IORESOURCE_MEM,
+ .desc = IORES_DESC_SOFT_RESERVED,
};
struct platform_device *pdev;
struct memregion_info info;
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index d4f1e4e9603a..85e00701473c 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
pdev = of_find_device_by_node(udma_node);
+ if (np != udma_node)
+ of_node_put(udma_node);
+
if (!pdev) {
pr_debug("UDMA device not found\n");
return ERR_PTR(-EPROBE_DEFER);
}
- if (np != udma_node)
- of_node_put(udma_node);
-
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 6276934d4d2b..8cd4e69dc7b4 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3040,9 +3040,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Request and map I/O memory */
xdev->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
-
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3070,7 +3071,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(xdev->dev,
"missing xlnx,num-fstores property\n");
- return err;
+ goto disable_clks;
}
err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3090,7 +3091,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3137,7 +3142,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
- goto disable_clks;
+ goto error;
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3172,12 +3177,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
-disable_clks:
- xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
return err;
}
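
The xilinx_dma_probe() fix above hinges on the usual ordering of goto-based unwind labels: an early failure jumps past cleanup steps that have not happened yet, and the last label undoes the very first acquisition (here, the clocks). A generic, self-contained sketch of that shape, with made-up resources standing in for clocks, registers and channels:

        #include <stdio.h>
        #include <stdlib.h>

        static int probe(void)
        {
                int err;
                char *clk, *regs, *chan;

                clk = malloc(16);               /* step 1: enable clocks */
                if (!clk)
                        return -1;

                regs = malloc(16);              /* step 2: map registers */
                if (!regs) {
                        err = -1;
                        goto disable_clks;      /* only step 1 to undo */
                }

                chan = malloc(16);              /* step 3: set up channels */
                if (!chan) {
                        err = -1;
                        goto error;             /* steps 2 and 1 need undoing */
                }

                free(chan);
                free(regs);
                free(clk);
                return 0;

        error:                                  /* undo step 2 first... */
                free(regs);
        disable_clks:                           /* ...then fall through to step 1 */
                free(clk);
                return err;
        }

        int main(void)
        {
                return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
        }
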
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index dc299ab36818..3f4ee3954384 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -849,7 +849,7 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
zynqmp_dma_desc_config_eod(chan, desc);
async_tx_ack(&first->async_tx);
- first->async_tx.flags = flags;
+ first->async_tx.flags = (enum dma_ctrl_flags)flags;
return &first->async_tx;
}
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 3ed7ae0d6781..96060bf90a24 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -450,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
+ struct scmi_clock_info *clk;
struct clock_info *ci = ph->get_priv(ph);
- struct scmi_clock_info *clk = ci->clk + clk_id;
+ if (clk_id >= ci->num_clocks)
+ return NULL;
+
+ clk = ci->clk + clk_id;
if (!clk->name[0])
return NULL;
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 8abace56b958..f42dad997ac9 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -106,6 +106,7 @@ enum scmi_optee_pta_cmd {
* @channel_id: OP-TEE channel ID used for this transport
* @tee_session: TEE session identifier
* @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 673f3eb498f4..e9afa8cab730 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
struct scmi_reset_info *pi = ph->get_priv(ph);
- struct reset_dom_info *rdom = pi->dom_info + domain;
+ struct reset_dom_info *rdom;
- if (rdom->async_reset)
+ if (domain >= pi->num_domains)
+ return -EINVAL;
+
+ rdom = pi->dom_info + domain;
+ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
flags |= ASYNCHRONOUS_RESET;
ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
- if (rdom->async_reset)
+ if (flags & ASYNCHRONOUS_RESET)
ret = ph->xops->do_xfer_with_response(ph, t);
else
ret = ph->xops->do_xfer(ph, t);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 581d34c95769..0e05a79de82d 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
@@ -53,27 +52,6 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
return scmi_pd_power(domain, false);
}
-static int scmi_pd_attach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- int ret;
-
- ret = pm_clk_create(dev);
- if (ret)
- return ret;
-
- ret = of_pm_clk_add_clks(dev);
- if (ret >= 0)
- return 0;
-
- pm_clk_destroy(dev);
- return ret;
-}
-
-static void scmi_pd_detach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- pm_clk_destroy(dev);
-}
-
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
int num_domains, i;
@@ -124,10 +102,6 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
- scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
- scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
- scmi_pd->genpd.flags = GENPD_FLAG_PM_CLK |
- GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
@@ -138,9 +112,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ dev_set_drvdata(dev, scmi_pd_data);
+
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER, "genpd" },
{ },
@@ -150,6 +143,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 7288c6117838..0b5853fa9d87 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -762,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
{
int ret;
struct scmi_xfer *t;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
sizeof(__le32), sizeof(__le32), &t);
@@ -771,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
*sensor_config = get_unaligned_le64(t->rx.buf);
@@ -788,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_config_set *msg;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
sizeof(*msg), 0, &t);
@@ -800,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
s->sensor_config = sensor_config;
@@ -831,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
sizeof(*sensor), 0, &t);
@@ -841,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
+ s = si->sensors + sensor_id;
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = ph->xops->do_xfer_with_response(ph, t);
@@ -895,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ s = si->sensors + sensor_id;
if (!count || !readings ||
(!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
return -EINVAL;
@@ -948,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
{
struct sensors_info *si = ph->get_priv(ph);
+ if (sensor_id >= si->num_sensors)
+ return NULL;
+
return si->sensors + sensor_id;
}
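
All of the sensors.c hunks above follow the same pattern: validate the caller-supplied index against the count discovered at enumeration time before doing the pointer arithmetic the old code did unconditionally. In isolation the pattern is just the following (hypothetical types, not the SCMI structures):

        #include <stddef.h>

        struct sensor_info {
                int id;
                /* ... */
        };

        struct sensors_table {
                unsigned int num_sensors;
                struct sensor_info *sensors;    /* array of num_sensors entries */
        };

        /* Return the entry for sensor_id, or NULL if the index is out of range. */
        static struct sensor_info *sensor_lookup(struct sensors_table *si,
                                                 unsigned int sensor_id)
        {
                if (sensor_id >= si->num_sensors)
                        return NULL;    /* reject before si->sensors + sensor_id */

                return si->sensors + sensor_id;
        }
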
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 8ced7af8e56d..4f9fb086eab7 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -48,6 +48,9 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
return NOTIFY_DONE;
wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ if (!wdata)
+ return NOTIFY_DONE;
+
for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
wdata[l] = str[l];
wdata[l] = L'\0';
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 8a18930f3eb6..516f4f0069bd 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -14,7 +14,7 @@
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/*
* See if a user has put the shim into insecure mode. If so, and if the
- * variable doesn't have the runtime attribute set, we might as well
- * honor that.
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
*/
size = sizeof(moksbstate);
status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/* If it fails, we don't care why. Default to secure */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 43ca665af610..7a7abc8959d2 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -516,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
+ /*
+ * Disregard any setup data that was provided by the bootloader:
+ * setup_data could be pointing anywhere, and we have no way of
+ * authenticating or validating the payload.
+ */
+ hdr->setup_data = 0;
+
efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 72c677c910de..133e511355c9 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -148,10 +148,6 @@ static ssize_t flash_count_show(struct device *dev,
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
- flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
- if (!flash_buf)
- return -ENOMEM;
-
if (FLASH_COUNT_SIZE % stride) {
dev_err(sec->dev,
"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
@@ -160,6 +156,10 @@ static ssize_t flash_count_show(struct device *dev,
return -EINVAL;
}
+ flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
+ if (!flash_buf)
+ return -ENOMEM;
+
ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
flash_buf, FLASH_COUNT_SIZE / stride);
if (ret) {
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index f422c3e129a0..f77a965f5780 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -41,14 +41,12 @@
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
* @gc: gpiochip for this instance
- * @irq: irqchip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
struct gpio_chip gc;
- struct irq_chip irq;
void __iomem *base;
struct clk *clk;
};
@@ -70,6 +68,7 @@ static void ftgpio_gpio_mask_irq(struct irq_data *d)
val = readl(g->base + GPIO_INT_EN);
val &= ~BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ftgpio_gpio_unmask_irq(struct irq_data *d)
@@ -78,6 +77,7 @@ static void ftgpio_gpio_unmask_irq(struct irq_data *d)
struct ftgpio_gpio *g = gpiochip_get_data(gc);
u32 val;
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
val = readl(g->base + GPIO_INT_EN);
val |= BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
@@ -221,6 +221,16 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
return 0;
}
+static const struct irq_chip ftgpio_irq_chip = {
+ .name = "FTGPIO010",
+ .irq_ack = ftgpio_gpio_ack_irq,
+ .irq_mask = ftgpio_gpio_mask_irq,
+ .irq_unmask = ftgpio_gpio_unmask_irq,
+ .irq_set_type = ftgpio_gpio_set_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -277,14 +287,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (!IS_ERR(g->clk))
g->gc.set_config = ftgpio_gpio_set_config;
- g->irq.name = "FTGPIO010";
- g->irq.irq_ack = ftgpio_gpio_ack_irq;
- g->irq.irq_mask = ftgpio_gpio_mask_irq;
- g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
- g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
girq = &g->gc.irq;
- girq->chip = &g->irq;
+ gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index a2e505a7545c..523dfd17dd92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
}
fwnode = fwnode_create_software_node(properties, NULL);
- if (IS_ERR(fwnode))
+ if (IS_ERR(fwnode)) {
+ kfree_strarray(line_names, ngpio);
return PTR_ERR(fwnode);
+ }
pdevinfo.name = "gpio-mockup";
pdevinfo.id = idx;
@@ -597,9 +599,9 @@ static int __init gpio_mockup_init(void)
static void __exit gpio_mockup_exit(void)
{
+ gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- gpio_mockup_unregister_pdevs();
}
module_init(gpio_mockup_init);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index aa126ab80f0c..1bb317b8dcce 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -790,8 +790,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
u32 offset;
u32 set;
- if (of_device_is_compatible(mvchip->chip.of_node,
- "marvell,armada-370-gpio")) {
+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ int ret = of_property_read_u32(dev->of_node,
+ "marvell,pwm-offset", &offset);
+ if (ret < 0)
+ return 0;
+ } else {
/*
* There are only two sets of PWM configuration registers for
* all the GPIO lines on those SoCs which this driver reserves
@@ -801,13 +805,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
return 0;
offset = 0;
- } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
- int ret = of_property_read_u32(dev->of_node,
- "marvell,pwm-offset", &offset);
- if (ret < 0)
- return 0;
- } else {
- return 0;
}
if (IS_ERR(mvchip->clk))
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index fa4bc7481f9a..e739dcea61b2 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
girq->init_valid_mask = tqmx86_init_irq_valid_mask;
+
+ irq_domain_set_pm_device(girq->domain, dev);
}
ret = devm_gpiochip_add_data(dev, chip, gpio);
@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
goto out_pm_dis;
}
- irq_domain_set_pm_device(girq->domain, dev);
-
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f8041d4898d1..92f185575e94 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1986,7 +1986,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
ret = -ENODEV;
goto out_free_le;
}
- le->irq = irq;
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -2000,7 +1999,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
init_waitqueue_head(&le->wait);
/* Request a thread to read the events */
- ret = request_threaded_irq(le->irq,
+ ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
@@ -2009,6 +2008,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ le->irq = irq;
+
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 130060834b4e..48bd660ddb85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1050,6 +1050,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index be7aff2d4a57..25e1f5ed7ead 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3152,7 +3152,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -4064,12 +4065,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
adev->in_suspend = true;
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
+ if (r)
+ return r;
+ }
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
@@ -4093,6 +4102,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase2(adev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return 0;
}
@@ -4111,6 +4123,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4125,6 +4143,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
r = amdgpu_device_ip_resume(adev);
+
+ /* no matter what r is, always need to properly release full GPU */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ }
+
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
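
The amdgpu_device_resume() hunk above is careful to pair every successful amdgpu_virt_request_full_gpu() with a release even when the IP resume in between fails. A minimal sketch of that error-path discipline, with placeholder functions standing in for the amdgpu virtualization API:

        #include <stdbool.h>

        /* Placeholders for requesting/releasing exclusive hardware access. */
        static int  request_full_gpu(void)   { return 0; }
        static void release_full_gpu(void)   { }
        static int  resume_ip_blocks(void)   { return -1; }    /* pretend it fails */

        static int resume(bool is_vf)
        {
                int r;

                if (is_vf) {
                        r = request_full_gpu();
                        if (r)
                                return r;       /* nothing acquired, nothing to release */
                }

                r = resume_ip_blocks();

                /* No matter what r is, the acquired access must be released. */
                if (is_vf)
                        release_full_gpu();

                return r;
        }
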
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 5b09c8f4fe95..23998f727c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -39,6 +39,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -497,6 +498,11 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
+};
+
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = drm_atomic_helper_dirtyfb,
};
@@ -1102,7 +1108,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8adeb7469f1e..d0d99ed607dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
- amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
emitted += READ_ONCE(ring->fence_drv.sync_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 23a696d38390..de5b936b016d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -304,6 +304,10 @@ struct amdgpu_gfx {
uint32_t rlc_srlg_feature_version;
uint32_t rlc_srls_fw_version;
uint32_t rlc_srls_feature_version;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t mec_feature_version;
uint32_t mec2_feature_version;
bool mec_fw_write_wait;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fe82b8b19a4e..0c546245793b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -181,6 +181,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
+ /* zero sdma_hqd_mask for non-existent engine */
+ else if (adev->sdma.num_instances == 1)
+ adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
else
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7b46f6bf4187..ad980f4b66e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -222,6 +222,8 @@ struct mes_add_queue_input {
uint64_t tba_addr;
uint64_t tma_addr;
uint32_t is_kfd_process;
+ uint32_t is_aql_queue;
+ uint32_t queue_size;
};
struct mes_remove_queue_input {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 6373bfb47d55..e23f6192c50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -272,3 +272,267 @@ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
+
+static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *common_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+ unsigned int *tmp;
+ unsigned int i;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
+ return -ENOMEM;
+ }
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ if (info->fw) {
+ common_hdr = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_3 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
+ adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
+ adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
+ adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
+
+ adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
+ adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
+ adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
+ adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_4 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor)
+{
+ int err;
+
+ if (version_major < 2) {
+ /* only support rlc_hdr v2.x and onwards */
+ dev_err(adev->dev, "unsupported rlc fw hdr\n");
+ return -EINVAL;
+ }
+
+ /* is_rlc_v2_1 is still used in APU code path */
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ if (version_minor >= 0) {
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+ return err;
+ }
+ }
+ if (version_minor >= 1)
+ amdgpu_gfx_rlc_init_microcode_v2_1(adev);
+ if (version_minor >= 2)
+ amdgpu_gfx_rlc_init_microcode_v2_2(adev);
+ if (version_minor == 3)
+ amdgpu_gfx_rlc_init_microcode_v2_3(adev);
+ if (version_minor == 4)
+ amdgpu_gfx_rlc_init_microcode_v2_4(adev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 03ac36b2c2cf..23f060db9255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -267,5 +267,7 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
-
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor);
#endif
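For orientation, a minimal sketch (not taken from the patch) of how an IP-specific init path can use the new common helper once the per-ASIC header parsing has been removed; the function name and firmware file name below are hypothetical:

static int example_init_rlc_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major, version_minor;
	int err;

	/* Fetch and validate the RLC firmware image (hypothetical file name). */
	err = request_firmware(&adev->gfx.rlc_fw, "amdgpu/example_rlc.bin", adev->dev);
	if (err)
		return err;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		return err;

	/* Only the header version is read locally; all parsing is delegated. */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);

	return amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
}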
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 96b6cf4c4d54..59edf32f775e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -260,8 +260,12 @@ struct rlc_firmware_header_v2_2 {
/* version_major=2, version_minor=3 */
struct rlc_firmware_header_v2_3 {
struct rlc_firmware_header_v2_2 v2_2;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
uint32_t rlcp_ucode_size_bytes;
uint32_t rlcp_ucode_offset_bytes;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t rlcv_ucode_size_bytes;
uint32_t rlcv_ucode_offset_bytes;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f36e4f08db6d..0b52af415b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -191,7 +191,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
fw_name = FIRMWARE_VCN4_0_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = false;
+ adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
fw_name = FIRMWARE_VCN4_0_4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 59cac347baa3..690fd4f639f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2484,8 +2484,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
/* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault
*/
- flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
- AMDGPU_PTE_TF;
+ flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f6b1bb40e503..daf8ba8235cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -474,49 +474,6 @@ static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_3 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
- adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
- adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
- adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
-}
-
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
@@ -527,8 +484,6 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -583,58 +538,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v11_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v11_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 3)
- gfx_v11_0_init_rlcp_rlcv_microcode(adev);
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
@@ -769,60 +680,6 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
}
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
- }
}
out:
@@ -5260,6 +5117,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -5273,6 +5132,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fc9c1043244c..037af8352677 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -5597,7 +5597,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
- cur = (ring->wptr & ring->buf_mask) - 1;
+ cur = (ring->wptr - 1) & ring->buf_mask;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
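The one-line gfx_v9_0 change above only matters when the write pointer lands exactly on a ring-buffer wrap boundary. A worked example with assumed values (buf_mask = 0xff, wptr = 0x100, so wptr & buf_mask == 0):

/* old: cur = (wptr & buf_mask) - 1 = 0x00 - 1  -> underflows to a huge value
 * new: cur = (wptr - 1) & buf_mask = 0xff      -> correct previous slot
 * so the distance stored at ring->ring[offset] is computed from a valid index.
 */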
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 4603653916f5..67ca16a8027c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1103,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
*flags |= AMDGPU_PDE_BFS(0x9);
} else if (level == AMDGPU_VM_PDB0) {
- if (*flags & AMDGPU_PDE_PTE)
+ if (*flags & AMDGPU_PDE_PTE) {
*flags &= ~AMDGPU_PDE_PTE;
- else
+ if (!(*flags & AMDGPU_PTE_VALID))
+ *addr |= 1 << PAGE_SHIFT;
+ } else {
*flags |= AMDGPU_PTE_TF;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index cc3fdbbcd314..f92744b8d79d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -185,6 +185,10 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
mes_add_queue_pkt.trap_en = 1;
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+ mes_add_queue_pkt.gds_size = input->queue_size;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 2e50db3b761e..6e564b549b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -625,6 +625,7 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e83725a28106..007a3db69df1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
}
queue_input.is_kfd_process = 1;
+ queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
+ queue_input.queue_size = q->properties.queue_size >> 2;
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index a6fcbeeb7428..0d53f6067422 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
- NULL, 0))*/)
- return;
+ NULL, 0)))
+ return;*/
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index b8e14c2cc295..3ae350220d42 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5140d9c2bf3b..1efe7fa5bc58 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4759,7 +4759,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
- plane_info->layer_index = 0;
+ plane_info->layer_index = plane_state->normalized_zpos;
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
@@ -4827,7 +4827,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
- dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+ dc_plane_state->layer_index = plane_info.layer_index;
dc_plane_state->flip_int_enabled = true;
/*
@@ -9485,6 +9485,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ drm_atomic_normalize_zpos(dev, state);
+
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..8ca10ab3dfc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- power_opt |= psr_power_opt_z10_static_screen;
+ /*
+ * Only enable static-screen optimizations for PSR1. For PSR SU, this
+ * causes issues with the vstartup interrupt, which amdgpu_dm uses to
+ * send vblank events.
+ */
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ power_opt |= psr_power_opt_z10_static_screen;
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index c09be3f15fe6..23a299c929a1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -99,7 +99,7 @@ static int dcn31_get_active_display_cnt_wa(
return display_count;
}
-static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -110,9 +110,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -211,11 +212,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn31_disable_otg_wa(clk_mgr_base, true);
+ dcn31_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn31_disable_otg_wa(clk_mgr_base, false);
+ dcn31_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 9781a8dbc238..4a15aa7a375f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -119,7 +119,7 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -129,12 +129,21 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+
+ if (disable) {
+ if (stream_enc && stream_enc->funcs->disable_fifo)
+ pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+
+ if (stream_enc && stream_enc->funcs->enable_fifo)
+ pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ }
}
}
}
@@ -233,11 +242,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn314_disable_otg_wa(clk_mgr_base, true);
+ dcn314_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn314_disable_otg_wa(clk_mgr_base, false);
+ dcn314_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index cc076621f5e6..98ad8e0fd2d8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -46,6 +46,9 @@
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define UNSUPPORTED_DCFCLK 10000000
+#define MIN_DPP_DISP_CLK 100000
+
static int dcn315_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
@@ -79,7 +82,7 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
-static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -91,9 +94,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -146,6 +150,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
+ /* Lock pstate by requesting unsupported dcfclk if change is unsupported */
+ if (!new_clocks->p_state_change_support)
+ new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
@@ -159,10 +166,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
+ if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+ if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@@ -175,12 +182,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
/* No need to apply the w/a if we haven't taken over from bios yet */
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, true);
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -275,7 +282,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -283,7 +290,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -291,7 +298,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -299,7 +306,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -556,8 +563,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
- if (!bw_params->num_channels)
- bw_params->num_channels = 2;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 0cd3d2eb7ac7..187f5b27fdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index c6785969eb1a..f0f3f66629cc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
+ unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
clk_mgr->smu_present = false;
+ clk_mgr->dpm_present = false;
if (!clk_mgr_base->bw_params)
return;
@@ -179,6 +181,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_levels);
+ num_dcfclk_levels = num_levels;
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
@@ -189,11 +192,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
+ num_dtbclk_levels = num_levels;
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_levels);
+ num_dispclk_levels = num_levels;
+
+ if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+ clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
unsigned int i;
@@ -658,6 +666,12 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ if (clk_mgr->dpm_present && !num_levels)
+ clk_mgr->dpm_present = false;
+
+ if (!clk_mgr->dpm_present)
+ dcn32_patch_dpm_table(clk_mgr_base->bw_params);
+
DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 48dad093ae8b..780f7f4c28b6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2758,8 +2758,14 @@ bool perform_link_training_with_retries(
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ /* Update the verified link settings to the current ones,
+ * because DPIA link training might fall back to a lower link setting.
+ */
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ }
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
@@ -5121,6 +5127,14 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ /* If this chip cap is set, at least one retimer must exist in the chain
+ * Override count to 1 if we receive a known bad count (0 or an invalid value) */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ }
+
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
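For reference on the 0x80 override above, the DPCD PHY_REPEATER_CNT field is one-hot encoded, so dp_convert_to_count() maps it roughly as follows (values per the DisplayPort LTTPR definition; shown here only to make the override legible):

/* 0x80 -> 1 LTTPR    0x08 -> 5 LTTPRs
 * 0x40 -> 2 LTTPRs   0x04 -> 6 LTTPRs
 * 0x20 -> 3 LTTPRs   0x02 -> 7 LTTPRs
 * 0x10 -> 4 LTTPRs   0x01 -> 8 LTTPRs
 *
 * Writing 0x80 when EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN is set therefore
 * forces a repeater count of one, matching the "at least one retimer" note.
 */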
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7dbab15bfa68..ccf7bd3d90fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3584,6 +3584,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
}
}
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx_reset;
+
+ /* reset the otg sync context for the pipe and its slave pipes if any */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
+
+ if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
+ }
+}
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index cd2671161ef1..7ce64a3c1b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -445,226 +445,6 @@
type DSCRM_DSC_FORWARD_EN; \
type DSCRM_DSC_OPP_PIPE_SOURCE
-#define DSC_REG_LIST_DCN314(id) \
- SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
- SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
- SRI(DSCC_CONFIG0, DSCC, id),\
- SRI(DSCC_CONFIG1, DSCC, id),\
- SRI(DSCC_STATUS, DSCC, id),\
- SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
- SRI(DSCC_PPS_CONFIG0, DSCC, id),\
- SRI(DSCC_PPS_CONFIG1, DSCC, id),\
- SRI(DSCC_PPS_CONFIG2, DSCC, id),\
- SRI(DSCC_PPS_CONFIG3, DSCC, id),\
- SRI(DSCC_PPS_CONFIG4, DSCC, id),\
- SRI(DSCC_PPS_CONFIG5, DSCC, id),\
- SRI(DSCC_PPS_CONFIG6, DSCC, id),\
- SRI(DSCC_PPS_CONFIG7, DSCC, id),\
- SRI(DSCC_PPS_CONFIG8, DSCC, id),\
- SRI(DSCC_PPS_CONFIG9, DSCC, id),\
- SRI(DSCC_PPS_CONFIG10, DSCC, id),\
- SRI(DSCC_PPS_CONFIG11, DSCC, id),\
- SRI(DSCC_PPS_CONFIG12, DSCC, id),\
- SRI(DSCC_PPS_CONFIG13, DSCC, id),\
- SRI(DSCC_PPS_CONFIG14, DSCC, id),\
- SRI(DSCC_PPS_CONFIG15, DSCC, id),\
- SRI(DSCC_PPS_CONFIG16, DSCC, id),\
- SRI(DSCC_PPS_CONFIG17, DSCC, id),\
- SRI(DSCC_PPS_CONFIG18, DSCC, id),\
- SRI(DSCC_PPS_CONFIG19, DSCC, id),\
- SRI(DSCC_PPS_CONFIG20, DSCC, id),\
- SRI(DSCC_PPS_CONFIG21, DSCC, id),\
- SRI(DSCC_PPS_CONFIG22, DSCC, id),\
- SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
- SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCCIF_CONFIG0, DSCCIF, id),\
- SRI(DSCCIF_CONFIG1, DSCCIF, id),\
- SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
-
-#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
- /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
- DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
- DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
- DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
-
-
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
uint32_t DSC_DEBUG_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 884fa060f375..598ce872a8d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1565,6 +1565,7 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in dc interface, just need
* to apply existing for plane enable / opp change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 232cc15979dd..fb729674953b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -45,6 +45,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg314_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg314_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div(
enum pixel_rate_div k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
+ return;
switch (otg_inst) {
case 0:
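
The dccg314 change above follows a read-back-and-compare pattern: fetch the currently programmed K1/K2 dividers, then return early when the requested values are the NA sentinel or already match what is in the register, so redundant programming cycles are skipped. Below is a minimal standalone sketch of that pattern; the register model, the PIX_DIV_NA value, and the helper names are illustrative assumptions, not the driver's actual REG_GET_2/REG_UPDATE_2 plumbing.

#include <stdint.h>
#include <stdio.h>

#define PIX_DIV_NA 0xF            /* sentinel: "not applicable" */

/* Illustrative stand-in for one OTG's pixel-rate divider register. */
struct fake_otg_regs {
	uint8_t k1;               /* 1-bit field in real HW */
	uint8_t k2;               /* 2-bit field in real HW */
	unsigned writes;          /* count programming cycles */
};

static void get_pixel_rate_div(const struct fake_otg_regs *r,
			       uint8_t *k1, uint8_t *k2)
{
	*k1 = r->k1;
	*k2 = r->k2;
}

static void set_pixel_rate_div(struct fake_otg_regs *r,
			       uint8_t k1, uint8_t k2)
{
	uint8_t cur_k1, cur_k2;

	/* Never program the NA sentinel: it does not fit the field width. */
	if (k1 == PIX_DIV_NA || k2 == PIX_DIV_NA)
		return;

	get_pixel_rate_div(r, &cur_k1, &cur_k2);
	if (k1 == cur_k1 && k2 == cur_k2)
		return;          /* already programmed, skip the write */

	r->k1 = k1;
	r->k2 = k2;
	r->writes++;
}

int main(void)
{
	struct fake_otg_regs otg = { .k1 = 0, .k2 = 1 };

	set_pixel_rate_div(&otg, 0, 1);          /* no-op: same values */
	set_pixel_rate_div(&otg, PIX_DIV_NA, 2); /* no-op: invalid request */
	set_pixel_rate_div(&otg, 1, 2);          /* one real write */
	printf("writes = %u\n", otg.writes);     /* prints: writes = 1 */
	return 0;
}

The dccg32 variant further down applies the same idea, with the NA check split out ahead of the comparison.
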
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 06d8638db696..8c0ab013764e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc)
/* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
@@ -261,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+void enc314_stream_encoder_dp_blank(
+ struct dc_link *link,
+ struct stream_encoder *enc)
+{
+ /* New to DCN314 - disable the FIFO before VID stream disable. */
+ enc314_disable_fifo(enc);
+
+ enc1_stream_encoder_dp_blank(link, enc);
+}
+
static void enc314_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -316,15 +327,11 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
* that it overflows during mode transition, and sometimes doesn't recover.
*/
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
- /* DIG Resync FIFO now needs to be explicitly enabled. */
- enc314_enable_fifo(enc);
-
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
@@ -340,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+ /*
+ * DIG Resync FIFO now needs to be explicitly enabled.
+ * This should come after DP_VID_STREAM_ENABLE per HW docs.
+ */
+ enc314_enable_fifo(enc);
+
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
@@ -408,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
- enc1_stream_encoder_dp_blank,
+ enc314_stream_encoder_dp_blank,
.dp_unblank =
enc314_stream_encoder_dp_unblank,
.audio_mute_control = enc3_audio_mute_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 39931d48f385..f4d1b83979fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,7 +343,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
@@ -364,7 +363,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if (odm_combine_factor == 2)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -384,21 +383,10 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
return;
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
- || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
pix_per_cycle = 2;
if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
pix_per_cycle);
}
-
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
- dc->debug.enable_dp_dig_pixel_rate_div_policy)
- return true;
- return false;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index d014580592ac..244280298212 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -41,6 +41,4 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
-
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index fcf67eb3478f..72a563a4c3e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -146,7 +146,6 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
- .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 2a2a4a9cc117..44ac1c2aabf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -87,6 +87,9 @@
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
@@ -579,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
#define dsc_regsDCN314(id)\
[id] = {\
- DSC_REG_LIST_DCN314(id)\
+ DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
@@ -590,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = {
};
static const struct dcn20_dsc_shift dsc_shift = {
- DSC_REG_LIST_SH_MASK_DCN314(__SHIFT)
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
- DSC_REG_LIST_SH_MASK_DCN314(_MASK)
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
@@ -844,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = {
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
- .num_dsc = 4,
+ .num_dsc = 3,
};
static const struct dc_plane_cap plane_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index 0d5e8a441512..6640d0ac4304 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,6 +42,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg32_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ // Don't program 0xF into the register field. Not valid since
+ // K1 / K2 field is only 1 / 2 bits wide
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+ return;
+
+ dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == cur_k1 && k2 == cur_k2)
+ return;
+
switch (otg_inst) {
case 0:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 99eb239bbc7b..9aebc1be2f59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -98,9 +98,13 @@ static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
- /* Should never be hit, if it is we have an erroneous hw config*/
- ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
- + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+ if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
+ /* This may happen during seamless transition from ODM 2:1 to ODM4:1 */
+ DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
+ hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
+ hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
+ }
}
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
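
The hubbub change above demotes an "impossible config" ASSERT to a warning, because the DET + compbuf sum can legitimately exceed the CRB segment count partway through a seamless ODM 2:1 to 4:1 transition. A minimal sketch of that validate-and-warn shape, using invented segment counts and a plain fprintf in place of DC_LOG_WARNING:

#include <stdbool.h>
#include <stdio.h>

/* Invented segment counts standing in for DET0..DET3, compbuf and CRB. */
struct crb_cfg {
	unsigned det[4];
	unsigned compbuf;
	unsigned crb_segs;
};

static bool crb_cfg_fits(const struct crb_cfg *c)
{
	unsigned used = c->det[0] + c->det[1] + c->det[2] + c->det[3] + c->compbuf;

	if (used > c->crb_segs) {
		/* Transient over-allocation: warn and keep going
		 * instead of asserting.
		 */
		fprintf(stderr,
			"CRB warning: DET(%u,%u,%u,%u) + compbuf(%u) > segments(%u)\n",
			c->det[0], c->det[1], c->det[2], c->det[3],
			c->compbuf, c->crb_segs);
		return false;
	}
	return true;
}

int main(void)
{
	struct crb_cfg mid_transition = {
		.det = { 9, 9, 9, 9 }, .compbuf = 4, .crb_segs = 36,
	};

	if (!crb_cfg_fits(&mid_transition))
		puts("over-subscribed, expected during ODM 2:1 -> 4:1 switch");
	return 0;
}
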
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 149a1b17cdf3..fa7b0291ce4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -680,7 +681,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_15_soc.num_chans = bw_params->num_channels;
+
+ if (bw_params->num_channels > 0)
+ dcn3_15_soc.num_chans = bw_params->num_channels;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
ASSERT(clk_table->num_entries);
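
The dcn315 bounding-box change only overrides the SoC defaults when the values reported in bw_params are non-zero, so a zeroed channel count can no longer clobber the baked-in default (num_chans = 4). A hedged sketch of that guarded-override idiom, with made-up structure and field names:

#include <stdio.h>

/* Hypothetical stand-ins for the SoC bounding box and the reported params. */
struct soc_box { int num_chans; int dram_channel_width_bytes; };
struct reported { int num_channels; int dram_channel_width_bytes; };

static void apply_reported(struct soc_box *soc, const struct reported *rep)
{
	/* Keep the compiled-in default unless the report carries real data. */
	if (rep->num_channels > 0)
		soc->num_chans = rep->num_channels;
	if (rep->dram_channel_width_bytes > 0)
		soc->dram_channel_width_bytes = rep->dram_channel_width_bytes;
}

int main(void)
{
	struct soc_box soc = { .num_chans = 4, .dram_channel_width_bytes = 4 };
	struct reported rep = { .num_channels = 0, .dram_channel_width_bytes = 8 };

	apply_reported(&soc, &rep);
	printf("chans=%d width=%d\n", soc.num_chans,
	       soc.dram_channel_width_bytes);   /* prints: chans=4 width=8 */
	return 0;
}
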
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 01f3fad172f3..ee821c4fb5dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -265,33 +265,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3710,61 +3598,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
@@ -5412,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5496,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5679,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5756,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5862,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5877,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5889,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 8e4c9d0887ce..e573e706430d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
},
},
.num_states = 1,
- .sr_exit_time_us = 20.16,
- .sr_enter_plus_exit_time_us = 27.13,
+ .sr_exit_time_us = 42.97,
+ .sr_enter_plus_exit_time_us = 49.94,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
@@ -244,6 +244,50 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
}
/**
+ * Finds dummy_latency_index when MCLK switching using firmware based
+ * vblank stretch is enabled. This function will iterate through the
+ * table of dummy pstate latencies until the lowest value that allows
+ * dm_allow_self_refresh_and_mclk_switch to happen is found
+ */
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ const int max_latency_table_entries = 4;
+ const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ int dummy_latency_index = 0;
+
+ dc_assert_fp_enabled();
+
+ while (dummy_latency_index < max_latency_table_entries) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+
+ if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
+ break;
+
+ dummy_latency_index++;
+ }
+
+ if (dummy_latency_index == max_latency_table_entries) {
+ ASSERT(dummy_latency_index != max_latency_table_entries);
+ /* If the execution gets here, it means dummy p_states are
+ * not possible. This should never happen and would mean
+ * something is severely wrong.
+ * Here we reset dummy_latency_index to 3, because it is
+ * better to have underflows than system crashes.
+ */
+ dummy_latency_index = max_latency_table_entries - 1;
+ }
+
+ return dummy_latency_index;
+}
+
+/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
*
@@ -1646,7 +1690,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+ dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
@@ -1882,6 +1926,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
+{
+ int i;
+ unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
+ max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
+ max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ }
+
+ /* Scan through clock values we currently have and if they are 0,
+ * then populate it with dcn3_2_soc.clock_limits[] value.
+ *
+ * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK as any of those being
+ * 0, will cause it to skip building the clock table.
+ */
+ if (max_dcfclk_mhz == 0)
+ bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ if (max_dispclk_mhz == 0)
+ bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
+ if (max_dtbclk_mhz == 0)
+ bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
+ if (max_uclk_mhz == 0)
+ bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
+}
+
static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
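
dcn32_find_dummy_latency_index_for_fw_based_mclk_switch above is a bounded linear search: try each dummy p-state latency in order, revalidate bandwidth, and stop at the first entry that keeps DRAM clock change supported, clamping to the last entry (accepting possible underflow) when nothing validates. A standalone sketch of that search-with-clamped-fallback shape, with a stubbed validator in place of dcn32_internal_validate_bw:

#include <stdbool.h>
#include <stdio.h>

#define MAX_LATENCY_ENTRIES 4

/* Invented stand-in: "does this dummy latency still validate?" */
static bool validate_with_latency_us(double latency_us)
{
	return latency_us <= 10.0;   /* pretend only <= 10 us passes */
}

static int find_dummy_latency_index(const double table_us[MAX_LATENCY_ENTRIES])
{
	int i = 0;

	while (i < MAX_LATENCY_ENTRIES) {
		if (validate_with_latency_us(table_us[i]))
			break;
		i++;
	}

	if (i == MAX_LATENCY_ENTRIES) {
		/* Nothing validated: clamp to the last entry rather than
		 * indexing out of bounds -- mirrors the driver's
		 * "underflow is better than a crash" fallback.
		 */
		i = MAX_LATENCY_ENTRIES - 1;
	}
	return i;
}

int main(void)
{
	const double table_us[MAX_LATENCY_ENTRIES] = { 38.0, 19.0, 9.0, 5.0 };

	printf("chosen index = %d\n", find_dummy_latency_index(table_us)); /* 2 */
	return 0;
}
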
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 3ed06ab855be..e1b79e2aab8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -71,4 +71,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 9a60f27eceaa..6980f698eb23 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -1992,6 +1992,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2013,6 +2014,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 59c2547d01b1..365d290bba99 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -27,6 +27,8 @@
#include "display_mode_vba_32.h"
#include "../display_mode_lib.h"
+#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
+
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
double BPP,
@@ -1182,6 +1184,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -1253,6 +1256,29 @@ void dml32_CalculateODMMode(
else
*TotalAvailablePipesSupport = false;
}
+ if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
+ ODMUse != dm_odm_combine_policy_4to1) {
+ if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ } else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
+ *ODMMode == dm_odm_combine_mode_4to1) {
+ *ODMMode = dm_odm_combine_mode_4to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
+ *NumberOfDPP = 4;
+ } else {
+ *ODMMode = dm_odm_combine_mode_2to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
+ *NumberOfDPP = 2;
+ }
+ }
+ if (Output == dm_hdmi && OutFormat == dm_420 &&
+ HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ }
}
double dml32_CalculateRequiredDispclk(
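A small standalone model of the 4:2:0 ODM-combine selection added above (illustrative only; the enum names and helper are simplified stand-ins, and the separate HDMI restriction is folded into one check): once HActive exceeds the 4096-pixel 4:2:0 buffer of a single pipe, the mode is forced to 2:1 or 4:1 combine, and anything beyond four buffers wide, or any HDMI 4:2:0 timing wider than one buffer, is reported as unsupported.

#include <stdio.h>
#include <stdbool.h>

#define MAX_FMT_420_BUFFER_WIDTH 4096

enum odm_mode { ODM_DISABLED, ODM_2TO1, ODM_4TO1 };

/* Returns false when the 4:2:0 timing cannot be supported at all. */
static bool pick_odm_for_420(unsigned hactive, bool is_hdmi,
                             enum odm_mode *mode, int *dpp)
{
    if (hactive <= MAX_FMT_420_BUFFER_WIDTH)
        return true;        /* one pipe's 4:2:0 buffer is enough, keep *mode */

    if (is_hdmi || hactive > 4 * MAX_FMT_420_BUFFER_WIDTH) {
        /* HDMI 4:2:0 cannot be split across pipes, and more than 4x the
         * buffer width exceeds even a 4:1 combine. */
        *mode = ODM_DISABLED;
        *dpp = 0;
        return false;
    }

    if (hactive > 2 * MAX_FMT_420_BUFFER_WIDTH || *mode == ODM_4TO1) {
        *mode = ODM_4TO1;
        *dpp = 4;
    } else {
        *mode = ODM_2TO1;
        *dpp = 2;
    }
    return true;
}

int main(void)
{
    enum odm_mode mode = ODM_DISABLED;
    int dpp = 1;

    if (pick_odm_for_420(7680, false, &mode, &dpp))
        printf("mode=%d dpp=%d\n", mode, dpp);  /* 2:1 combine, 2 DPPs */
    return 0;
}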
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 924e361ad243..0b427d89b3c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -216,6 +216,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 68c2ed434d2c..cff5fd55a0ad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -340,6 +340,8 @@ struct clk_mgr_internal {
bool smu_present;
void *wm_range_table;
long long wm_range_table_addr;
+
+ bool dpm_present;
};
struct clk_mgr_internal_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 58158764adc0..7614125c92c7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -219,6 +219,10 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
struct dc_state *context,
uint8_t disabled_master_pipe_idx);
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx);
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
const struct link_hwss *get_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 50bfa513cb35..7e85cdc5bd34 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -269,7 +269,8 @@ union MESAPI__ADD_QUEUE {
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
uint32_t trap_en : 1;
- uint32_t reserved : 21;
+ uint32_t is_aql_queue : 1;
+ uint32_t reserved : 20;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 24488f4cb78c..93f9b8377539 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -209,7 +209,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
return 0;
/* override pptable_id from driver parameter */
@@ -218,27 +219,6 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
-
- /*
- * Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
- * - use 36831 soft pptable when pptable_id is 3683
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 3664:
- case 3715:
- case 3795:
- pptable_id = 0;
- break;
- case 3683:
- pptable_id = 36831;
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
}
/* "pptable_id == 0" means vbios carries the pptable. */
@@ -471,26 +451,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- /*
- * Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664, 3683 or 3715 on request
- * - use 3664 when pptable_id is 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- pptable_id = 3664;
- break;
- case 3664:
- case 3683:
- case 3715:
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
}
/* force using vbios pptable in sriov mode */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 7db2fd9ea74a..1d454485e0d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -239,82 +239,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
if (num > 2)
return -EINVAL;
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
- if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
- (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
-#if 0
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
-#endif
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
-
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ /* PMFW 78.58 contains a critical fix for the gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
-
- if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
return 0;
}
@@ -410,58 +375,11 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
- uint32_t pptable_id;
int ret = 0;
- /*
- * With SCPM enabled, the pptable used will be signed. It cannot
- * be used directly by driver. To get the raw pptable, we need to
- * rely on the combo pptable(and its revelant SMU message).
- */
- if (adev->scpm_enabled) {
- ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
- &smu_table->power_play_table,
- &smu_table->power_play_table_size);
- } else {
- /* override pptable_id from driver parameter */
- if (amdgpu_smu_pptable_id >= 0) {
- pptable_id = amdgpu_smu_pptable_id;
- dev_info(adev->dev, "override pptable id %d\n", pptable_id);
- } else {
- pptable_id = smu_table->boot_values.pp_table_id;
- }
-
- /*
- * Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
- * - use soft pptable when pptable_id is 3683
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 3664:
- case 3715:
- case 3795:
- pptable_id = 0;
- break;
- case 3683:
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
-
- /* force using vbios pptable in sriov mode */
- if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
- ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
- &smu_table->power_play_table,
- &smu_table->power_play_table_size);
- else
- ret = smu_v13_0_get_pptable_from_firmware(smu,
- &smu_table->power_play_table,
- &smu_table->power_play_table_size,
- pptable_id);
- }
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
if (ret)
return ret;
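The first hunk in smu_v13_0_0_ppt.c above switches the allowed-feature mask from a default-deny whitelist (start at 0, OR in each supported bit) to a default-allow mask (set every bit, then clear the unsupported ones). A tiny standalone illustration of the two styles, with placeholder feature bits:

#include <stdio.h>
#include <stdint.h>

#define FEATURE_A   (1ULL << 0)
#define FEATURE_B   (1ULL << 1)
#define FEATURE_C   (1ULL << 2)

int main(void)
{
    int b_supported = 0;

    /* Old style: default-deny, explicitly enable what is supported. */
    uint64_t allow = 0;
    allow |= FEATURE_A;
    if (b_supported)
        allow |= FEATURE_B;

    /* New style: default-allow, explicitly strip what is unsupported,
     * so newly added firmware feature bits stay enabled without
     * touching the driver each time. */
    uint64_t deny = ~0ULL;
    if (!b_supported)
        deny &= ~FEATURE_B;

    printf("allow=%llx deny=%llx\n",
           (unsigned long long)allow, (unsigned long long)deny);
    return 0;
}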
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 8aadcc0aa90b..df9370e0ff23 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1864,12 +1864,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove);
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1884,13 +1878,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 28bad30dc4e5..5968f4af190b 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -188,7 +188,7 @@ static int lt8912_write_lvds_config(struct lt8912 *lt)
{0x03, 0xff},
};
- return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
+ return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq));
};
static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b)
@@ -268,7 +268,7 @@ static int lt8912_video_setup(struct lt8912 *lt)
u32 hactive, h_total, hpw, hfp, hbp;
u32 vactive, v_total, vpw, vfp, vbp;
u8 settle = 0x08;
- int ret;
+ int ret, hsync_activehigh, vsync_activehigh;
if (!lt)
return -EINVAL;
@@ -278,12 +278,14 @@ static int lt8912_video_setup(struct lt8912 *lt)
hpw = lt->mode.hsync_len;
hbp = lt->mode.hback_porch;
h_total = hactive + hfp + hpw + hbp;
+ hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH;
vactive = lt->mode.vactive;
vfp = lt->mode.vfront_porch;
vpw = lt->mode.vsync_len;
vbp = lt->mode.vback_porch;
v_total = vactive + vfp + vpw + vbp;
+ vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH;
if (vactive <= 600)
settle = 0x04;
@@ -317,6 +319,13 @@ static int lt8912_video_setup(struct lt8912 *lt)
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff);
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0),
+ vsync_activehigh ? BIT(0) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1),
+ hsync_activehigh ? BIT(1) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0),
+ lt->connector.display_info.is_hdmi ? BIT(0) : 0);
+
return ret;
}
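regmap_update_bits(), used above to program the sync-polarity and HDMI-mode bits, is a read-modify-write helper: only the bits selected by the mask change, everything else is preserved. A standalone sketch of that semantics (the starting register value is made up; the bit layout mirrors the patch, bit 0 = vsync polarity, bit 1 = hsync polarity):

#include <stdio.h>
#include <stdint.h>

/* Conceptual model of a read-modify-write register update. */
static uint8_t update_bits(uint8_t reg, uint8_t mask, uint8_t val)
{
    return (reg & ~mask) | (val & mask);
}

int main(void)
{
    uint8_t reg = 0x5c;             /* arbitrary current register value */
    int hsync_active_high = 1;
    int vsync_active_high = 0;

    reg = update_bits(reg, 1u << 0, vsync_active_high ? 1u << 0 : 0);
    reg = update_bits(reg, 1u << 1, hsync_active_high ? 1u << 1 : 0);

    printf("0x%02x\n", reg);        /* 0x5e: bit 1 set, bit 0 cleared */
    return 0;
}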
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 073adfe438dd..4e41c144a290 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,6 +2,7 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dabdfe09f5e5..0bcde53c50c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
if (ctx->syncobj)
drm_syncobj_put(ctx->syncobj);
@@ -1521,10 +1525,6 @@ static void context_close(struct i915_gem_context *ctx)
ctx->file_priv = ERR_PTR(-EBADF);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
-
client = ctx->client;
if (client) {
spin_lock(&client->ctx_lock);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 633a7e5dba3b..6b5d4ea22b67 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -166,6 +166,21 @@ struct intel_engine_execlists {
struct timer_list preempt;
/**
+ * @preempt_target: active request at the time of the preemption request
+ *
+ * We force a preemption to occur if the pending contexts have not
+ * been promoted to active upon receipt of the CS ack event within
+ * the timeout. This timeout may be chosen based on the target,
+ * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so
+ * if a context switch should happen before the preemption
+ * timeout, we may shoot early at an innocent context. To prevent this,
+ * we record which context was active at the time of the preemption
+ * request and only reset that context upon the timeout.
+ */
+ const struct i915_request *preempt_target;
+
+ /**
* @ccid: identifier for contexts submitted to this engine
*/
u32 ccid;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4b909cb88cdf..c718e6dc40b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
if (!rq)
return 0;
+ /* Only allow ourselves to force reset the currently active context */
+ engine->execlists.preempt_target = rq;
+
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
@@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
if (unlikely(preempt_timeout(engine))) {
+ const struct i915_request *rq = *engine->execlists.active;
+
+ /*
+ * If after the preempt-timeout expired, we are still on the
+ * same active request/context as before we initiated the
+ * preemption, reset the engine.
+ *
+ * However, if we have processed a CS event to switch contexts,
+ * but not yet processed the CS event for the pending
+ * preemption, reset the timer allowing the new context to
+ * gracefully exit.
+ */
cancel_timer(&engine->execlists.preempt);
- engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ if (rq == engine->execlists.preempt_target)
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ else
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
}
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
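A toy standalone model (invented names, not kernel code) of the decision implemented by the two i915 hunks above: when the preempt timeout fires, the engine is only flagged for reset if it is still running the request recorded as the preemption target; if a context switch already happened, the timer is simply re-armed for the new context.

#include <stdbool.h>
#include <stdio.h>

struct engine_model {
    int active_request;     /* id of the currently active request */
    int preempt_target;     /* id recorded when preemption was requested */
    bool error;             /* stands in for ERROR_PREEMPT */
    int timer_ms;           /* 0 means timer cancelled */
};

static void on_preempt_timeout(struct engine_model *e, int new_timeout_ms)
{
    e->timer_ms = 0;                        /* cancel_timer() */
    if (e->active_request == e->preempt_target)
        e->error = true;                    /* leads to an engine reset */
    else
        e->timer_ms = new_timeout_ms;       /* let the new context exit gracefully */
}

int main(void)
{
    struct engine_model e = { .active_request = 2, .preempt_target = 1 };

    on_preempt_timeout(&e, 640);
    printf("error=%d timer=%d\n", e.error, e.timer_ms);  /* error=0 timer=640 */
    return 0;
}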
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 73a8b46e0234..d09a0e845d09 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -545,8 +545,7 @@ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
-static const struct attribute *freq_attrs[] = {
- &dev_attr_punit_req_freq_mhz.attr,
+static const struct attribute *throttle_reason_attrs[] = {
&attr_throttle_reason_status.attr,
&attr_throttle_reason_pl1.attr,
&attr_throttle_reason_pl2.attr,
@@ -763,12 +762,20 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
if (!is_object_gt(kobj))
return;
- ret = sysfs_create_files(kobj, freq_attrs);
+ ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr);
if (ret)
drm_warn(&gt->i915->drm,
- "failed to create gt%u throttle sysfs files (%pe)",
+ "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
gt->info.id, ERR_PTR(ret));
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ ret = sysfs_create_files(kobj, throttle_reason_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+ }
+
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 702e5b89be22..b605d0ceaefa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
- i915_gem_drain_freed_objects(dev_priv);
+ /* Flush any outstanding work, including i915_gem_context.release_work. */
+ i915_gem_drain_workqueue(dev_priv);
drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 2d72cc5ddaba..6b6d5335c834 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_DITHER_CFG);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9cc406e1eee1..3b7d13028fb6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
+ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+ * after dsi is fully set.
+ */
+ mtk_dsi_stop(dsi);
+
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- /*
- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
- * after dsi is fully set.
- */
- mtk_dsi_stop(dsi);
-
- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
-
dsi->enabled = false;
}
@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 251a1bb648cc..a222bf76804f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -262,7 +262,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ /*
+ * FIXME: A 24-bit color depth does not work with 24 bpp on
+ * G200ER. Force 32 bpp.
+ */
+ drm_fbdev_generic_setup(dev, 32);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index ff5e1a44c43a..1e716c23019a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2257,7 +2257,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.enable = 200,
.disable = 20,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 471c47db546b..c836cf884185 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -823,7 +823,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
r = pm_runtime_resume_and_get(dev->dev);
if (r < 0) {
dev_err(dev->dev, "failed to runtime_get device: %d\n", r);
- return r;
+ goto err_pm;
}
i2c_davinci_init(dev);
@@ -882,6 +882,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
err_unuse_clocks:
pm_runtime_dont_use_autosuspend(dev->dev);
pm_runtime_put_sync(dev->dev);
+err_pm:
pm_runtime_disable(dev->dev);
return r;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e47fa3465671..3082183bd66a 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1583,7 +1583,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- if (ret == 0) {
+ if (ret >= 0) {
/* setup chip registers to defaults */
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 32235c62f3d2..60908c5737f1 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -63,13 +64,14 @@
*/
#define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
/* Reference clock for Bluefield - 156 MHz. */
-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
+#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
/* Constant used to determine the PLL frequency. */
-#define MLNXBF_I2C_COREPLL_CONST 16384
+#define MLNXBF_I2C_COREPLL_CONST 16384ULL
+
+#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
/* PLL registers. */
-#define MLXBF_I2C_CORE_PLL_REG0 0x0
#define MLXBF_I2C_CORE_PLL_REG1 0x4
#define MLXBF_I2C_CORE_PLL_REG2 0x8
@@ -181,22 +183,15 @@
#define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
/* Core PLL TYU configuration. */
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
-
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
+#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
+#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
/* Core PLL YU configuration. */
#define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
#define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
+#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
/* Core PLL frequency. */
static u64 mlxbf_i2c_corepll_frequency;
@@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock;
#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
-
/*
* Function to poll a set of bits at a specific address; it checks whether
* the bits are equal to zero when eq_zero is set to 'true', and not equal
@@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
/* Clear status bits. */
writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
/* Set the cause data. */
- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
/* Zero PEC byte. */
writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
/* Zero byte count. */
@@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (flags & MLXBF_I2C_F_WRITE) {
write_en = 1;
write_len += operation->length;
+ if (data_idx + operation->length >
+ MLXBF_I2C_MASTER_DATA_DESC_SIZE)
+ return -ENOBUFS;
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
@@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
return 0;
}
-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
{
- u64 core_frequency, pad_frequency;
+ u64 core_frequency;
u8 core_od, core_r;
u32 corepll_val;
u16 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
/* Get Core PLL configuration bits. */
- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
/*
* Compute PLL output frequency as follow:
@@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- core_frequency = pad_frequency * (++core_f);
+ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
core_frequency /= (++core_r) * (++core_od);
return core_frequency;
}
-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
{
u32 corepll_reg1_val, corepll_reg2_val;
- u64 corepll_frequency, pad_frequency;
+ u64 corepll_frequency;
u8 core_od, core_r;
u32 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
/* Get Core PLL configuration bits */
- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
/*
* Compute PLL output frequency as follow:
@@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
+ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
corepll_frequency /= (++core_r) * (++core_od);
return corepll_frequency;
@@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
[1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
[2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
},
- .calculate_freq = mlxbf_calculate_freq_from_tyu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
},
[MLXBF_I2C_CHIP_TYPE_2] = {
.type = MLXBF_I2C_CHIP_TYPE_2,
.shared_res = {
[0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
},
- .calculate_freq = mlxbf_calculate_freq_from_yu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu
}
};
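The BlueField I2C hunks above replace open-coded shift/mask math with FIELD_GET() over contiguous GENMASK() fields and apply the PLL formula quoted in the driver comment. A standalone userspace model (the register value is made up; the macros emulate the kernel helpers for contiguous masks only):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins: GENMASK builds a contiguous mask, FIELD_GET
 * extracts the bits under the mask and shifts them down to bit 0. */
#define GENMASK64(h, l)   (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
#define FIELD_GET64(mask, val)  (((val) & (mask)) / ((mask) & ~((mask) << 1)))

#define CORE_F_MASK    GENMASK64(15, 3)
#define CORE_OD_MASK   GENMASK64(19, 16)
#define CORE_R_MASK    GENMASK64(25, 20)
#define PLL_IN_FREQ    156250000ULL     /* 156.25 MHz reference, as in the patch */

int main(void)
{
    uint32_t reg1 = 0x00100140;         /* made-up register value for illustration */
    uint64_t core_f  = FIELD_GET64(CORE_F_MASK, reg1);
    uint64_t core_od = FIELD_GET64(CORE_OD_MASK, reg1);
    uint64_t core_r  = FIELD_GET64(CORE_R_MASK, reg1);

    /* TYU formula from the driver comment:
     * PLL_OUT = PLL_IN * (core_f + 1) / ((core_r + 1) * (core_od + 1)) */
    uint64_t out = PLL_IN_FREQ * (core_f + 1) / ((core_r + 1) * (core_od + 1));

    printf("f=%llu od=%llu r=%llu out=%llu Hz\n",
           (unsigned long long)core_f, (unsigned long long)core_od,
           (unsigned long long)core_r, (unsigned long long)out);
    return 0;
}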
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 774507b54b57..313904be5f3b 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
int (*deselect)(struct i2c_mux_core *, u32))
{
struct i2c_mux_core *muxc;
+ size_t mux_size;
- muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters)
- + sizeof_priv, GFP_KERNEL);
+ mux_size = struct_size(muxc, adapter, max_adapters);
+ muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL);
if (!muxc)
return NULL;
if (sizeof_priv)
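struct_size() and size_add(), used in the i2c-mux hunk above, are saturating size helpers: on overflow they pin the result at SIZE_MAX, so the allocation fails cleanly instead of silently under-allocating. A standalone model of that behaviour (helper names here are local stand-ins, not the kernel implementations):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t sat_add(size_t a, size_t b)
{
    size_t r = a + b;

    return (r < a) ? SIZE_MAX : r;      /* saturate on wraparound */
}

static size_t sat_mul(size_t a, size_t b)
{
    if (b && a > SIZE_MAX / b)
        return SIZE_MAX;
    return a * b;
}

struct mux { int n; char adapter[]; };  /* stand-in for i2c_mux_core */

int main(void)
{
    size_t max_adapters = 4, sizeof_priv = 64;

    /* struct_size(muxc, adapter, max_adapters) + sizeof_priv, saturating */
    size_t total = sat_add(sat_add(sizeof(struct mux),
                                   sat_mul(sizeof(char), max_adapters)),
                           sizeof_priv);

    printf("%zu bytes\n", total);
    return 0;
}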
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
index 93446b21f98f..db793a550c25 100644
--- a/drivers/input/keyboard/iqs62x-keys.c
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -77,6 +77,7 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to read switch code: %d\n",
ret);
+ fwnode_handle_put(child);
return ret;
}
iqs62x_keys->switches[i].code = val;
@@ -90,6 +91,8 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
IQS62X_EVENT_HALL_N_T :
IQS62X_EVENT_HALL_S_T);
+
+ fwnode_handle_put(child);
}
return 0;
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 65286762b02a..ad8660be0127 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -20,7 +20,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#define SNVS_HPVIDR1_REG 0xF8
+#define SNVS_HPVIDR1_REG 0xBF8
#define SNVS_LPSR_REG 0x4C /* LP Status Register */
#define SNVS_LPCR_REG 0x38 /* LP Control Register */
#define SNVS_HPSR_REG 0x14
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 434d48ae4b12..ffad142801b3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -186,7 +186,6 @@ static const char * const smbus_pnp_ids[] = {
"LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
- "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
"LEN2068", /* T14 Gen 1 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 2745bf1aee38..83f4be05e27b 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1453,7 +1453,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
"ce", GPIOD_OUT_LOW);
if (IS_ERR(ts->gpio_ce)) {
error = PTR_ERR(ts->gpio_ce);
- if (error != EPROBE_DEFER)
+ if (error != -EPROBE_DEFER)
dev_err(&client->dev,
"Failed to get gpio: %d\n", error);
return error;
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 497c912ad9e1..5a8f780e7ffd 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2349,13 +2349,6 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
if (!dmar_in_use())
return 0;
- /*
- * It's unlikely that any I/O board is hot added before the IOMMU
- * subsystem is initialized.
- */
- if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
- return -EOPNOTSUPP;
-
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
tmp = handle;
} else {
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 1f2cd43cf9bc..31bc50e538a3 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -399,7 +399,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{
unsigned long fl_sagaw, sl_sagaw;
- fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
+ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */
@@ -3019,7 +3019,13 @@ static int __init init_dmars(void)
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+ /*
+ * Calling dmar_alloc_hwirq() with dmar_global_lock held
+ * could cause a possible lock race condition.
+ */
+ up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
+ down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -3932,6 +3938,7 @@ int __init intel_iommu_init(void)
force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
platform_optin_force_iommu();
+ down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
@@ -3944,6 +3951,16 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
+ up_write(&dmar_global_lock);
+
+ /*
+ * The bus notifier takes the dmar_global_lock, so lockdep will
+ * complain later when we register it under the lock.
+ */
+ dmar_register_bus_notifier();
+
+ down_write(&dmar_global_lock);
+
if (!no_iommu)
intel_iommu_debugfs_init();
@@ -3988,9 +4005,11 @@ int __init intel_iommu_init(void)
pr_err("Initialization failed\n");
goto out_free_dmar;
}
+ up_write(&dmar_global_lock);
init_iommu_pm_ops();
+ down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
/*
* The flush queue implementation does not perform
@@ -4008,11 +4027,13 @@ int __init intel_iommu_init(void)
"%s", iommu->name);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
}
+ up_read(&dmar_global_lock);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
+ down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
pr_warn("ACPI name space devices didn't probe correctly\n");
@@ -4023,15 +4044,17 @@ int __init intel_iommu_init(void)
iommu_disable_protect_mem_regions(iommu);
}
+ up_read(&dmar_global_lock);
- intel_iommu_enabled = 1;
- dmar_register_bus_notifier();
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+ intel_iommu_enabled = 1;
+
return 0;
out_free_dmar:
intel_iommu_free_dmars();
+ up_write(&dmar_global_lock);
return ret;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 66b9fa408bf2..eb5ea5b69cfa 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -561,6 +561,11 @@ config IRQ_LOONGARCH_CPU
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select LOONGSON_LIOINTC
+ select LOONGSON_EIOINTC
+ select LOONGSON_PCH_PIC
+ select LOONGSON_PCH_MSI
+ select LOONGSON_PCH_LPC
help
Support for the LoongArch CPU Interrupt Controller. For details of
irq chip hierarchy on LoongArch platforms please read the document
@@ -623,8 +628,9 @@ config LOONGSON_PCH_MSI
config LOONGSON_PCH_LPC
bool "Loongson PCH LPC Controller"
+ depends on LOONGARCH
depends on MACH_LOONGSON64
- default (MACH_LOONGSON64 && LOONGARCH)
+ default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
help
Support for the Loongson PCH LPC Controller.
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5ff09de6c48f..beead1a0191c 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1574,13 +1574,15 @@ static int its_select_cpu(struct irq_data *d,
const struct cpumask *aff_mask)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- cpumask_var_t tmpmask;
+ static DEFINE_RAW_SPINLOCK(tmpmask_lock);
+ static struct cpumask __tmpmask;
+ struct cpumask *tmpmask;
+ unsigned long flags;
int cpu, node;
-
- if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
- return -ENOMEM;
-
node = its_dev->its->numa_node;
+ tmpmask = &__tmpmask;
+
+ raw_spin_lock_irqsave(&tmpmask_lock, flags);
if (!irqd_affinity_is_managed(d)) {
/* First try the NUMA node */
@@ -1634,7 +1636,7 @@ static int its_select_cpu(struct irq_data *d,
cpu = cpumask_pick_least_loaded(d, tmpmask);
}
out:
- free_cpumask_var(tmpmask);
+ raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
return cpu;
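A toy userspace model (a pthread mutex standing in for the raw spinlock, a plain bitmask standing in for the cpumask) of the pattern adopted in its_select_cpu() above: one static scratch buffer serialized by a lock replaces a per-call atomic allocation that could fail under memory pressure.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long scratch_mask;      /* stands in for the static cpumask */

static int pick_cpu(unsigned long allowed)
{
    int cpu;

    pthread_mutex_lock(&scratch_lock);
    scratch_mask = allowed;                     /* build the candidate mask in place */
    cpu = __builtin_ffsl(scratch_mask) - 1;     /* pick the lowest set bit */
    pthread_mutex_unlock(&scratch_lock);

    return cpu;
}

int main(void)
{
    printf("cpu=%d\n", pick_cpu(0xf0));         /* -> 4 */
    return 0;
}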
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a73763d475f0..6a3f7498ea8e 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,7 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
- if (!host_data->drv_data || !host_data->drv_data->desc_irqs)
+ if (!host_data->drv_data->desc_irqs)
return -EINVAL;
desc_irq = host_data->drv_data->desc_irqs[hwirq];
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index a1bd6d9c9223..909df82fed33 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -354,6 +354,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
vb2_core_querybuf(&ctx->vb_q, b->index, b);
dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
return 0;
@@ -378,8 +384,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
int ret;
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
index 95e8c29ccc65..d2f5f30582a9 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
@@ -228,7 +228,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
{
struct mtk_vcodec_dev *dev;
struct video_device *vfd_enc;
- struct resource *res;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
int ret;
@@ -272,14 +271,12 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_res;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get irq resource");
- ret = -ENOENT;
+ dev->enc_irq = platform_get_irq(pdev, 0);
+ if (dev->enc_irq < 0) {
+ ret = dev->enc_irq;
goto err_res;
}
- dev->enc_irq = platform_get_irq(pdev, 0);
irq_set_status_flags(dev->enc_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, dev->enc_irq,
mtk_vcodec_enc_irq_handler,
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 7835bb0f32fc..e012b21c4fd7 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
+ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
return -ENODEV;
switch (fc_usb->udev->speed) {
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9c05776f11d1..d509a4a2f08e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2740,7 +2740,7 @@ static const struct usb_device_id uvc_ids[] = {
.idProduct = 0x4034,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 0,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* LogiLink Wireless Webcam */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 0f3d6b5667b0..55c26e7d370e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -1040,6 +1040,8 @@ int v4l2_compat_get_array_args(struct file *file, void *mbuf,
{
int err = 0;
+ memset(mbuf, 0, array_size);
+
switch (cmd) {
case VIDIOC_G_FMT32:
case VIDIOC_S_FMT32:
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c314025d977e..e6fd355a2e92 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2872,9 +2872,9 @@ static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
- IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0),
@@ -3367,8 +3367,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (array_buf == NULL)
- goto out_array_args;
- err = -EFAULT;
+ goto out;
if (in_compat_syscall())
err = v4l2_compat_get_array_args(file, array_buf,
user_ptr, array_size,
@@ -3377,7 +3376,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
err = copy_from_user(array_buf, user_ptr, array_size) ?
-EFAULT : 0;
if (err)
- goto out_array_args;
+ goto out;
*kernel_ptr = array_buf;
}
@@ -3395,6 +3394,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
trace_v4l2_qbuf(video_devdata(file)->minor, parg);
}
+ /*
+ * Some ioctls can return an error, but still have valid
+ * results that must be returned.
+ */
+ if (err < 0 && !always_copy)
+ goto out;
+
if (has_array_args) {
*kernel_ptr = (void __force *)user_ptr;
if (in_compat_syscall()) {
@@ -3409,16 +3415,8 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
} else if (copy_to_user(user_ptr, array_buf, array_size)) {
err = -EFAULT;
}
- goto out_array_args;
}
- /*
- * Some ioctls can return an error, but still have valid
- * results that must be returned.
- */
- if (err < 0 && !always_copy)
- goto out;
-out_array_args:
if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
err = -EFAULT;
out:
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 06aa62ce0ed1..3662bf5320ce 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -870,7 +870,8 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & SD_ROCR_S18A)) {
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index a5e05ed0fda3..9d35453e7371 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -34,7 +34,7 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
- if (hsq->mrq) {
+ if (hsq->mrq || hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b6eb75f4bbfc..dfc3ffd5b1f8 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -111,8 +111,8 @@
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
-#define BUS_WIDTH_8 BIT(2)
-#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_4_SUPPORT BIT(3)
+#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
@@ -524,9 +524,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
- case MMC_BUS_WIDTH_8:
- writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
- break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
@@ -651,16 +648,8 @@ static int moxart_probe(struct platform_device *pdev)
dmaengine_slave_config(host->dma_chan_rx, &cfg);
}
- switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
- case 1:
+ if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 2:
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- break;
- default:
- break;
- }
writel(0, host->base + REG_INTERRUPT_MASK);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7689ffec5ad1..251172890af7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3928,7 +3928,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, CMD_CRC);
} else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
@@ -3938,7 +3938,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 184608bd8999..e58a1e0cadd2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -88,8 +88,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5c2febe94428..86d42306aa5e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
}
}
@@ -2166,16 +2165,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2447,7 +2444,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -4184,6 +4182,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter)
+ return -ENOMEM;
+ }
+
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -4221,6 +4225,9 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -4232,6 +4239,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -4239,6 +4247,19 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
@@ -6228,15 +6249,6 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
- bond->rr_tx_counter = alloc_percpu(u32);
- if (!bond->rr_tx_counter) {
- destroy_workqueue(bond->wq);
- bond->wq = NULL;
- return -ENOMEM;
- }
- }
-
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index f23a03300a81..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *ring)
{
- return ring->obj_num - (ring->head - ring->tail);
+ u8 head = c_can_get_tx_head(ring);
+ u8 tail = c_can_get_tx_tail(ring);
+
+ if (priv->type == BOSCH_D_CAN)
+ return ring->obj_num - (ring->head - ring->tail);
+
+ /* This is not a FIFO. C/D_CAN sends out the buffers
+ * prioritized. The lowest buffer number wins.
+ */
+ if (head < tail)
+ return 0;
+
+ return ring->obj_num - head;
}
#endif /* C_CAN_H */
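A standalone model of the two free-slot computations introduced above (object count and counters are arbitrary): D_CAN keeps the ring/FIFO arithmetic, while C_CAN, which always transmits the lowest-numbered buffer first, only counts the slots above the current head and reports none once the masked head has wrapped behind the masked tail.

#include <stdio.h>

#define OBJ_NUM 16

/* 'head' and 'tail' are free-running counters; the buffer index is
 * (counter % OBJ_NUM). */
static unsigned tx_free(int is_d_can, unsigned head, unsigned tail)
{
    unsigned h = head % OBJ_NUM;
    unsigned t = tail % OBJ_NUM;

    if (is_d_can)                       /* D_CAN behaves like a ring/FIFO */
        return OBJ_NUM - (head - tail);

    /* C_CAN transmits the lowest-numbered buffer first, so new frames may
     * only be queued above the current head. */
    if (h < t)
        return 0;
    return OBJ_NUM - h;
}

int main(void)
{
    /* 10 frames in flight: the D_CAN ring still has 6 slots free, while
     * C_CAN (head wrapped to index 4, tail at index 10) has none. */
    printf("d_can=%u c_can=%u\n", tx_free(1, 20, 10), tx_free(0, 20, 10));
    return 0;
}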
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index dc8132862f33..d6605dbb7737 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -429,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
static bool c_can_tx_busy(const struct c_can_priv *priv,
const struct c_can_tx_ring *tx_ring)
{
- if (c_can_get_tx_free(tx_ring) > 0)
+ if (c_can_get_tx_free(priv, tx_ring) > 0)
return false;
netif_stop_queue(priv->dev);
@@ -437,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
/* Memory barrier before checking tx_free (head and tail) */
smp_mb();
- if (c_can_get_tx_free(tx_ring) == 0) {
+ if (c_can_get_tx_free(priv, tx_ring) == 0) {
netdev_dbg(priv->dev,
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
tx_ring->head, tx_ring->tail,
@@ -465,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
idx = c_can_get_tx_head(tx_ring);
tx_ring->head++;
- if (c_can_get_tx_free(tx_ring) == 0)
+ if (c_can_get_tx_free(priv, tx_ring) == 0)
netif_stop_queue(dev);
if (idx < c_can_get_tx_tail(tx_ring))
@@ -748,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
return;
tx_ring->tail += pkts;
- if (c_can_get_tx_free(tx_ring)) {
+ if (c_can_get_tx_free(priv, tx_ring)) {
/* Make sure that anybody stopping the queue after
* this sees the new tx_ring->tail.
*/
@@ -760,8 +760,7 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_packets += pkts;
tail = c_can_get_tx_tail(tx_ring);
-
- if (tail == 0) {
+ if (priv->type == BOSCH_D_CAN && tail == 0) {
u8 head = c_can_get_tx_head(tx_ring);
/* Start transmission for all cached messages */
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index f857968efed7..ccb438eca517 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index baf749c8cda3..c1ff3c046d62 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -824,6 +824,7 @@ static int gs_can_open(struct net_device *netdev)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
dm->mode = cpu_to_le32(GS_CAN_MODE_START);
dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
@@ -835,13 +836,12 @@ static int gs_can_open(struct net_device *netdev)
if (rc < 0) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
kfree(dm);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
kfree(dm);
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -925,17 +925,21 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
}
/* blink LED's for finding the this interface */
-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
+ const struct gs_can *dev = netdev_priv(netdev);
int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
break;
case ETHTOOL_ID_INACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
break;
default:
break;
@@ -1072,9 +1076,10 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
- if (le32_to_cpu(dconf->sw_version) > 1)
- if (feature & GS_CAN_FEATURE_IDENTIFY)
- netdev->ethtool_ops = &gs_usb_ethtool_ops;
+ /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+ if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+ feature & GS_CAN_FEATURE_IDENTIFY))
+ dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
kfree(bt_const);
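Two fixes in gs_usb above: the CAN state is set to ERROR_ACTIVE before the start command is sent (and rolled back to STOPPED if it fails), and GS_CAN_FEATURE_IDENTIFY is cleared from dev->feature unless the firmware is newer than sw_version 1 and actually advertises it, letting gs_usb_set_phys_id() refuse cleanly with -EOPNOTSUPP. A small sketch of the gating test; the bit value is a placeholder, not the driver's real layout:

#include <stdint.h>
#include <stdio.h>

#define FEATURE_IDENTIFY (1u << 5)	/* placeholder bit, not the real definition */

/* Keep IDENTIFY only when sw_version > 1 and the device reports the bit. */
static uint32_t gate_identify(uint32_t feature, uint32_t sw_version)
{
	if (!(sw_version > 1 && (feature & FEATURE_IDENTIFY)))
		feature &= ~FEATURE_IDENTIFY;
	return feature;
}

int main(void)
{
	printf("%#x\n", gate_identify(FEATURE_IDENTIFY, 1));	/* 0: firmware too old */
	printf("%#x\n", gate_identify(FEATURE_IDENTIFY, 2));	/* bit retained */
	printf("%#x\n", gate_identify(0, 2));			/* 0: never advertised */
	return 0;
}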
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index daedd2bf20c1..5579644e8fde 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -244,10 +244,6 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_TAIL_TAG_ENABLE, true);
- /* disable frame check length field */
- lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH,
- false);
-
/* set back pressure for half duplex */
lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
true);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 835807911be0..409d5c3d76ea 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -506,14 +506,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
- return 0;
+ return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -592,8 +597,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -2326,11 +2329,17 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ /* all MACs must be forced link-down before sw reset */
+ for (i = 0; i < MT7530_NUM_PORTS; i++)
+ mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7531_pll_setup(priv);
+
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
@@ -2887,8 +2896,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
- mt7531_pad_setup(ds, interface);
-
priv->p6_interface = interface;
break;
default:
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 88595863d8bc..8a0af371e7dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -94,11 +94,8 @@ static int aq_ndev_close(struct net_device *ndev)
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f46eefb5a029..96da0ba3d507 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -659,7 +659,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
- bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -668,6 +667,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ tx_bytes += skb->len;
+
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
@@ -688,8 +689,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
- compl_deferred = true;
+ skb = NULL;
else
atomic_inc(&bp->ptp_cfg->tx_avail);
}
@@ -698,9 +700,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
- if (!compl_deferred)
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 7f3c0875b6f5..8e316367f6ce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) {
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE);
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 66c7d08d376a..a2897549f9c4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5109,6 +5109,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
+ phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -5198,6 +5199,9 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
phylink_start(bp->phylink);
rtnl_unlock();
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+
+ if (!padap->tc_mqprio)
+ goto out;
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
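The cudbg_collect_qdesc() rework above stops holding uld_mutex across the ETHOFLD queue walk: the ULD section now unwinds through its own out_unlock_uld label, while the mqprio section takes padap->tc_mqprio->mqprio_mutex and unwinds through out_unlock_mqprio. A compact pthread sketch of the one-unlock-label-per-lock error-handling shape; names and error codes are illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uld_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mqprio_lock = PTHREAD_MUTEX_INITIALIZER;

static int collect(int fail_uld, int fail_mqprio)
{
	int rc = 0;

	pthread_mutex_lock(&uld_lock);
	if (fail_uld) {
		rc = -1;
		goto out_unlock_uld;	/* only uld_lock is held here */
	}
	pthread_mutex_unlock(&uld_lock);

	pthread_mutex_lock(&mqprio_lock);
	if (fail_mqprio)
		rc = -2;
	/* success and mqprio failure both exit through this label */
	pthread_mutex_unlock(&mqprio_lock);
out:
	return rc;

out_unlock_uld:
	pthread_mutex_unlock(&uld_lock);
	goto out;
}

int main(void)
{
	printf("%d %d %d\n", collect(0, 0), collect(1, 0), collect(0, 1));
	return 0;
}

Each lock gets exactly one unwind path, so no exit can leave the wrong mutex held or drop one that was never taken.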
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index a139f2e9d59f..e0e8dfd13793 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 4470a4a3e4c3..9f5b921039bd 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2432,7 +2432,7 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
@@ -2486,25 +2486,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_MQPRIO:
- return enetc_setup_tc_mqprio(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return enetc_setup_tc_taprio(ndev, type_data);
- case TC_SETUP_QDISC_CBS:
- return enetc_setup_tc_cbs(ndev, type_data);
- case TC_SETUP_QDISC_ETF:
- return enetc_setup_tc_txtime(ndev, type_data);
- case TC_SETUP_BLOCK:
- return enetc_setup_tc_psfp(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -2600,29 +2581,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2641,11 +2599,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2657,11 +2613,6 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 29922c20531f..2cfe6944ebd3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
@@ -465,6 +463,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
@@ -540,4 +539,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c4a0e836d4f0..bb7750222691 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -709,6 +709,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -722,7 +729,28 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -739,7 +767,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 582a663ed0ba..f8a2f02ce22d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1517,6 +1517,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 17924305afa2..dfcaac302e24 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_vf_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index d77ee8936c6a..a5fed00cb971 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -561,6 +561,7 @@ struct fec_enet_private {
struct clk *clk_2x_txclk;
bool ptp_clk_on;
+ struct mutex ptp_clk_mutex;
unsigned int num_tx_queues;
unsigned int num_rx_queues;
@@ -638,13 +639,6 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
- struct {
- struct timespec64 ts_phc;
- u64 ns_sys;
- u32 at_corr;
- u8 at_inc_corr;
- } ptp_saved_state;
-
u64 ethtool_stats[];
};
@@ -655,8 +649,5 @@ void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
-void fec_ptp_save_state(struct fec_enet_private *fep);
-int fec_ptp_restore_state(struct fec_enet_private *fep);
-
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 6152f6dbf1bc..92c55e1a5507 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -286,11 +286,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_RESET BIT(0)
-#define FEC_ECR_ETHEREN BIT(1)
-#define FEC_ECR_MAGICEN BIT(2)
-#define FEC_ECR_SLEEP BIT(3)
-#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_MAGICEN (1 << 2)
+#define FEC_ECR_SLEEP (1 << 3)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -986,9 +983,6 @@ fec_restart(struct net_device *ndev)
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
- struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
-
- fec_ptp_save_state(fep);
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1142,7 +1136,7 @@ fec_restart(struct net_device *ndev)
}
if (fep->bufdesc_ex)
- ecntl |= FEC_ECR_EN1588;
+ ecntl |= (1 << 4);
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1163,14 +1157,6 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
- /* Restart PPS if needed */
- if (fep->pps_enable) {
- /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
- fep->pps_enable = 0;
- fec_ptp_restore_state(fep);
- fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
- }
-
/* Enable interrupts we wish to service */
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1221,8 +1207,6 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
- struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
- u32 ecntl = 0;
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -1232,8 +1216,6 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
- fec_ptp_save_state(fep);
-
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
@@ -1253,28 +1235,12 @@ fec_stop(struct net_device *ndev)
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
- if (fep->bufdesc_ex)
- ecntl |= FEC_ECR_EN1588;
-
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- ecntl |= FEC_ECR_ETHEREN;
+ writel(2, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
-
- writel(ecntl, fep->hwp + FEC_ECNTRL);
-
- if (fep->bufdesc_ex)
- fec_ptp_start_cyclecounter(ndev);
-
- /* Restart PPS if needed */
- if (fep->pps_enable) {
- /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
- fep->pps_enable = 0;
- fec_ptp_restore_state(fep);
- fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
- }
}
@@ -2029,7 +1995,6 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned long flags;
int ret;
if (enable) {
@@ -2038,15 +2003,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
return ret;
if (fep->clk_ptp) {
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ mutex_lock(&fep->ptp_clk_mutex);
ret = clk_prepare_enable(fep->clk_ptp);
if (ret) {
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
goto failed_clk_ptp;
} else {
fep->ptp_clk_on = true;
}
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
}
ret = clk_prepare_enable(fep->clk_ref);
@@ -2061,10 +2026,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
} else {
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ mutex_lock(&fep->ptp_clk_mutex);
clk_disable_unprepare(fep->clk_ptp);
fep->ptp_clk_on = false;
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
}
clk_disable_unprepare(fep->clk_ref);
clk_disable_unprepare(fep->clk_2x_txclk);
@@ -2077,10 +2042,10 @@ failed_clk_2x_txclk:
clk_disable_unprepare(fep->clk_ref);
failed_clk_ref:
if (fep->clk_ptp) {
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ mutex_lock(&fep->ptp_clk_mutex);
clk_disable_unprepare(fep->clk_ptp);
fep->ptp_clk_on = false;
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
}
failed_clk_ptp:
clk_disable_unprepare(fep->clk_enet_out);
@@ -3915,7 +3880,7 @@ fec_probe(struct platform_device *pdev)
}
fep->ptp_clk_on = false;
- spin_lock_init(&fep->tmreg_lock);
+ mutex_init(&fep->ptp_clk_mutex);
/* clk_ref is optional, depends on board */
fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 8dd5a2615a89..3dc3c0b626c2 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -365,19 +365,21 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct fec_enet_private *fep =
+ struct fec_enet_private *adapter =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
unsigned long flags;
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ mutex_lock(&adapter->ptp_clk_mutex);
/* Check the ptp clock */
- if (!fep->ptp_clk_on) {
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ if (!adapter->ptp_clk_on) {
+ mutex_unlock(&adapter->ptp_clk_mutex);
return -EINVAL;
}
- ns = timecounter_read(&fep->tc);
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@ -402,10 +404,10 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
unsigned long flags;
u32 counter;
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ mutex_lock(&fep->ptp_clk_mutex);
/* Check the ptp clock */
if (!fep->ptp_clk_on) {
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
return -EINVAL;
}
@@ -415,9 +417,11 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
*/
counter = ns & fep->cc.mask;
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
writel(counter, fep->hwp + FEC_ATIME);
timecounter_init(&fep->tc, &fep->cc, ns);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
return 0;
}
@@ -514,11 +518,13 @@ static void fec_time_keep(struct work_struct *work)
struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
unsigned long flags;
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ mutex_lock(&fep->ptp_clk_mutex);
if (fep->ptp_clk_on) {
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
schedule_delayed_work(&fep->time_keep, HZ);
}
@@ -593,6 +599,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
}
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+ spin_lock_init(&fep->tmreg_lock);
+
fec_ptp_start_cyclecounter(ndev);
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
@@ -625,36 +633,7 @@ void fec_ptp_stop(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- if (fep->pps_enable)
- fec_ptp_enable_pps(fep, 0);
-
cancel_delayed_work_sync(&fep->time_keep);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
-
-void fec_ptp_save_state(struct fec_enet_private *fep)
-{
- u32 atime_inc_corr;
-
- fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
- fep->ptp_saved_state.ns_sys = ktime_get_ns();
-
- fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
- atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
- fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
-}
-
-int fec_ptp_restore_state(struct fec_enet_private *fep)
-{
- u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
- u64 ns_sys;
-
- writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
- atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
- writel(atime_inc, fep->hwp + FEC_ATIME_INC);
-
- ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
- timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
- return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
-}
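The FEC hunks above revert the PTP save/restore work and return to a two-level locking scheme: a mutex (ptp_clk_mutex) serializes the sleepable clock enable/disable and the ptp_clk_on flag, while the tmreg spinlock is held only around the short timecounter/register accesses. A userspace sketch of that split using pthread primitives, with a plain counter standing in for the hardware timer:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t clk_mutex = PTHREAD_MUTEX_INITIALIZER;	/* sleepable side */
static pthread_spinlock_t tmreg_lock;				/* short critical section */
static int clk_on;
static uint64_t timecounter;

static int ptp_gettime(uint64_t *ns)
{
	int ret = 0;

	pthread_mutex_lock(&clk_mutex);
	if (!clk_on) {
		ret = -1;		/* clock gated off: -EINVAL in the driver */
	} else {
		pthread_spin_lock(&tmreg_lock);
		*ns = ++timecounter;	/* stands in for timecounter_read() */
		pthread_spin_unlock(&tmreg_lock);
	}
	pthread_mutex_unlock(&clk_mutex);
	return ret;
}

int main(void)
{
	uint64_t ns = 0;

	pthread_spin_init(&tmreg_lock, PTHREAD_PROCESS_PRIVATE);
	clk_on = 1;
	printf("%d %llu\n", ptp_gettime(&ns), (unsigned long long)ns);
	pthread_spin_destroy(&tmreg_lock);
	return 0;
}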
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8c939628e2d8..2e6461b0ea8b 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 10c1e1ea83a1..e3d9804aeb25 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5909,6 +5909,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5930,10 +5950,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -8224,9 +8244,9 @@ config_tc:
if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -10971,10 +10991,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
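i40e_bw_bytes_to_mbits() above folds the bytes-to-Mbit/s conversion and the 50 Mbps floor into one helper so both callers behave identically. A plain-C sketch of the arithmetic; the divisor value is an assumption about I40E_BW_MBPS_DIVISOR (125000 bytes/s per Mbit/s), not something taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR   125000ULL	/* bytes per second in one Mbit/s (assumed) */
#define BW_CREDIT_DIVISOR 50ULL		/* minimum usable rate, in Mbps */

static uint64_t bw_bytes_to_mbits(uint64_t max_tx_rate_bytes)
{
	if (max_tx_rate_bytes < BW_MBPS_DIVISOR)
		return BW_CREDIT_DIVISOR;	/* clamp to the 50 Mbps minimum */
	return max_tx_rate_bytes / BW_MBPS_DIVISOR;
}

int main(void)
{
	/* 12.5 MB/s -> 100 Mbit/s; 1000 B/s -> clamped to 50 Mbit/s */
	printf("%llu %llu\n",
	       (unsigned long long)bw_bytes_to_mbits(12500000),
	       (unsigned long long)bw_bytes_to_mbits(1000));
	return 0;
}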
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4f184c50f6e8..7e9f6a69eb10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2039,6 +2039,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 10aa99dfdcdb..0c89f16bf1e2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1077,7 +1077,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
int ret;
if (!is_valid_ether_addr(addr->sa_data))
@@ -1094,10 +1093,9 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (handle_mac)
- goto done;
-
- ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+ iavf_is_mac_set_handled(netdev, addr->sa_data),
+ msecs_to_jiffies(2500));
/* If ret < 0 then it means wait was interrupted.
* If ret == 0 then it means we got a timeout.
@@ -1111,7 +1109,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
if (!ret)
return -EAGAIN;
-done:
if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
return -EACCES;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 06d18797d25a..18b6a702a1d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
+ /* underlying hardware might not allow access and/or always return
+ * 0 for the head/tail registers so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15ee85dc33bd..5a9e6563923e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -269,11 +269,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c4ec9264071..58d483e2f539 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -914,7 +914,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
*/
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
@@ -981,23 +981,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* at least 1)
*/
if (offset)
- vsi->num_rxq = offset;
+ rx_count = offset;
else
- vsi->num_rxq = num_rxq_per_tc;
+ rx_count = num_rxq_per_tc;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ if (rx_count > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ rx_count, vsi->alloc_rxq);
return -EINVAL;
}
- vsi->num_txq = tx_count;
- if (vsi->num_txq > vsi->alloc_txq) {
+ if (tx_count > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ tx_count, vsi->alloc_txq);
return -EINVAL;
}
+ vsi->num_txq = tx_count;
+ vsi->num_rxq = rx_count;
+
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
@@ -3490,6 +3492,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u16 new_txq, new_rxq;
u8 netdev_tc = 0;
int i;
@@ -3530,21 +3533,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
}
}
- /* Set actual Tx/Rx queue pairs */
- vsi->num_txq = offset + qcount_tx;
- if (vsi->num_txq > vsi->alloc_txq) {
+ new_txq = offset + qcount_tx;
+ if (new_txq > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ new_txq, vsi->alloc_txq);
return -EINVAL;
}
- vsi->num_rxq = offset + qcount_rx;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ new_rxq = offset + qcount_rx;
+ if (new_rxq > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ new_rxq, vsi->alloc_rxq);
return -EINVAL;
}
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = new_txq;
+ vsi->num_rxq = new_rxq;
+
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
@@ -3576,6 +3582,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
+ struct ice_tc_cfg old_tc_cfg;
struct ice_vsi_ctx *ctx;
struct device *dev;
int i, ret = 0;
@@ -3600,6 +3607,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs[i] = vsi->num_txq;
}
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
@@ -3616,8 +3624,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
else
ret = ice_vsi_setup_q_map(vsi, ctx);
- if (ret)
+ if (ret) {
+ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
goto out;
+ }
/* must to indicate which section of VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8c30eea61b6d..e109cb93886b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
- ice_unplug_aux_dev(pf);
-
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -6651,7 +6649,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
+ int i, tx_err, rx_err, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
@@ -6685,20 +6683,13 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
- link_err = ice_force_phys_link_state(vsi, false);
- if (link_err)
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
- }
-
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err || vlan_err) {
+ if (tx_err || rx_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6860,6 +6851,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
if (vsi->type == ICE_VSI_PF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -8892,6 +8885,16 @@ int ice_stop(struct net_device *netdev)
return -EBUSY;
}
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ int link_err = ice_force_phys_link_state(vsi, false);
+
+ if (link_err) {
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ return -EIO;
+ }
+ }
+
ice_vsi_close(vsi);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 836dce840712..dd2285d4bef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ if (!ice_is_xdp_ena_vsi(vsi))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
spin_lock(&xdp_ring->tx_lock);
} else {
+ /* Generally, should not happen */
+ if (unlikely(queue_index >= vsi->num_xdp_txq))
+ return -ENXIO;
xdp_ring = vsi->xdp_rings[queue_index];
}
@@ -1464,7 +1467,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
+ wd = ice_xmit_zc(tx_ring);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 03ce85f6e6df..056c904b83cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -392,13 +392,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
goto failure;
}
- if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
- !is_power_of_2(vsi->tx_rings[qid]->count)) {
- netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
- pool_failure = -EINVAL;
- goto failure;
- }
-
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
@@ -534,11 +527,10 @@ exit:
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
- u16 batched, leftover, i, tail_bumps;
+ u16 leftover, i, tail_bumps;
- batched = ALIGN_DOWN(count, rx_thresh);
- tail_bumps = batched / rx_thresh;
- leftover = count & (rx_thresh - 1);
+ tail_bumps = count / rx_thresh;
+ leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
@@ -788,69 +780,57 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
}
/**
- * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
- * @xdp_ring: XDP ring to clean
- * @napi_budget: amount of descriptors that NAPI allows us to clean
- *
- * Returns count of cleaned descriptors
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
*/
-static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- int budget = napi_budget / tx_thresh;
- u16 next_dd = xdp_ring->next_dd;
- u16 ntc, cleared_dds = 0;
-
- do {
- struct ice_tx_desc *next_dd_desc;
- u16 desc_cnt = xdp_ring->count;
- struct ice_tx_buf *tx_buf;
- u32 xsk_frames;
- u16 i;
-
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
- cleared_dds++;
- xsk_frames = 0;
- if (likely(!xdp_ring->xdp_tx_active)) {
- xsk_frames = tx_thresh;
- goto skip;
- }
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if ((tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+ if (last_rs >= ntc)
+ xsk_frames = last_rs - ntc + 1;
+ else
+ xsk_frames = last_rs + cnt - ntc + 1;
+ }
- ntc = xdp_ring->next_to_clean;
+ if (!xsk_frames)
+ return;
- for (i = 0; i < tx_thresh; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ if (likely(!xdp_ring->xdp_tx_active))
+ goto skip;
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < xsk_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
- ntc++;
- if (ntc >= xdp_ring->count)
- ntc = 0;
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
}
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
skip:
- xdp_ring->next_to_clean += tx_thresh;
- if (xdp_ring->next_to_clean >= desc_cnt)
- xdp_ring->next_to_clean -= desc_cnt;
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- next_dd_desc->cmd_type_offset_bsz = 0;
- next_dd = next_dd + tx_thresh;
- if (next_dd >= desc_cnt)
- next_dd = tx_thresh - 1;
- } while (--budget);
-
- xdp_ring->next_dd = next_dd;
-
- return cleared_dds * tx_thresh;
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += xsk_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
/**
@@ -885,7 +865,6 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
u32 i;
@@ -905,13 +884,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
}
xdp_ring->next_to_use = ntu;
-
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
}
/**
@@ -924,7 +896,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
u32 nb_pkts, unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
@@ -933,54 +904,54 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- struct ice_tx_desc *tx_desc;
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ */
+static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
- * @budget: number of free descriptors on HW Tx ring that can be used
- * @napi_budget: amount of descriptors that NAPI allows us to clean
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
+ int budget;
+
+ ice_clean_xdp_irq_zc(xdp_ring);
- if (budget < tx_thresh)
- budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
- struct ice_tx_desc *tx_desc;
-
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = tx_thresh - 1;
xdp_ring->next_to_use = 0;
}
ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
&total_bytes);
+ ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
@@ -1058,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
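The zero-copy TX path above now completes frames by inspecting the descriptor that carried the last RS bit: if its DD bit is set, everything from next_to_clean up to and including that slot (with ring wraparound) is done, and ice_set_rs_bit() marks only the most recently produced descriptor. A standalone sketch of the completed-frame count with wraparound, using hypothetical ring fields:

#include <stdint.h>
#include <stdio.h>

/*
 * Number of completed slots when the descriptor at last_rs reports done:
 * everything from ntc through last_rs, inclusive, modulo the ring size.
 */
static uint16_t completed_frames(uint16_t cnt, uint16_t ntc, uint16_t last_rs)
{
	if (last_rs >= ntc)
		return last_rs - ntc + 1;
	return last_rs + cnt - ntc + 1;		/* the range wraps past the end */
}

int main(void)
{
	printf("%u\n", completed_frames(512, 10, 20));	/* 11 */
	printf("%u\n", completed_frames(512, 500, 3));	/* 16, wrapped */
	return 0;
}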
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 4edbe81eb646..6fa181f080ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -26,13 +26,10 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
-static inline bool
-ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
- u32 __always_unused budget,
- int __always_unused napi_budget)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
{
return false;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 0eec05d905eb..4a3baa7e0142 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- static struct dentry *mvpp2_root;
- struct dentry *mvpp2_dir;
+ struct dentry *mvpp2_dir, *mvpp2_root;
int ret, i;
+ mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index ede3e53b9790..a895862b4821 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -368,6 +368,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
if (!sw->np)
return 0;
+ of_node_get(sw->np);
ports = of_find_node_by_name(sw->np, "ports");
for_each_child_of_node(ports, node) {
@@ -417,6 +418,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
}
out:
+ of_node_put(node);
of_node_put(ports);
return err;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index f538a749ebd4..59470d99f522 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -872,6 +872,7 @@ static void prestera_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id prestera_pci_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 5ace4609de47..b344632beadd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1458,7 +1458,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return !eth->hwlro;
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index ecf85e9ed824..0f9668a4079d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -319,8 +319,8 @@
#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
-#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
-#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2 BIT(0)
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 85155cd9405c..aa780b1614a3 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -179,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
/* Only return ad bits of the gw register */
ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+ /* The MDIO lock is set on read. To release it, clear gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
@@ -203,6 +206,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
5, 1000000);
+ /* The MDIO lock is set on read. To release it, clear gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
@@ -240,8 +246,8 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
}
priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
- if (IS_ERR(priv->clk_io))
- return PTR_ERR(priv->clk_io);
+ if (!priv->clk_io)
+ return -ENOMEM;
mlxbf_gige_mdio_cfg(priv);
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 00d8198072ae..a6f99b4344d9 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -397,6 +397,11 @@ static void mana_gd_process_eq_events(void *arg)
break;
}
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading eqe.
+ */
+ rmb();
+
mana_gd_process_eqe(eq);
eq->head++;
@@ -1134,6 +1139,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
if (WARN_ON_ONCE(owner_bits != new_bits))
return -1;
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading completion info
+ */
+ rmb();
+
comp->wq_num = cqe->cqe_info.wq_num;
comp->is_sq = cqe->cqe_info.is_sq;
memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 306026e6aa11..8f8005bc6eea 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -290,6 +290,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
if (!(vlan->portmask & BIT(port)))
continue;
+ /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
+ * because this is never active in hardware at the same time as
+ * the bridge VLANs, which only matter in VLAN-aware mode.
+ */
+ if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
+ continue;
+
if (vlan->untagged & BIT(port))
num_untagged++;
}
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b357ac4c56c5..7e32b04eb0c7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1449,6 +1449,8 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 67ade78fb767..7fd8828d3a84 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
phy_set_max_speed(phydev, SPEED_100);
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ee734b69150f..d1e1aa19a68e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4213,7 +4213,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 032b8c0bd788..5b4d661ab986 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 017212a40df3..f54ebd007286 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
index e166dcb9b99c..91e87594ed1e 100644
--- a/drivers/net/ethernet/sfc/siena/tx.c
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index d12474042c84..c5f88f7a7a04 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 592d29abcb1c..9083159b93f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3801,6 +3801,15 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
+ }
+
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -3904,6 +3913,10 @@ static int stmmac_release(struct net_device *dev)
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
+ /* Powerdown Serdes if there is */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
netif_carrier_off(dev);
stmmac_release_ptp(priv);
@@ -7293,14 +7306,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
- if (ret < 0)
- goto error_serdes_powerup;
- }
-
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
@@ -7315,8 +7320,6 @@ int stmmac_dvr_probe(struct device *device,
return ret;
-error_serdes_powerup:
- unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_xpcs_setup:
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 8594ee839628..88aa0d310aee 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2020,9 +2020,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 74e845fa2e07..aa8f828a0ae7 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -213,6 +213,7 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
pci_release_regions(pdev);
+ pci_disable_device(pdev);
out2:
free_netdev(dev);
out3:
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index ec010cf2e816..6f874f99b910 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 6838e8065072..75d3fc0092e9 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 495e85abe50b..9651aa59b596 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -86,9 +86,11 @@ enum ipa_platform_type {
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
-/* This defines the start and end offset of a range of memory. Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes, however it can also be a maximum
+ * array index value.
*/
struct ipa_mem_bounds {
u32 start;
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of
- * non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
- struct ipa_mem_array v4_route_tbl_info;
+ struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
- struct ipa_mem_array v6_route_tbl_info;
+ struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
- struct ipa_mem_array v4_hash_route_tbl_info;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
- struct ipa_mem_array v6_hash_route_tbl_info;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2f5a58bfc529..69efe672ca52 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -108,8 +108,6 @@
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_MODEM_COUNT 8
-
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index b6a9a0d79d68..1538e2e1732f 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -13,6 +13,9 @@ struct ipa;
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT 8
+
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index dfeb5b392e64..bb1c298c1e78 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb)
{
- struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
if (skb_mac_header_was_set(skb)) {
/* In this mode we dont care about
* multicast and broadcast traffic */
+ struct ethhdr *ethh = eth_hdr(skb);
+
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -589,7 +590,7 @@ out:
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 9e3c815a070f..796e9c7857d0 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
return 0;
unregister:
+ of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
index 605a38e16db0..0e58aa7f0374 100644
--- a/drivers/net/netdevsim/hwstats.c
+++ b/drivers/net/netdevsim/hwstats.c
@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
goto err_remove_hwstats_recursive;
}
- debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_enable_fops.fops);
- debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_disable_fops.fops);
- debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_fail_fops.fops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 8b7a46db30e0..7111e2e958e9 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -91,6 +91,9 @@
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
@@ -125,6 +128,12 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+/* Sleep and timeout for checking if the Processor-Intensive
+ * MDIO operation is finished
+ */
+#define AQR107_OP_IN_PROG_SLEEP 1000
+#define AQR107_OP_IN_PROG_TIMEOUT 100000
+
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -597,16 +606,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+{
+ int val, err;
+
+ /* The datasheet notes to wait at least 1ms after issuing a
+ * processor intensive operation before checking.
+ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+ * because that just determines the maximum time slept, not the minimum.
+ */
+ usleep_range(1000, 5000);
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_GEN_STAT2, val,
+ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (err) {
+ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int aqr107_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_resume(struct phy_device *phydev)
{
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6f52b4fb6888..38234d7e14c5 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2679,16 +2679,19 @@ static int lan8804_config_init(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
int irq_status, tsu_irq_status;
+ int ret = IRQ_NONE;
irq_status = phy_read(phydev, LAN8814_INTS);
- if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
- phy_trigger_machine(phydev);
-
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
}
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
while (1) {
tsu_irq_status = lanphy_read_page_reg(phydev, 4,
LAN8814_INTR_STS_REG);
@@ -2697,12 +2700,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
(tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
LAN8814_INTR_STS_REG_1588_TSU1_ |
LAN8814_INTR_STS_REG_1588_TSU2_ |
- LAN8814_INTR_STS_REG_1588_TSU3_)))
+ LAN8814_INTR_STS_REG_1588_TSU3_))) {
lan8814_handle_ptp_interrupt(phydev);
- else
+ ret = IRQ_HANDLED;
+ } else {
break;
+ }
}
- return IRQ_HANDLED;
+
+ return ret;
}
static int lan8814_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 12ff276b80ae..4df8c337221b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,11 +316,13 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
- /* If we manged to get here with the PHY state machine in a state neither
- * PHY_HALTED nor PHY_READY this is an indication that something went wrong
- * and we should most likely be using MAC managed PM and we are not.
+ /* If we managed to get here with the PHY state machine in a state
+ * neither PHY_HALTED, PHY_READY nor PHY_UP, this is an indication
+ * that something went wrong and we should most likely be using
+ * MAC managed PM, but we are not.
*/
- WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY &&
+ phydev->state != PHY_UP);
ret = phy_init_hw(phydev);
if (ret < 0)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index aac133a1e27a..154a3c0a6dfd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+ }
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
+ if (dev->flags & IFF_UP) {
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
+ }
dev_close(port_dev);
team_port_leave(team, port);
@@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ dev_uc_unsync(port->dev, dev);
+ dev_mc_unsync(port->dev, dev);
+ }
+
return 0;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 259b2b84b2b3..db736b944016 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2828,7 +2828,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
rcu_assign_pointer(tfile->tun, tun);
}
- netif_carrier_on(tun->dev);
+ if (ifr->ifr_flags & IFF_NO_CARRIER)
+ netif_carrier_off(tun->dev);
+ else
+ netif_carrier_on(tun->dev);
/* Make sure persistent devices do not get stuck in
* xoff state.
@@ -3056,8 +3059,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
* This is needed because we never checked for invalid flags on
* TUNSETIFF.
*/
- return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
- (unsigned int __user*)argp);
+ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
+ TUN_FEATURES, (unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE) {
return tun_set_queue(file, &ifr);
} else if (cmd == SIOCGSKNS) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 0cb187def5bc..26c34a7c21bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1402,6 +1402,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aaa89b4cfd50..e368b0780753 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1598,6 +1598,7 @@ void usbnet_disconnect (struct usb_interface *intf)
struct usbnet *dev;
struct usb_device *xdev;
struct net_device *net;
+ struct urb *urb;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -1614,7 +1615,11 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- usb_scuttle_anchored_urbs(&dev->deferred);
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ dev_kfree_skb(urb->context);
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
if (dev->driver_info->unbind)
dev->driver_info->unbind(dev, intf);
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index d0f3b6d7f408..5c804bcabfe6 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+ struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) &&
- addr->sa_family == AF_INET) ||
- (len == sizeof(struct sockaddr_in6) &&
- addr->sa_family == AF_INET6)) {
- struct endpoint endpoint = { { { 0 } } };
-
- memcpy(&endpoint.addr, addr, len);
+ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
+ endpoint.addr4 = *(struct sockaddr_in *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
+ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index ba87d294604f..d4bb40a695ab 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -6,29 +6,28 @@
#ifdef DEBUG
#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
static const struct {
bool result;
- u64 nsec_to_sleep_before;
+ unsigned int msec_to_sleep_before;
} expected_results[] __initconst = {
[0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
[PACKETS_BURSTABLE] = { false, 0 },
- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 2] = { false, 0 },
- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 4] = { true, 0 },
[PACKETS_BURSTABLE + 5] = { false, 0 }
};
static __init unsigned int maximum_jiffies_at_index(int index)
{
- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
int i;
for (i = 0; i <= index; ++i)
- total_nsecs += expected_results[i].nsec_to_sleep_before;
- return nsecs_to_jiffies(total_nsecs);
+ total_msecs += expected_results[i].msec_to_sleep_before;
+ return msecs_to_jiffies(total_msecs);
}
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
- if (expected_results[i].nsec_to_sleep_before) {
- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
- }
+ if (expected_results[i].msec_to_sleep_before)
+ msleep(expected_results[i].msec_to_sleep_before);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init())
goto out;
@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
++test;
#endif
- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index a647a406b87b..b20409f8c13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -140,6 +140,7 @@ config IWLMEI
depends on INTEL_MEI
depends on PM
depends on CFG80211
+ depends on BROKEN
help
Enables the iwlmei kernel module.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 5eb28f8ee87e..11536f115198 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1833,8 +1833,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
* If nss < MAX: we can set zeros in other streams
*/
if (nss > MAX_HE_SUPP_NSS) {
- IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
- MAX_HE_SUPP_NSS);
+ IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
nss = MAX_HE_SUPP_NSS;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 253cbc1956d1..6de13d641438 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -267,7 +267,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.tx_highest |=
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ad6c7d632eed..d6aae60c440d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1088,7 +1088,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
offset %= 32;
val = mt76_rr(dev, addr);
- val >>= (tid % 32);
+ val >>= offset;
if (offset > 20) {
addr += 4;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index bf4f5c09d9b1..bbe5099c836d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1712,8 +1712,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
res->flags = IORESOURCE_MEM;
for (i = 0; i < nd_region->ndr_mappings; i++) {
- uuid_t uuid;
-
nsl_get_uuid(ndd, nd_label, &uuid);
if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
continue;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7e88cd242380..96e6e9a5f235 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
+static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
return pmem->phys_addr + offset;
}
@@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
- phys_addr_t phys = to_phys(pmem, offset);
+ phys_addr_t phys = pmem_to_phys(pmem, offset);
unsigned long pfn_start, pfn_end, pfn;
/* only pmem in the linear map supports HWPoison */
@@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
static long __pmem_clear_poison(struct pmem_device *pmem,
phys_addr_t offset, unsigned int len)
{
- phys_addr_t phys = to_phys(pmem, offset);
+ phys_addr_t phys = pmem_to_phys(pmem, offset);
long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
if (cleared > 0) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 66446f1e06cf..8d5a7ae19844 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2162,14 +2162,14 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 98864b853eef..67d3335e9cc8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3470,6 +3470,10 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 77d1ba3a4154..e87567dbe99f 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -873,7 +873,7 @@ int dev_pm_opp_config_clks_simple(struct device *dev,
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 80d8309652a4..b80a9b74662b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -36,7 +36,7 @@
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
-#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
+#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
#define CMN_MAX_DIMENSION 12
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index a4d7d9bd100d..67712c77d806 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane {
int submode;
bool invert_tx;
bool invert_rx;
- bool needs_reset;
};
struct gbe_phy_init_data_fix {
@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane)
0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT);
}
-static int mvebu_a3700_comphy_reset(struct phy *phy)
+static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane)
{
- struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- u16 mask, data;
-
- dev_dbg(lane->dev, "resetting lane %d\n", lane->id);
-
- /* COMPHY reset for internal logic */
- comphy_lane_reg_set(lane, COMPHY_SFT_RESET,
- SFT_RST_NO_REG, SFT_RST_NO_REG);
-
- /* COMPHY register reset (cleared automatically) */
- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
-
- /* PIPE soft and register reset */
- data = PIPE_SOFT_RESET | PIPE_REG_RESET;
- mask = data;
- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask);
-
- /* Release PIPE register reset */
- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL,
- 0x0, PIPE_REG_RESET);
-
- /* Reset SB configuration register (only for lanes 0 and 1) */
- if (lane->id == 0 || lane->id == 1) {
- u32 mask, data;
-
- data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT |
- PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT;
- mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT;
- comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask);
- }
-
- return 0;
+ /*
+ * The USB3 MAC sets the USB3 PHY to low state, so we do not
+ * need to power off USB3 PHY again.
+ */
}
static bool mvebu_a3700_comphy_check_mode(int lane,
@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
(lane->mode != mode || lane->submode != submode))
return -EBUSY;
- /* If changing mode, ensure reset is called */
- if (lane->mode != PHY_MODE_INVALID && lane->mode != mode)
- lane->needs_reset = true;
-
/* Just remember the mode, ->power_on() will do the real setup */
lane->mode = mode;
lane->submode = submode;
@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
static int mvebu_a3700_comphy_power_on(struct phy *phy)
{
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- int ret;
if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode,
lane->submode)) {
@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
return -EINVAL;
}
- if (lane->needs_reset) {
- ret = mvebu_a3700_comphy_reset(phy);
- if (ret)
- return ret;
-
- lane->needs_reset = false;
- }
-
switch (lane->mode) {
case PHY_MODE_USB_HOST_SS:
dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id);
@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy)
{
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- switch (lane->mode) {
- case PHY_MODE_USB_HOST_SS:
- /*
- * The USB3 MAC sets the USB3 PHY to low state, so we do not
- * need to power off USB3 PHY again.
- */
- break;
-
- case PHY_MODE_SATA:
- mvebu_a3700_comphy_sata_power_off(lane);
- break;
-
- case PHY_MODE_ETHERNET:
+ switch (lane->id) {
+ case 0:
+ mvebu_a3700_comphy_usb3_power_off(lane);
mvebu_a3700_comphy_ethernet_power_off(lane);
- break;
-
- case PHY_MODE_PCIE:
+ return 0;
+ case 1:
mvebu_a3700_comphy_pcie_power_off(lane);
- break;
-
+ mvebu_a3700_comphy_ethernet_power_off(lane);
+ return 0;
+ case 2:
+ mvebu_a3700_comphy_usb3_power_off(lane);
+ mvebu_a3700_comphy_sata_power_off(lane);
+ return 0;
default:
dev_err(lane->dev, "invalid COMPHY mode\n");
return -EINVAL;
}
-
- return 0;
}
static const struct phy_ops mvebu_a3700_comphy_ops = {
.power_on = mvebu_a3700_comphy_power_on,
.power_off = mvebu_a3700_comphy_power_off,
- .reset = mvebu_a3700_comphy_reset,
.set_mode = mvebu_a3700_comphy_set_mode,
.owner = THIS_MODULE,
};
@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
* To avoid relying on the bootloader/firmware configuration,
* power off all comphys.
*/
- mvebu_a3700_comphy_reset(phy);
- lane->needs_reset = false;
+ mvebu_a3700_comphy_power_off(phy);
}
provider = devm_of_phy_provider_register(&pdev->dev,
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 185a333df66c..d2408725eb2c 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -329,6 +329,7 @@ static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MP_RESET_PCIEPHY_PERST:
value = assert ? 0 : bit;
break;
}
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index 00b612a0effa..f3528dd1d084 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -33,11 +33,8 @@ static struct regmap_config sparx5_reset_regmap_config = {
.reg_stride = 4,
};
-static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int sparx5_switch_reset(struct mchp_reset_context *ctx)
{
- struct mchp_reset_context *ctx =
- container_of(rcdev, struct mchp_reset_context, rcdev);
u32 val;
/* Make sure the core is PROTECTED from reset */
@@ -54,8 +51,14 @@ static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
1, 100);
}
+static int sparx5_reset_noop(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
static const struct reset_control_ops sparx5_reset_ops = {
- .reset = sparx5_switch_reset,
+ .reset = sparx5_reset_noop,
};
static int mchp_sparx5_map_syscon(struct platform_device *pdev, char *name,
@@ -122,6 +125,11 @@ static int mchp_sparx5_reset_probe(struct platform_device *pdev)
ctx->rcdev.of_node = dn;
ctx->props = device_get_match_data(&pdev->dev);
+ /* Issue the reset very early, our actual reset callback is a noop. */
+ err = sparx5_switch_reset(ctx);
+ if (err)
+ return err;
+
return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
}
@@ -163,6 +171,10 @@ static int __init mchp_sparx5_reset_init(void)
return platform_driver_register(&mchp_sparx5_reset_driver);
}
+/*
+ * Because this is a global reset, keep this postcore_initcall() to issue the
+ * reset as early as possible during the kernel startup.
+ */
postcore_initcall(mchp_sparx5_reset_init);
MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
index 24c55efa98e5..f2333506b0a6 100644
--- a/drivers/reset/reset-npcm.c
+++ b/drivers/reset/reset-npcm.c
@@ -291,7 +291,7 @@ static void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
iprst2 |= ipsrst2_bits;
iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
NPCM_IPSRST3_USBPHY2);
- iprst2 |= ipsrst4_bits;
+ iprst4 |= ipsrst4_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index dc78a523a69f..b6b938aa6615 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_eckd_private *alias_priv, *private = base_device->private;
- struct alias_pav_group *group = private->pavgroup;
struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
+ struct alias_pav_group *group;
unsigned long flags;
- if (!group || !lcu)
+ if (!lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
}
spin_lock_irqsave(&lcu->lock, flags);
+ group = private->pavgroup;
+ if (!group) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return NULL;
+ }
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 6c8c41fac4e1..ee82207b4e60 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -984,6 +984,11 @@ static ssize_t assign_adapter_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
+ goto done;
+ }
+
set_bit_inv(apid, matrix_mdev->matrix.apm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1109,6 +1114,11 @@ static ssize_t unassign_adapter_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
@@ -1183,6 +1193,11 @@ static ssize_t assign_domain_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
+ goto done;
+ }
+
set_bit_inv(apqi, matrix_mdev->matrix.aqm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1286,6 +1301,11 @@ static ssize_t unassign_domain_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
@@ -1329,6 +1349,11 @@ static ssize_t assign_control_domain_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
+
/* Set the bit in the ADM (bitmask) corresponding to the AP control
* domain number (id). The bits in the mask, from most significant to
* least significant, correspond to IDs 0 up to the one less than the
@@ -1378,6 +1403,11 @@ static ssize_t unassign_control_domain_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv(domid, matrix_mdev->matrix.adm);
if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9c82e5dc4fcc..a36fa1c128a8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -872,7 +872,8 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
- return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
+ return ata_change_queue_depth(dev->sata_dev.ap,
+ sas_to_ata_dev(dev), sdev, depth);
if (!sdev->tagged_supported)
depth = 1;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 565339a0811d..331e896d8225 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
if (ioc->is_mcpu_endpoint ||
sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
- dma_get_required_mask(&pdev->dev) <= 32)
+ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
ioc->dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3d6b137314f3..bbc4d5890ae6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3686,11 +3686,6 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
- if (qedf) {
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
- clear_bit(QEDF_PROBING, &qedf->flags);
- }
return rc;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 62666df1a59e..4acff4e84b90 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
le32_to_cpu(abts->exchange_addr_to_abort));
- if (!abort_cmd)
+ if (!abort_cmd) {
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
return -EIO;
+ }
mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
if (abort_cmd->qpair) {
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 1467bbd59690..e1d7b4543248 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -288,7 +288,6 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
- of_node_put(np);
return ret;
}
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index a8f3876963a0..09754cd1d57d 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = {
static struct sunxi_sram_desc sun50i_a64_sram_c = {
.data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
- SUNXI_SRAM_MAP(0, 1, "cpu"),
- SUNXI_SRAM_MAP(1, 0, "de2")),
+ SUNXI_SRAM_MAP(1, 0, "cpu"),
+ SUNXI_SRAM_MAP(0, 1, "de2")),
};
static const struct of_device_id sunxi_sram_dt_ids[] = {
@@ -254,6 +254,7 @@ int sunxi_sram_claim(struct device *dev)
writel(val | ((device << sram_data->offset) & mask),
base + sram_data->reg);
+ sram_desc->claimed = true;
spin_unlock(&sram_lock);
return 0;
@@ -329,11 +330,11 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
.writeable_reg = sunxi_sram_regmap_accessible_reg,
};
-static int sunxi_sram_probe(struct platform_device *pdev)
+static int __init sunxi_sram_probe(struct platform_device *pdev)
{
- struct dentry *d;
struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
+ struct device *dev = &pdev->dev;
sram_dev = &pdev->dev;
@@ -345,13 +346,6 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
- d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
- &sunxi_sram_fops);
- if (!d)
- return -ENOMEM;
-
if (variant->num_emac_clocks > 0) {
emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
&sunxi_sram_emac_clock_regmap);
@@ -360,6 +354,10 @@ static int sunxi_sram_probe(struct platform_device *pdev)
return PTR_ERR(emac_clock);
}
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
+
return 0;
}
@@ -409,9 +407,8 @@ static struct platform_driver sunxi_sram_driver = {
.name = "sunxi-sram",
.of_match_table = sunxi_sram_dt_match,
},
- .probe = sunxi_sram_probe,
};
-module_platform_driver(sunxi_sram_driver);
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 4af5a831bde0..4fc167b42cf0 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -1162,8 +1162,8 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
+ writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
+ writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index ae38f0d25a8d..572b5896caa3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2529,6 +2529,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
tb->cm_ops = &icm_icl_ops;
break;
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index f09da5b62233..01190d9ced16 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware.
*/
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index c63c1f4ff9dc..77d7f07ca075 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2413,6 +2413,7 @@ int tb_switch_configure(struct tb_switch *sw)
* additional capabilities.
*/
sw->config.cmuv = USB4_VERSION_1_0;
+ sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 0dcecbbc3967..f7fbef83583c 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1334,6 +1334,7 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = serial8250_em485_start_tx;
up.rs485_stop_tx = serial8250_em485_stop_tx;
up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index b20f6f2fa51c..fbc4b071b330 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2724,14 +2724,15 @@ static int lpuart_probe(struct platform_device *pdev)
lpuart_reg.cons = LPUART_CONSOLE;
handler = lpuart_int;
}
- ret = uart_add_one_port(&lpuart_reg, &sport->port);
- if (ret)
- goto failed_attach_port;
ret = lpuart_global_reset(sport);
if (ret)
goto failed_reset;
+ ret = uart_add_one_port(&lpuart_reg, &sport->port);
+ if (ret)
+ goto failed_attach_port;
+
ret = uart_get_rs485_mode(&sport->port);
if (ret)
goto failed_get_rs485;
@@ -2747,9 +2748,9 @@ static int lpuart_probe(struct platform_device *pdev)
failed_irq_request:
failed_get_rs485:
-failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
+failed_reset:
lpuart_disable_clks(sport);
return ret;
}
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index ad4f3567ff90..a5748e41483b 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
static void tegra_uart_stop_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned int count;
@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
}
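
These hunks (and the tegra-tcu one further below) replace the open-coded tail update with uart_xmit_advance(), which performs the same masked advance and also updates the port's transmitted-character accounting. A standalone model of the masked advance itself (BUF_SIZE and struct ring are made-up stand-ins, not kernel types):

#include <stdio.h>

#define BUF_SIZE 4096u          /* power of two, like UART_XMIT_SIZE */

struct ring {
        unsigned int head;
        unsigned int tail;
};

static void xmit_advance(struct ring *r, unsigned int count)
{
        /* masking only works because BUF_SIZE is a power of two */
        r->tail = (r->tail + count) & (BUF_SIZE - 1);
}

int main(void)
{
        struct ring r = { .head = 100, .tail = BUF_SIZE - 6 };

        xmit_advance(&r, 10);                    /* wraps around the end */
        printf("tail after wrap: %u\n", r.tail); /* prints 4 */
        return 0;
}
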
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 5c3a07546a58..4b1d4fe8458e 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -945,7 +945,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "unable to find controller clock\n");
return PTR_ERR(clk);
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 4877c54c613d..889b701ba7c6 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(port, count);
}
uart_write_wakeup(port);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d4b1e70d1498..bbab424b0d55 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6039,7 +6039,7 @@ re_enumerate:
*
* Return: The same as for usb_reset_and_verify_device().
* However, if a reset is already in progress (for instance, if a
- * driver doesn't have pre_ or post_reset() callbacks, and while
+ * driver doesn't have pre_reset() or post_reset() callbacks, and while
* being unbound or re-bound during the ongoing reset its disconnect()
* or probe() routine tries to perform a second, nested reset), the
* routine returns -EINPROGRESS.
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 8c8e32651473..d0237b30c9be 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1752,12 +1752,6 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- if (!dwc->sysdev_is_parent) {
- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
- if (ret)
- return ret;
- }
-
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
@@ -1823,6 +1817,13 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
+ if (!dwc->sysdev_is_parent &&
+ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_clks;
+ }
+
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 166b5bde45cb..6c14a79279f9 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -251,7 +251,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
/* Manage SoftReset */
reset_control_deassert(dwc3_data->rstc_rst);
- child = of_get_child_by_name(node, "dwc3");
+ child = of_get_child_by_name(node, "usb");
if (!child) {
dev_err(&pdev->dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a5e8374a8d71..697683e3fbff 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+ .driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 23ab3b048d9b..251778d14e2d 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Hongling Zeng <[email protected]> */
+UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
@@ -76,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
+/* Reported-by: Hongling Zeng <[email protected]> */
+UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Benjamin Tissoires <[email protected]> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
@@ -118,6 +132,13 @@ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Hongling Zeng <[email protected]> */
+UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999,
+ "Thinkplus",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hans de Goede <[email protected]> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 5defdfead653..831e7049977d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,7 @@ config TYPEC_ANX7411
tristate "Analogix ANX7411 Type-C DRP Port controller driver"
depends on I2C
depends on USB_ROLE_SWITCH
+ depends on POWER_SUPPLY
help
Say Y or M here if your system has Analogix ANX7411 Type-C DRP Port
controller driver.
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 7f2624f42724..6364f0d467ea 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -588,8 +588,6 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
- if (ret == 0 && offset == 0)
- dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
return ret;
}
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 75a703b803a2..3e4486bfa0b7 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -323,7 +323,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
last_avail_idx = vp_ioread16(avail_idx_addr);
@@ -337,7 +337,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
hw->vring[qid].last_avail_idx = num;
vp_iowrite16(num, avail_idx_addr);
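
The fix derives the queue-pair index as qid / 2 (each RX/TX pair holds two virtqueues) instead of dividing by the total ring count, which had collapsed every queue onto pair 0 whenever nr_vring exceeded qid. A standalone check of the two formulas (nr_vring = 6 is chosen only for illustration):

#include <stdio.h>

int main(void)
{
        unsigned int nr_vring = 6;
        unsigned int qid;

        for (qid = 0; qid < nr_vring; qid++)
                printf("qid %u: old pair %u, fixed pair %u, slot %u\n",
                       qid, qid / nr_vring, qid / 2, qid % 2);
        return 0;
}
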
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index ed100a35e596..90913365def4 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1320,6 +1320,8 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
+ int rqt_table_size = roundup_pow_of_two(ndev->rqt_size);
+ int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1327,7 +1329,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + rqt_table_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1336,12 +1338,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, rqt_table_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1354,6 +1356,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
+ int act_sz = roundup_pow_of_two(num / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1361,7 +1364,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + act_sz * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1372,10 +1375,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j = j + 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 41c0b29739f1..35dceee3ed56 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -673,10 +673,15 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
- if (offset > dev->config_size ||
- len > dev->config_size - offset)
+ /* Initialize the buffer in case of partial copy. */
+ memset(buf, 0, len);
+
+ if (offset > dev->config_size)
return;
+ if (len > dev->config_size - offset)
+ len = dev->config_size - offset;
+
memcpy(buf, dev->config + offset, len);
}
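
The new logic gives the caller a fully initialized buffer and clamps len to the bytes that exist past offset, so a partially out-of-range read returns what is available instead of nothing. A standalone model of that bounds handling (get_config() and the sizes are illustrative, not the VDUSE interface):

#include <stdio.h>
#include <string.h>

static void get_config(const unsigned char *cfg, size_t cfg_size,
                       size_t offset, unsigned char *buf, size_t len)
{
        memset(buf, 0, len);             /* defined contents even on a short copy */

        if (offset > cfg_size)
                return;
        if (len > cfg_size - offset)
                len = cfg_size - offset; /* partial copy instead of no copy */

        memcpy(buf, cfg + offset, len);
}

int main(void)
{
        unsigned char cfg[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char buf[8];
        size_t i;

        get_config(cfg, sizeof(cfg), 5, buf, sizeof(buf));

        for (i = 0; i < sizeof(buf); i++)
                printf("%u ", buf[i]);   /* 6 7 8 0 0 0 0 0 */
        printf("\n");
        return 0;
}
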
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d5f3f763717e..d4b251925796 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
grant_ref_t gref_head;
unsigned int i;
+ void *addr;
int ret;
- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
if (!*vaddr) {
ret = -ENOMEM;
goto err;
@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long gfn;
if (is_vmalloc_addr(*vaddr))
- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
+ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
else
- gfn = virt_to_gfn(vaddr[i]);
+ gfn = virt_to_gfn(addr);
grefs[i] = gnttab_claim_grant_reference(&gref_head);
gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
gfn, 0);
+
+ addr += XEN_PAGE_SIZE;
}
return 0;
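
The fix keeps a separate byte cursor and advances it by XEN_PAGE_SIZE per grant, rather than indexing the caller's single vaddr out-pointer. A standalone model of walking one contiguous allocation page by page (PAGE_SZ and the printout are illustrative; in the patch this loop is where each page's frame is granted to the other end):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096u

int main(void)
{
        unsigned int nr_pages = 3;
        unsigned char *ring = calloc(nr_pages, PAGE_SZ);
        unsigned char *addr = ring;
        unsigned int i;

        if (!ring)
                return 1;

        for (i = 0; i < nr_pages; i++) {
                printf("page %u at offset %zu\n", i, (size_t)(addr - ring));
                addr += PAGE_SZ;        /* per-page cursor, not vaddr[i] */
        }

        free(ring);
        return 0;
}
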
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1af28b066b42..2633137c3e9f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4475,6 +4475,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
/*
+ * If we had UNFINISHED_DROPS we could still be processing them, so
+ * clear that bit and wake up relocation so it can stop.
+ * We must do this before stopping the block group reclaim task, because
+ * at btrfs_relocate_block_group() we wait for this bit, and after the
+ * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
+ * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
+ * return 1.
+ */
+ btrfs_wake_unfinished_drop(fs_info);
+
+ /*
* We may have the reclaim task running and relocating a data block group,
* in which case it may create delayed iputs. So stop it before we park
* the cleaner kthread otherwise we can get new delayed iputs after
@@ -4492,12 +4503,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
*/
kthread_park(fs_info->cleaner_kthread);
- /*
- * If we had UNFINISHED_DROPS we could still be processing them, so
- * clear that bit and wake up relocation so it can stop.
- */
- btrfs_wake_unfinished_drop(fs_info);
-
/* wait for the qgroup rescan worker to stop */
btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -4520,6 +4525,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
/* clear out the rbtree of defraggable inodes */
btrfs_cleanup_defrag_inodes(fs_info);
+ /*
+ * After we parked the cleaner kthread, ordered extents may have
+ * completed and created new delayed iputs. If one of the async reclaim
+ * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
+ * can hang forever trying to stop it, because if a delayed iput is
+ * added after it ran btrfs_run_delayed_iputs() and before it called
+ * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
+ * no one else to run iputs.
+ *
+ * So wait for all ongoing ordered extents to complete and then run
+ * delayed iputs. This works because once we reach this point no one
+ * can either create new ordered extents nor create delayed iputs
+ * through some other means.
+ *
+ * Also note that btrfs_wait_ordered_roots() is not safe here, because
+ * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
+ * but the delayed iput for the respective inode is made only when doing
+ * the final btrfs_put_ordered_extent() (which must happen at
+ * btrfs_finish_ordered_io() when we are unmounting).
+ */
+ btrfs_flush_workqueue(fs_info->endio_write_workers);
+ /* Ordered extents for free space inodes. */
+ btrfs_flush_workqueue(fs_info->endio_freespace_worker);
+ btrfs_run_delayed_iputs(fs_info);
+
cancel_work_sync(&fs_info->async_reclaim_work);
cancel_work_sync(&fs_info->async_data_reclaim_work);
cancel_work_sync(&fs_info->preempt_reclaim_work);
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 62e7007a7e46..73c6929f7be6 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1918,10 +1918,44 @@ out_unlock:
return ret;
}
+static void wait_eb_writebacks(struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ const u64 end = block_group->start + block_group->length;
+ struct radix_tree_iter iter;
+ struct extent_buffer *eb;
+ void __rcu **slot;
+
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
+ block_group->start >> fs_info->sectorsize_bits) {
+ eb = radix_tree_deref_slot(slot);
+ if (!eb)
+ continue;
+ if (radix_tree_deref_retry(eb)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+
+ if (eb->start < block_group->start)
+ continue;
+ if (eb->start >= end)
+ break;
+
+ slot = radix_tree_iter_resume(slot, &iter);
+ rcu_read_unlock();
+ wait_on_extent_buffer_writeback(eb);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+}
+
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct map_lookup *map;
+ const bool is_metadata = (block_group->flags &
+ (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
int ret = 0;
int i;
@@ -1932,8 +1966,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
}
/* Check if we have unwritten allocated space */
- if ((block_group->flags &
- (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
+ if (is_metadata &&
block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
spin_unlock(&block_group->lock);
return -EAGAIN;
@@ -1958,6 +1991,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
/* No need to wait for NOCOW writers. Zoned mode does not allow that */
btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
block_group->length);
+ /* Wait for extent buffers to be written. */
+ if (is_metadata)
+ wait_eb_writebacks(block_group);
spin_lock(&block_group->lock);
diff --git a/fs/coredump.c b/fs/coredump.c
index 9f4aae202109..1ab4f5b76a1e 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -832,6 +832,38 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
}
}
+static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+{
+ struct bio_vec bvec = {
+ .bv_page = page,
+ .bv_offset = 0,
+ .bv_len = PAGE_SIZE,
+ };
+ struct iov_iter iter;
+ struct file *file = cprm->file;
+ loff_t pos = file->f_pos;
+ ssize_t n;
+
+ if (cprm->to_skip) {
+ if (!__dump_skip(cprm, cprm->to_skip))
+ return 0;
+ cprm->to_skip = 0;
+ }
+ if (cprm->written + PAGE_SIZE > cprm->limit)
+ return 0;
+ if (dump_interrupted())
+ return 0;
+ iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
+ n = __kernel_write_iter(cprm->file, &iter, &pos);
+ if (n != PAGE_SIZE)
+ return 0;
+ file->f_pos = pos;
+ cprm->written += PAGE_SIZE;
+ cprm->pos += PAGE_SIZE;
+
+ return 1;
+}
+
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
if (cprm->to_skip) {
@@ -863,7 +895,6 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
struct page *page;
- int stop;
/*
* To avoid having to allocate page tables for virtual address
@@ -874,10 +905,7 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
*/
page = get_dump_page(addr);
if (page) {
- void *kaddr = kmap_local_page(page);
-
- stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
- kunmap_local(kaddr);
+ int stop = !dump_emit_page(cprm, page);
put_page(page);
if (stop)
return 0;
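
dump_emit_page() above writes one full page through a bio_vec-backed iov_iter while keeping the usual coredump bookkeeping: flush any pending skip, stop once the core size limit would be exceeded, then advance written and pos by one page. A simplified standalone model of just that accounting (the actual file write and iterator are reduced to comments):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SZ 4096ul

struct dump_state {
        unsigned long to_skip;
        unsigned long written;
        unsigned long pos;
        unsigned long limit;
};

static bool emit_page(struct dump_state *d)
{
        if (d->to_skip) {
                /* the kernel writes zeroes or seeks here (__dump_skip) */
                d->to_skip = 0;
        }
        if (d->written + PAGE_SZ > d->limit)
                return false;           /* core size limit reached */

        /* ...write one page (__kernel_write_iter() over a bio_vec)... */
        d->written += PAGE_SZ;
        d->pos += PAGE_SZ;
        return true;
}

int main(void)
{
        struct dump_state d = { .to_skip = 512, .limit = 2 * PAGE_SZ };
        unsigned int pages = 0;

        while (emit_page(&d))
                pages++;

        printf("emitted %u pages, written=%lu\n", pages, d.written);
        return 0;
}
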
diff --git a/fs/dax.c b/fs/dax.c
index c440dcef4b1b..1c6867810cbd 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1445,6 +1445,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
loff_t done = 0;
int ret;
+ if (!iomi.len)
+ return 0;
+
if (iov_iter_rw(iter) == WRITE) {
lockdep_assert_held_write(&iomi.inode->i_rwsem);
iomi.flags |= IOMAP_WRITE;
diff --git a/fs/exec.c b/fs/exec.c
index 9a5ca7b82bfc..d046dbb9cbd0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -65,7 +65,6 @@
#include <linux/io_uring.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/coredump.h>
-#include <linux/time_namespace.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -979,12 +978,10 @@ static int exec_mmap(struct mm_struct *mm)
{
struct task_struct *tsk;
struct mm_struct *old_mm, *active_mm;
- bool vfork;
int ret;
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
- vfork = !!tsk->vfork_done;
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
if (old_mm)
@@ -1029,10 +1026,6 @@ static int exec_mmap(struct mm_struct *mm)
tsk->mm->vmacache_seqnum = 0;
vmacache_flush(tsk);
task_unlock(tsk);
-
- if (vfork)
- timens_on_fork(tsk->nsproxy, tsk);
-
if (old_mm) {
mmap_read_unlock(old_mm);
BUG_ON(active_mm != old_mm);
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index ee0b7cf51157..41ae4cce1f42 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -270,8 +270,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct buffer_head *bh;
- sector_t blknr, last_blknr;
- int i;
+ sector_t blknr, last_blknr, i;
blknr = exfat_cluster_to_sector(sbi, clu);
last_blknr = blknr + sbi->sect_per_clus;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 9bca5565547b..3bf9a6926798 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -167,8 +167,6 @@ enum SHIFT_DIRECTION {
#define EXT4_MB_CR0_OPTIMIZED 0x8000
/* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
#define EXT4_MB_CR1_OPTIMIZED 0x00010000
-/* Perform linear traversal for one group */
-#define EXT4_MB_SEARCH_NEXT_LINEAR 0x00020000
struct ext4_allocation_request {
/* target inode for block we're allocating */
struct inode *inode;
@@ -1600,8 +1598,8 @@ struct ext4_sb_info {
struct list_head s_discard_list;
struct work_struct s_discard_work;
atomic_t s_retry_alloc_pending;
- struct rb_root s_mb_avg_fragment_size_root;
- rwlock_t s_mb_rb_lock;
+ struct list_head *s_mb_avg_fragment_size;
+ rwlock_t *s_mb_avg_fragment_size_locks;
struct list_head *s_mb_largest_free_orders;
rwlock_t *s_mb_largest_free_orders_locks;
@@ -3413,6 +3411,8 @@ struct ext4_group_info {
ext4_grpblk_t bb_first_free; /* first free block */
ext4_grpblk_t bb_free; /* total free blocks */
ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
+ int bb_avg_fragment_size_order; /* order of average
+ fragment in BG */
ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
ext4_group_t bb_group; /* Group number */
struct list_head bb_prealloc_list;
@@ -3420,7 +3420,7 @@ struct ext4_group_info {
void *bb_bitmap;
#endif
struct rw_semaphore alloc_sem;
- struct rb_node bb_avg_fragment_size_rb;
+ struct list_head bb_avg_fragment_size_node;
struct list_head bb_largest_free_order_node;
ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block
* regions, index is order.
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c148bb97b527..5235974126bd 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -460,6 +460,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid eh_entries";
goto corrupted;
}
+ if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
+ error_msg = "eh_entries is 0 but eh_depth is > 0";
+ goto corrupted;
+ }
if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f73e5eb43eae..208b87ce8858 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -510,7 +510,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
goto fallback;
}
- max_dirs = ndirs / ngroups + inodes_per_group / 16;
+ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
min_inodes = avefreei - inodes_per_group*flex_size / 4;
if (min_inodes < 1)
min_inodes = 1;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index bd8f8b5c3d30..9dad93059945 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -140,13 +140,15 @@
* number of buddy bitmap orders possible) number of lists. Group-infos are
* placed in appropriate lists.
*
- * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
+ * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
*
- * Locking: sbi->s_mb_rb_lock (rwlock)
+ * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
*
- * This is a red black tree consisting of group infos and the tree is sorted
- * by average fragment sizes (which is calculated as ext4_group_info->bb_free
- * / ext4_group_info->bb_fragments).
+ * This is an array of lists where in the i-th list there are groups with
+ * average fragment size >= 2^i and < 2^(i+1). The average fragment size
+ * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
+ * Note that we don't bother with a special list for completely empty groups
+ * so we only have MB_NUM_ORDERS(sb) lists.
*
* When "mb_optimize_scan" mount option is set, mballoc consults the above data
* structures to decide the order in which groups are to be traversed for
@@ -160,7 +162,8 @@
*
* At CR = 1, we only consider groups where average fragment size > request
* size. So, we lookup a group which has average fragment size just above or
- * equal to request size using our rb tree (data structure 2) in O(log N) time.
+ * equal to request size using our average fragment size group lists (data
+ * structure 2) in O(1) time.
*
* If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
* linear order which requires O(N) search time for each CR 0 and CR 1 phase.
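
A group is filed on the list matching its average fragment size; the mb_avg_fragment_size_order() helper added further down computes that list index as fls(len) - 2, clamped to 0 for one-block fragments and capped at the last list. A standalone check of that mapping (fls32() and NUM_LISTS are userspace stand-ins for the kernel's fls() and MB_NUM_ORDERS(sb); 14 is an arbitrary illustrative value):

#include <stdio.h>

#define NUM_LISTS 14                    /* stand-in for MB_NUM_ORDERS(sb) */

static int fls32(unsigned int x)        /* highest set bit, 1-based; 0 for 0 */
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static int avg_fragment_size_order(unsigned int len)
{
        int order = fls32(len) - 2;

        if (order < 0)
                return 0;
        if (order == NUM_LISTS)
                order--;
        return order;
}

int main(void)
{
        unsigned int samples[] = { 1, 2, 3, 4, 7, 8, 15, 8192, 32768 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("avg fragment size %5u blocks -> list %d\n",
                       samples[i], avg_fragment_size_order(samples[i]));
        return 0;
}
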
@@ -802,65 +805,51 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
}
}
-static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
- int (*cmp)(struct rb_node *, struct rb_node *))
+static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
- struct rb_node **iter = &root->rb_node, *parent = NULL;
+ int order;
- while (*iter) {
- parent = *iter;
- if (cmp(new, *iter) > 0)
- iter = &((*iter)->rb_left);
- else
- iter = &((*iter)->rb_right);
- }
-
- rb_link_node(new, parent, iter);
- rb_insert_color(new, root);
-}
-
-static int
-ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
-{
- struct ext4_group_info *grp1 = rb_entry(rb1,
- struct ext4_group_info,
- bb_avg_fragment_size_rb);
- struct ext4_group_info *grp2 = rb_entry(rb2,
- struct ext4_group_info,
- bb_avg_fragment_size_rb);
- int num_frags_1, num_frags_2;
-
- num_frags_1 = grp1->bb_fragments ?
- grp1->bb_free / grp1->bb_fragments : 0;
- num_frags_2 = grp2->bb_fragments ?
- grp2->bb_free / grp2->bb_fragments : 0;
-
- return (num_frags_2 - num_frags_1);
+ /*
+ * We don't bother with a special lists groups with only 1 block free
+ * extents and for completely empty groups.
+ */
+ order = fls(len) - 2;
+ if (order < 0)
+ return 0;
+ if (order == MB_NUM_ORDERS(sb))
+ order--;
+ return order;
}
-/*
- * Reinsert grpinfo into the avg_fragment_size tree with new average
- * fragment size.
- */
+/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int new_order;
if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
return;
- write_lock(&sbi->s_mb_rb_lock);
- if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
- rb_erase(&grp->bb_avg_fragment_size_rb,
- &sbi->s_mb_avg_fragment_size_root);
- RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
- }
+ new_order = mb_avg_fragment_size_order(sb,
+ grp->bb_free / grp->bb_fragments);
+ if (new_order == grp->bb_avg_fragment_size_order)
+ return;
- ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
- &grp->bb_avg_fragment_size_rb,
- ext4_mb_avg_fragment_size_cmp);
- write_unlock(&sbi->s_mb_rb_lock);
+ if (grp->bb_avg_fragment_size_order != -1) {
+ write_lock(&sbi->s_mb_avg_fragment_size_locks[
+ grp->bb_avg_fragment_size_order]);
+ list_del(&grp->bb_avg_fragment_size_node);
+ write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+ grp->bb_avg_fragment_size_order]);
+ }
+ grp->bb_avg_fragment_size_order = new_order;
+ write_lock(&sbi->s_mb_avg_fragment_size_locks[
+ grp->bb_avg_fragment_size_order]);
+ list_add_tail(&grp->bb_avg_fragment_size_node,
+ &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
+ write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+ grp->bb_avg_fragment_size_order]);
}
/*
@@ -909,86 +898,55 @@ static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
*new_cr = 1;
} else {
*group = grp->bb_group;
- ac->ac_last_optimal_group = *group;
ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
}
}
/*
- * Choose next group by traversing average fragment size tree. Updates *new_cr
- * if cr lvel needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
- * the linear search should continue for one iteration since there's lock
- * contention on the rb tree lock.
+ * Choose next group by traversing average fragment size list of suitable
+ * order. Updates *new_cr if cr level needs an update.
*/
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- int avg_fragment_size, best_so_far;
- struct rb_node *node, *found;
- struct ext4_group_info *grp;
-
- /*
- * If there is contention on the lock, instead of waiting for the lock
- * to become available, just continue searching lineraly. We'll resume
- * our rb tree search later starting at ac->ac_last_optimal_group.
- */
- if (!read_trylock(&sbi->s_mb_rb_lock)) {
- ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
- return;
- }
+ struct ext4_group_info *grp = NULL, *iter;
+ int i;
if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
if (sbi->s_mb_stats)
atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
- /* We have found something at CR 1 in the past */
- grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
- for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
- found = rb_next(found)) {
- grp = rb_entry(found, struct ext4_group_info,
- bb_avg_fragment_size_rb);
+ }
+
+ for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+ i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+ if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
+ continue;
+ read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
+ if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
+ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
+ continue;
+ }
+ list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
+ bb_avg_fragment_size_node) {
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
- if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
+ if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
+ grp = iter;
break;
- }
- goto done;
- }
-
- node = sbi->s_mb_avg_fragment_size_root.rb_node;
- best_so_far = 0;
- found = NULL;
-
- while (node) {
- grp = rb_entry(node, struct ext4_group_info,
- bb_avg_fragment_size_rb);
- avg_fragment_size = 0;
- if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
- avg_fragment_size = grp->bb_fragments ?
- grp->bb_free / grp->bb_fragments : 0;
- if (!best_so_far || avg_fragment_size < best_so_far) {
- best_so_far = avg_fragment_size;
- found = node;
}
}
- if (avg_fragment_size > ac->ac_g_ex.fe_len)
- node = node->rb_right;
- else
- node = node->rb_left;
+ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
+ if (grp)
+ break;
}
-done:
- if (found) {
- grp = rb_entry(found, struct ext4_group_info,
- bb_avg_fragment_size_rb);
+ if (grp) {
*group = grp->bb_group;
ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
} else {
*new_cr = 2;
}
-
- read_unlock(&sbi->s_mb_rb_lock);
- ac->ac_last_optimal_group = *group;
}
static inline int should_optimize_scan(struct ext4_allocation_context *ac)
@@ -1017,11 +975,6 @@ next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
goto inc_and_return;
}
- if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
- ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
- goto inc_and_return;
- }
-
return group;
inc_and_return:
/*
@@ -1049,8 +1002,10 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
{
*new_cr = ac->ac_criteria;
- if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
+ if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
+ *group = next_linear_group(ac, *group, ngroups);
return;
+ }
if (*new_cr == 0) {
ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
@@ -1075,23 +1030,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
struct ext4_sb_info *sbi = EXT4_SB(sb);
int i;
- if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
+ for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
+ if (grp->bb_counters[i] > 0)
+ break;
+ /* No need to move between order lists? */
+ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
+ i == grp->bb_largest_free_order) {
+ grp->bb_largest_free_order = i;
+ return;
+ }
+
+ if (grp->bb_largest_free_order >= 0) {
write_lock(&sbi->s_mb_largest_free_orders_locks[
grp->bb_largest_free_order]);
list_del_init(&grp->bb_largest_free_order_node);
write_unlock(&sbi->s_mb_largest_free_orders_locks[
grp->bb_largest_free_order]);
}
- grp->bb_largest_free_order = -1; /* uninit */
-
- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
- if (grp->bb_counters[i] > 0) {
- grp->bb_largest_free_order = i;
- break;
- }
- }
- if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
- grp->bb_largest_free_order >= 0 && grp->bb_free) {
+ grp->bb_largest_free_order = i;
+ if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
write_lock(&sbi->s_mb_largest_free_orders_locks[
grp->bb_largest_free_order]);
list_add_tail(&grp->bb_largest_free_order_node,
@@ -1148,13 +1105,13 @@ void ext4_mb_generate_buddy(struct super_block *sb,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
}
mb_set_largest_free_order(sb, grp);
+ mb_update_avg_fragment_size(sb, grp);
clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
period = get_cycles() - period;
atomic_inc(&sbi->s_mb_buddies_generated);
atomic64_add(period, &sbi->s_mb_generation_time);
- mb_update_avg_fragment_size(sb, grp);
}
/* The buddy information is attached the buddy cache inode
@@ -2636,7 +2593,7 @@ static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
ext4_group_t prefetch_grp = 0, ngroups, group, i;
- int cr = -1;
+ int cr = -1, new_cr;
int err = 0, first_err = 0;
unsigned int nr = 0, prefetch_ios = 0;
struct ext4_sb_info *sbi;
@@ -2707,17 +2664,14 @@ repeat:
* from the goal value specified
*/
group = ac->ac_g_ex.fe_group;
- ac->ac_last_optimal_group = group;
ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
prefetch_grp = group;
- for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
- i++) {
- int ret = 0, new_cr;
+ for (i = 0, new_cr = cr; i < ngroups; i++,
+ ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
+ int ret = 0;
cond_resched();
-
- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
if (new_cr != cr) {
cr = new_cr;
goto repeat;
@@ -2991,9 +2945,7 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
struct super_block *sb = pde_data(file_inode(seq->file));
unsigned long position;
- read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
-
- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
+ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
return NULL;
position = *pos + 1;
return (void *) ((unsigned long) position);
@@ -3005,7 +2957,7 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof
unsigned long position;
++*pos;
- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
+ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
return NULL;
position = *pos + 1;
return (void *) ((unsigned long) position);
@@ -3017,29 +2969,22 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned long position = ((unsigned long) v);
struct ext4_group_info *grp;
- struct rb_node *n;
- unsigned int count, min, max;
+ unsigned int count;
position--;
if (position >= MB_NUM_ORDERS(sb)) {
- seq_puts(seq, "fragment_size_tree:\n");
- n = rb_first(&sbi->s_mb_avg_fragment_size_root);
- if (!n) {
- seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
- return 0;
- }
- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
- min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
- count = 1;
- while (rb_next(n)) {
- count++;
- n = rb_next(n);
- }
- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
- max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
+ position -= MB_NUM_ORDERS(sb);
+ if (position == 0)
+ seq_puts(seq, "avg_fragment_size_lists:\n");
- seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
- min, max, count);
+ count = 0;
+ read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
+ list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
+ bb_avg_fragment_size_node)
+ count++;
+ read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
+ seq_printf(seq, "\tlist_order_%u_groups: %u\n",
+ (unsigned int)position, count);
return 0;
}
@@ -3049,9 +2994,11 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
seq_puts(seq, "max_free_order_lists:\n");
}
count = 0;
+ read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
bb_largest_free_order_node)
count++;
+ read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
seq_printf(seq, "\tlist_order_%u_groups: %u\n",
(unsigned int)position, count);
@@ -3059,11 +3006,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
}
static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
-__releases(&EXT4_SB(sb)->s_mb_rb_lock)
{
- struct super_block *sb = pde_data(file_inode(seq->file));
-
- read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
}
const struct seq_operations ext4_mb_seq_structs_summary_ops = {
@@ -3176,8 +3119,9 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
init_rwsem(&meta_group_info[i]->alloc_sem);
meta_group_info[i]->bb_free_root = RB_ROOT;
INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
- RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
+ INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
+ meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
meta_group_info[i]->bb_group = group;
mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
@@ -3426,7 +3370,24 @@ int ext4_mb_init(struct super_block *sb)
i++;
} while (i < MB_NUM_ORDERS(sb));
- sbi->s_mb_avg_fragment_size_root = RB_ROOT;
+ sbi->s_mb_avg_fragment_size =
+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!sbi->s_mb_avg_fragment_size) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ sbi->s_mb_avg_fragment_size_locks =
+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
+ GFP_KERNEL);
+ if (!sbi->s_mb_avg_fragment_size_locks) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
+ INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
+ rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
+ }
sbi->s_mb_largest_free_orders =
kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
GFP_KERNEL);
@@ -3445,7 +3406,6 @@ int ext4_mb_init(struct super_block *sb)
INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
}
- rwlock_init(&sbi->s_mb_rb_lock);
spin_lock_init(&sbi->s_md_lock);
sbi->s_mb_free_pending = 0;
@@ -3516,6 +3476,8 @@ out_free_locality_groups:
free_percpu(sbi->s_locality_groups);
sbi->s_locality_groups = NULL;
out:
+ kfree(sbi->s_mb_avg_fragment_size);
+ kfree(sbi->s_mb_avg_fragment_size_locks);
kfree(sbi->s_mb_largest_free_orders);
kfree(sbi->s_mb_largest_free_orders_locks);
kfree(sbi->s_mb_offsets);
@@ -3582,6 +3544,8 @@ int ext4_mb_release(struct super_block *sb)
kvfree(group_info);
rcu_read_unlock();
}
+ kfree(sbi->s_mb_avg_fragment_size);
+ kfree(sbi->s_mb_avg_fragment_size_locks);
kfree(sbi->s_mb_largest_free_orders);
kfree(sbi->s_mb_largest_free_orders_locks);
kfree(sbi->s_mb_offsets);
@@ -5193,6 +5157,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
int bsbits = ac->ac_sb->s_blocksize_bits;
loff_t size, isize;
+ bool inode_pa_eligible, group_pa_eligible;
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
return;
@@ -5200,25 +5165,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
return;
+ group_pa_eligible = sbi->s_mb_group_prealloc > 0;
+ inode_pa_eligible = true;
size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
+ /* No point in using inode preallocation for closed files */
if ((size == isize) && !ext4_fs_is_busy(sbi) &&
- !inode_is_open_for_write(ac->ac_inode)) {
- ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
- return;
- }
+ !inode_is_open_for_write(ac->ac_inode))
+ inode_pa_eligible = false;
- if (sbi->s_mb_group_prealloc <= 0) {
- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
- return;
- }
-
- /* don't use group allocation for large files */
size = max(size, isize);
- if (size > sbi->s_mb_stream_request) {
- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ /* Don't use group allocation for large files */
+ if (size > sbi->s_mb_stream_request)
+ group_pa_eligible = false;
+
+ if (!group_pa_eligible) {
+ if (inode_pa_eligible)
+ ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ else
+ ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
return;
}
@@ -5565,6 +5532,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
ext4_fsblk_t block = 0;
unsigned int inquota = 0;
unsigned int reserv_clstrs = 0;
+ int retries = 0;
u64 seq;
might_sleep();
@@ -5667,7 +5635,8 @@ repeat:
ar->len = ac->ac_b_ex.fe_len;
}
} else {
- if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
+ if (++retries < 3 &&
+ ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
goto repeat;
/*
* If block allocation fails then the pa allocated above
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 39da92ceabf8..dcda2a943cee 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -178,7 +178,6 @@ struct ext4_allocation_context {
/* copy of the best found extent taken before preallocation efforts */
struct ext4_free_extent ac_f_ex;
- ext4_group_t ac_last_optimal_group;
__u32 ac_groups_considered;
__u32 ac_flags; /* allocation hints */
__u16 ac_groups_scanned;
diff --git a/fs/internal.h b/fs/internal.h
index 87e96b9024ce..3e206d3e317c 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -16,6 +16,7 @@ struct shrink_control;
struct fs_context;
struct user_namespace;
struct pipe_inode_info;
+struct iov_iter;
/*
* block/bdev.c
@@ -221,3 +222,5 @@ ssize_t do_getxattr(struct user_namespace *mnt_userns,
int setxattr_copy(const char __user *name, struct xattr_ctx *ctx);
int do_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct xattr_ctx *ctx);
+
+ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos);
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 5ae8de09b271..001f4e053c85 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2092,7 +2092,8 @@ get_ctx_vol_failed:
// TODO: Initialize security.
/* Get the extended system files' directory inode. */
vol->extend_ino = ntfs_iget(sb, FILE_Extend);
- if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) {
+ if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) ||
+ !S_ISDIR(vol->extend_ino->i_mode)) {
if (!IS_ERR(vol->extend_ino))
iput(vol->extend_ino);
ntfs_error(sb, "Failed to load $Extend.");
diff --git a/fs/open.c b/fs/open.c
index 8a813fa5ca56..cf7e5c350a54 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -716,6 +716,8 @@ int chown_common(const struct path *path, uid_t user, gid_t group)
fs_userns = i_user_ns(inode);
retry_deleg:
+ newattrs.ia_vfsuid = INVALID_VFSUID;
+ newattrs.ia_vfsgid = INVALID_VFSGID;
newattrs.ia_valid = ATTR_CTIME;
if ((user != (uid_t)-1) && !setattr_vfsuid(&newattrs, uid))
return -EINVAL;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b2fd3c20e7c2..0c034ea39954 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -28,14 +28,11 @@
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
-#include <crypto/acompress.h>
-
#include "internal.h"
/*
@@ -93,8 +90,7 @@ module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "compression to use");
/* Compression parameters */
-static struct crypto_acomp *tfm;
-static struct acomp_req *creq;
+static struct crypto_comp *tfm;
struct pstore_zbackend {
int (*zbufsize)(size_t size);
@@ -272,21 +268,12 @@ static const struct pstore_zbackend zbackends[] = {
static int pstore_compress(const void *in, void *out,
unsigned int inlen, unsigned int outlen)
{
- struct scatterlist src, dst;
int ret;
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
return -EINVAL;
- sg_init_table(&src, 1);
- sg_set_buf(&src, in, inlen);
-
- sg_init_table(&dst, 1);
- sg_set_buf(&dst, out, outlen);
-
- acomp_request_set_params(creq, &src, &dst, inlen, outlen);
-
- ret = crypto_acomp_compress(creq);
+ ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
if (ret) {
pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
return ret;
@@ -297,7 +284,7 @@ static int pstore_compress(const void *in, void *out,
static void allocate_buf_for_compression(void)
{
- struct crypto_acomp *acomp;
+ struct crypto_comp *ctx;
int size;
char *buf;
@@ -309,7 +296,7 @@ static void allocate_buf_for_compression(void)
if (!psinfo || tfm)
return;
- if (!crypto_has_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC)) {
+ if (!crypto_has_comp(zbackend->name, 0, 0)) {
pr_err("Unknown compression: %s\n", zbackend->name);
return;
}
@@ -328,24 +315,16 @@ static void allocate_buf_for_compression(void)
return;
}
- acomp = crypto_alloc_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR_OR_NULL(acomp)) {
+ ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+ if (IS_ERR_OR_NULL(ctx)) {
kfree(buf);
pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
- PTR_ERR(acomp));
- return;
- }
-
- creq = acomp_request_alloc(acomp);
- if (!creq) {
- crypto_free_acomp(acomp);
- kfree(buf);
- pr_err("acomp_request_alloc('%s') failed\n", zbackend->name);
+ PTR_ERR(ctx));
return;
}
/* A non-NULL big_oops_buf indicates compression is available. */
- tfm = acomp;
+ tfm = ctx;
big_oops_buf_sz = size;
big_oops_buf = buf;
@@ -355,8 +334,7 @@ static void allocate_buf_for_compression(void)
static void free_buf_for_compression(void)
{
if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
- acomp_request_free(creq);
- crypto_free_acomp(tfm);
+ crypto_free_comp(tfm);
tfm = NULL;
}
kfree(big_oops_buf);
@@ -693,8 +671,6 @@ static void decompress_record(struct pstore_record *record)
int ret;
int unzipped_len;
char *unzipped, *workspace;
- struct acomp_req *dreq;
- struct scatterlist src, dst;
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
return;
@@ -718,30 +694,16 @@ static void decompress_record(struct pstore_record *record)
if (!workspace)
return;
- dreq = acomp_request_alloc(tfm);
- if (!dreq) {
- kfree(workspace);
- return;
- }
-
- sg_init_table(&src, 1);
- sg_set_buf(&src, record->buf, record->size);
-
- sg_init_table(&dst, 1);
- sg_set_buf(&dst, workspace, unzipped_len);
-
- acomp_request_set_params(dreq, &src, &dst, record->size, unzipped_len);
-
/* After decompression "unzipped_len" is almost certainly smaller. */
- ret = crypto_acomp_decompress(dreq);
+ ret = crypto_comp_decompress(tfm, record->buf, record->size,
+ workspace, &unzipped_len);
if (ret) {
- pr_err("crypto_acomp_decompress failed, ret = %d!\n", ret);
+ pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
kfree(workspace);
return;
}
/* Append ECC notice to decompressed buffer. */
- unzipped_len = dreq->dlen;
memcpy(workspace + unzipped_len, record->buf + record->size,
record->ecc_notice_size);
@@ -749,7 +711,6 @@ static void decompress_record(struct pstore_record *record)
unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
GFP_KERNEL);
kfree(workspace);
- acomp_request_free(dreq);
if (!unzipped)
return;
diff --git a/fs/read_write.c b/fs/read_write.c
index 1a261dcf1778..328ce8cf9a85 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -496,14 +496,9 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
}
/* caller is responsible for file_start_write/file_end_write */
-ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
+ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos)
{
- struct kvec iov = {
- .iov_base = (void *)buf,
- .iov_len = min_t(size_t, count, MAX_RW_COUNT),
- };
struct kiocb kiocb;
- struct iov_iter iter;
ssize_t ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
@@ -519,8 +514,7 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = pos ? *pos : 0;
- iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);
- ret = file->f_op->write_iter(&kiocb, &iter);
+ ret = file->f_op->write_iter(&kiocb, from);
if (ret > 0) {
if (pos)
*pos = kiocb.ki_pos;
@@ -530,6 +524,18 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
inc_syscw(current);
return ret;
}
+
+/* caller is responsible for file_start_write/file_end_write */
+ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
+{
+ struct kvec iov = {
+ .iov_base = (void *)buf,
+ .iov_len = min_t(size_t, count, MAX_RW_COUNT),
+ };
+ struct iov_iter iter;
+ iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);
+ return __kernel_write_iter(file, &iter, pos);
+}
/*
* This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()",
* but autofs is one of the few internal kernel users that actually
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index 69d9c83ea4b2..5b1f9a24ed59 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -175,13 +175,13 @@ xfs_dax_notify_failure(
u64 ddev_start;
u64 ddev_end;
- if (!(mp->m_sb.sb_flags & SB_BORN)) {
+ if (!(mp->m_super->s_flags & SB_BORN)) {
xfs_warn(mp, "filesystem is not ready for notify_failure()!");
return -EIO;
}
if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) {
- xfs_warn(mp,
+ xfs_debug(mp,
"notify_failure() not supported on realtime device!");
return -EOPNOTSUPP;
}
@@ -194,7 +194,7 @@ xfs_dax_notify_failure(
}
if (!xfs_has_rmapbt(mp)) {
- xfs_warn(mp, "notify_failure() needs rmapbt enabled!");
+ xfs_debug(mp, "notify_failure() needs rmapbt enabled!");
return -EOPNOTSUPP;
}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7515a465ec03..7c90b1ab3e00 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -543,10 +543,9 @@
*/
#ifdef CONFIG_CFI_CLANG
#define TEXT_CFI_JT \
- . = ALIGN(PMD_SIZE); \
+ ALIGN_FUNCTION(); \
__cfi_jt_start = .; \
*(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
- . = ALIGN(PMD_SIZE); \
__cfi_jt_end = .;
#else
#define TEXT_CFI_JT
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bd047864c7ac..e8ad12b5b9d2 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1127,9 +1127,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
* cover a worst-case of every other cpu being on one of two nodes for a
* very large NR_CPUS.
*
- * Use PAGE_SIZE as a minimum for smaller configurations.
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
*/
-#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
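
The updated comment refers to the promotion that made the old form misbehave: with a small NR_CPUS, (NR_CPUS * 9)/32 - 1 evaluates to -1, and because PAGE_SIZE is typically an unsigned long constant the -1 is converted to a huge unsigned value before the comparison, so the PAGE_SIZE floor was never chosen. A short standalone demonstration (PAGE_SZ stands in for PAGE_SIZE, and NR_CPUS is fixed at 1 purely for illustration):

#include <stdio.h>

#define NR_CPUS 1
#define PAGE_SZ 4096UL

int main(void)
{
        /* old form: (1 * 9) / 32 - 1 == -1, promoted to ULONG_MAX */
        printf("old test: %d\n", ((NR_CPUS * 9) / 32 - 1) > PAGE_SZ);  /* prints 1 */

        /* fixed form: compare first, so nothing goes negative */
        printf("new test: %d\n", (NR_CPUS * 9) / 32 > PAGE_SZ);        /* prints 0 */
        return 0;
}
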
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 8917a32173c4..d81a51978d01 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -65,7 +65,6 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
-extern int intel_iommu_enabled;
#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
@@ -89,8 +88,7 @@ extern int intel_iommu_enabled;
static inline bool dmar_rcu_check(void)
{
return rwsem_is_locked(&dmar_global_lock) ||
- system_state == SYSTEM_BOOTING ||
- (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
+ system_state == SYSTEM_BOOTING;
}
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 698032e5ef2d..20765d1c5f80 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1136,8 +1136,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6257867fbf95..567f12323f55 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1788,42 +1788,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_unlock();
}
-/**
- * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
- * @p: pointer to object from which memcg should be extracted. It can be NULL.
- *
- * Retrieves the memory group into which the memory of the pointed kernel
- * object is accounted. If memcg is found, its reference is taken.
- * If a passed kernel object is uncharged, or if proper memcg cannot be found,
- * as well as if mem_cgroup is disabled, NULL is returned.
- *
- * Return: valid memcg pointer with taken reference or NULL.
- */
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- struct mem_cgroup *memcg;
-
- rcu_read_lock();
- do {
- memcg = mem_cgroup_from_obj(p);
- } while (memcg && !css_tryget(&memcg->css));
- rcu_read_unlock();
- return memcg;
-}
-
-/**
- * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
- * @memcg: pointer to a valid memory cgroup or NULL.
- *
- * If passed argument is not NULL, returns it without any additional checks
- * and changes. Otherwise, root_mem_cgroup is returned.
- *
- * NOTE: root_mem_cgroup can be NULL during early boot.
- */
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return memcg ? memcg : root_mem_cgroup;
-}
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1880,15 +1844,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
}
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- return NULL;
-}
-
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return NULL;
-}
#endif /* CONFIG_MEMCG_KMEM */
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 19010491a603..c3b4cc84877b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -139,6 +139,11 @@ struct dev_pagemap {
};
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f527f27e6438..08605ce7379d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -42,7 +42,31 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
+
+struct rcu_gp_oldstate;
unsigned long get_completed_synchronize_rcu(void);
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_rcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
+ * get_completed_synchronize_rcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
#ifdef CONFIG_PREEMPT_RCU
@@ -496,13 +520,21 @@ do { \
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
+ * Within an RCU read-side critical section, there is little reason to
+ * use rcu_access_pointer().
+ *
+ * It is usually best to test the rcu_access_pointer() return value
+ * directly in order to avoid accidental dereferences being introduced
+ * by later inattentive changes. In other words, assigning the
+ * rcu_access_pointer() return value to a local variable results in an
+ * accident waiting to happen.
*
* It is also permissible to use rcu_access_pointer() when read-side
- * access to the pointer was removed at least one grace period ago, as
- * is the case in the context of the RCU callback that is freeing up
- * the data, or after a synchronize_rcu() returns. This can be useful
- * when tearing down multi-linked structures after a grace period
- * has elapsed.
+ * access to the pointer was removed at least one grace period ago, as is
+ * the case in the context of the RCU callback that is freeing up the data,
+ * or after a synchronize_rcu() returns. This can be useful when tearing
+ * down multi-linked structures after a grace period has elapsed. However,
+ * rcu_dereference_protected() is normally preferred for this use case.
*/
#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
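Illustration only, not part of this patch: the guidance above about testing the rcu_access_pointer() return value directly, shown on a hypothetical gp->ptr field.

	/* Preferred: test the return value in place. */
	if (!rcu_access_pointer(gp->ptr))
		return false;

	/*
	 * Discouraged: the stashed pointer invites a later p->field access
	 * that would need rcu_dereference() instead.
	 */
	p = rcu_access_pointer(gp->ptr);
	if (!p)
		return false;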
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 62815c0a2dce..768196a5f39d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -14,25 +14,75 @@
#include <asm/param.h> /* for HZ */
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
+
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
+}
+
unsigned long get_state_synchronize_rcu(void);
+
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
unsigned long start_poll_synchronize_rcu(void);
+
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
+}
+
bool poll_state_synchronize_rcu(unsigned long oldstate);
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
+}
+
static inline void cond_synchronize_rcu(unsigned long oldstate)
{
might_sleep();
}
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu(rgosp->rgos_norm);
+}
+
static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
return start_poll_synchronize_rcu();
}
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
+}
+
static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
cond_synchronize_rcu(oldstate);
}
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
+}
+
extern void rcu_barrier(void);
static inline void synchronize_rcu_expedited(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 47eaa4cb0df7..5efb51486e8a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -40,12 +40,52 @@ bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
+
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+ unsigned long rgos_exp;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4
+
+/**
+ * same_state_synchronize_rcu_full - Are two old-state values identical?
+ * @rgosp1: First old-state value.
+ * @rgosp2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or get_completed_synchronize_rcu_full(). Returns @true if the two
+ * values are identical and @false otherwise. This allows structures
+ * whose lifetimes are tracked by old-state values to push these values
+ * to a list header, allowing those structures to be slightly smaller.
+ *
+ * Note that equality is judged on a bitwise basis, so that an
+ * @rcu_gp_oldstate structure with an already-completed state in one field
+ * will compare not-equal to a structure with an already-completed state
+ * in the other field. After all, the @rcu_gp_oldstate structure is opaque
+ * so how did such a situation come to pass in the first place?
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
+}
+
unsigned long start_poll_synchronize_rcu_expedited(void);
+void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
+void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
unsigned long start_poll_synchronize_rcu(void);
+void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu(unsigned long oldstate);
+bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
+void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool rcu_is_idle_cpu(int cpu);
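Illustration only, not part of this patch: one way the full-state polled API declared above might be used. struct foo, foo_free_cb() and the ->list/->rh/->rgos fields are hypothetical.

struct foo {
	struct list_head list;
	struct rcu_head rh;
	struct rcu_gp_oldstate rgos;
};

static void foo_unlink(struct foo *fp)
{
	list_del_rcu(&fp->list);
	/* Snapshot both normal and expedited grace-period state. */
	get_state_synchronize_rcu_full(&fp->rgos);
}

static void foo_maybe_free(struct foo *fp)
{
	if (poll_state_synchronize_rcu_full(&fp->rgos))
		kfree(fp);			/* Grace period already elapsed. */
	else
		call_rcu(&fp->rh, foo_free_cb);	/* Otherwise defer to a callback. */
}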
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index a193884ecf2b..4f765bc788ff 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -84,7 +84,7 @@ struct scmi_protocol_handle;
struct scmi_clk_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_clock_info *(*info_get)
+ const struct scmi_clock_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 clk_id);
int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 *rate);
@@ -466,7 +466,7 @@ enum scmi_sensor_class {
*/
struct scmi_sensor_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_sensor_info *(*info_get)
+ const struct scmi_sensor_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 sensor_id);
int (*trip_point_config)(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 trip_id, u64 trip_value);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 6e4f4765d209..1eaea9fe44d8 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -624,6 +624,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct circ_buf *xmit = &up->state->xmit;
+
+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+ up->icount.tx += chars;
+}
+
struct module;
struct tty_driver;
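Illustration only, not part of this patch: a hypothetical driver TX path using uart_xmit_advance(). FOO_TX_FIFO_SIZE and FOO_THR are made-up hardware details.

static void foo_uart_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int count = 0;

	while (!uart_circ_empty(xmit) && count < FOO_TX_FIFO_SIZE) {
		writeb(xmit->buf[(xmit->tail + count) & (UART_XMIT_SIZE - 1)],
		       port->membase + FOO_THR);
		count++;
	}
	/* One call advances the tail and accounts the transmitted bytes. */
	uart_xmit_advance(port, count);
}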
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 6cfaa0a9a9b9..5aa5e0faf6a1 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -15,10 +15,10 @@
struct srcu_struct {
short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
- unsigned short srcu_idx; /* Current reader array element in bit 0x2. */
- unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */
u8 srcu_gp_running; /* GP workqueue running? */
u8 srcu_gp_waiting; /* GP waiting for readers? */
+ unsigned long srcu_idx; /* Current reader array element in bit 0x2. */
+ unsigned long srcu_idx_max; /* Furthest future srcu_idx request. */
struct swait_queue_head srcu_wq;
/* Last srcu_read_unlock() wakes GP. */
struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */
@@ -82,10 +82,12 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
int idx;
idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
- pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
tt, tf, idx,
data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
- data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])));
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
+ data_race(READ_ONCE(ssp->srcu_idx)),
+ data_race(READ_ONCE(ssp->srcu_idx_max)));
}
#endif
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 0520e21ab698..9949870f7d78 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -124,8 +124,6 @@ struct hci_dev_info {
__u16 acl_pkts;
__u16 sco_mtu;
__u16 sco_pkts;
- __u16 iso_mtu;
- __u16 iso_pkts;
struct hci_dev_stats stat;
};
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index be2992e6de5d..a016f275cb01 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -15,8 +15,6 @@
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
-
#define AD_LACP_SLOW 0
#define AD_LACP_FAST 1
diff --git a/include/net/bonding.h b/include/net/bonding.h
index afd606df149a..e999f851738b 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -786,6 +786,9 @@ extern struct rtnl_link_ops bond_link_ops;
/* exported from bond_sysfs_slave.c */
extern const struct sysfs_ops slave_sysfs_ops;
+/* exported from bond_3ad.c */
+extern const u8 lacpdu_mcast_addr[];
+
static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
dev_core_stats_tx_dropped_inc(dev);
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index d0d188c3294b..a8994f307fc3 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -15,6 +15,22 @@
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
+#define IEEE802154_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
+#define IEEE802154_ADDR_OFFSET \
+ offsetof(typeof(struct sockaddr_ieee802154), addr)
+
+#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type))
+
+#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr))
+
+#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr))
+
#include <net/af_ieee802154.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -165,6 +181,27 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
memcpy(raw, &temp, IEEE802154_ADDR_LEN);
}
+static inline int
+ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
+{
+ struct ieee802154_addr_sa *sa;
+
+ sa = &daddr->addr;
+ if (len < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ switch (sa->addr_type) {
+ case IEEE802154_ADDR_SHORT:
+ if (len < IEEE802154_NAMELEN_SHORT)
+ return -EINVAL;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (len < IEEE802154_NAMELEN_LONG)
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
+
static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
const struct ieee802154_addr_sa *sa)
{
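Illustration only, not part of this patch: how a hypothetical bind() handler might call ieee802154_sockaddr_check_size() before trusting the address fields.

static int foo_ieee802154_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
	int err;

	/* Rejects lengths too short for the declared addr_type. */
	err = ieee802154_sockaddr_check_size(addr, len);
	if (err < 0)
		return err;

	/* Now safe to read addr->addr.short_addr or addr->addr.hwaddr. */
	return 0;
}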
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index 65016a767b7a..f160d68f961d 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -27,9 +27,9 @@ TRACE_EVENT(scmi_fc_call,
__entry->val2 = val2;
),
- TP_printk("[0x%02X]:[0x%02X]:[%08X]:%u:%u",
- __entry->protocol_id, __entry->msg_id,
- __entry->res_id, __entry->val1, __entry->val2)
+ TP_printk("pt=%02X msg_id=%02X res_id:%u vals=%u:%u",
+ __entry->protocol_id, __entry->msg_id,
+ __entry->res_id, __entry->val1, __entry->val2)
);
TRACE_EVENT(scmi_xfer_begin,
@@ -53,9 +53,9 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->poll = poll;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->poll)
);
TRACE_EVENT(scmi_xfer_response_wait,
@@ -81,9 +81,9 @@ TRACE_EVENT(scmi_xfer_response_wait,
__entry->poll = poll;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->timeout, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X tmo_ms=%u poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->timeout, __entry->poll)
);
TRACE_EVENT(scmi_xfer_end,
@@ -107,9 +107,9 @@ TRACE_EVENT(scmi_xfer_end,
__entry->status = status;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->status)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->status)
);
TRACE_EVENT(scmi_rx_done,
@@ -133,9 +133,9 @@ TRACE_EVENT(scmi_rx_done,
__entry->msg_type = msg_type;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->msg_type)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X msg_type=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->msg_type)
);
TRACE_EVENT(scmi_msg_dump,
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 2ec07de1d73b..b6d7b868f290 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -67,6 +67,8 @@
#define IFF_TAP 0x0002
#define IFF_NAPI 0x0010
#define IFF_NAPI_FRAGS 0x0020
+/* Used in TUNSETIFF to bring up tun/tap without carrier */
+#define IFF_NO_CARRIER 0x0040
#define IFF_NO_PI 0x1000
/* This flag has no real effect */
#define IFF_ONE_QUEUE 0x2000
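Illustration only, not part of this patch: a userspace sketch that requests the new flag via TUNSETIFF. Kernels without IFF_NO_CARRIER reject the unknown flag, so the error path matters.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int tap_open_no_carrier(const char *name)
{
	struct ifreq ifr = { 0 };
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NO_CARRIER;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;		/* Interface starts with carrier off. */
}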
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b9640ad5069f..242d896c00f3 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2648,6 +2648,9 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_kill_timeouts(ctx, NULL, true);
/* if we failed setting up the ctx, we might not have any rings */
io_iopoll_try_reap_events(ctx);
+ /* drop cached put refs after potentially doing completions */
+ if (current->io_uring)
+ io_uring_drop_tctx_refs(current);
}
INIT_WORK(&ctx->exit_work, io_ring_exit_work);
@@ -3354,6 +3357,10 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
goto err;
}
+ if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
+ && !(ctx->flags & IORING_SETUP_R_DISABLED))
+ ctx->submitter_task = get_task_struct(current);
+
file = io_uring_get_file(ctx);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
@@ -3545,6 +3552,9 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
if (!(ctx->flags & IORING_SETUP_R_DISABLED))
return -EBADFD;
+ if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
+ ctx->submitter_task = get_task_struct(current);
+
if (ctx->restrictions.registered)
ctx->restricted = 1;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d5bad0bea6e4..0d9f49c575e0 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -857,7 +857,7 @@ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (sqe->buf_index || sqe->off || sqe->addr)
return -EINVAL;
flags = READ_ONCE(sqe->len);
- if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
+ if (flags & ~IORING_POLL_ADD_MULTI)
return -EINVAL;
if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
return -EINVAL;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e4bb5d57f4d1..5f2090d051ac 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6049,6 +6049,9 @@ struct cgroup *cgroup_get_from_id(u64 id)
if (!kn)
goto out;
+ if (kernfs_type(kn) != KERNFS_DIR)
+ goto put;
+
rcu_read_lock();
cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
@@ -6056,7 +6059,7 @@ struct cgroup *cgroup_get_from_id(u64 id)
cgrp = NULL;
rcu_read_unlock();
-
+put:
kernfs_put(kn);
out:
return cgrp;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2621fd24ad26..ff4bffc502c6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6893,9 +6893,16 @@ static void perf_output_read_group(struct perf_output_handle *handle,
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
+ unsigned long flags;
u64 values[6];
int n = 0;
+ /*
+ * Disabling interrupts avoids all counter scheduling
+ * (context switches, timer based rotation and IPIs).
+ */
+ local_irq_save(flags);
+
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
@@ -6931,6 +6938,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
__output_copy(handle, values, n * sizeof(u64));
}
+
+ local_irq_restore(flags);
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
diff --git a/kernel/fork.c b/kernel/fork.c
index 8a9e92068b15..2b6bd511c6ed 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2047,11 +2047,8 @@ static __latent_entropy struct task_struct *copy_process(
/*
* If the new process will be in a different time namespace
* do not allow it to share VM or a thread group with the forking task.
- *
- * On vfork, the child process enters the target time namespace only
- * after exec.
*/
- if ((clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM) {
+ if (clone_flags & (CLONE_THREAD | CLONE_VM)) {
if (nsp->time_ns != nsp->time_ns_for_children)
return ERR_PTR(-EINVAL);
}
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index b4cbb406bc28..eec72ca962e2 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -179,8 +179,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
if (IS_ERR(new_ns))
return PTR_ERR(new_ns);
- if ((flags & CLONE_VM) == 0)
- timens_on_fork(new_ns, tsk);
+ timens_on_fork(new_ns, tsk);
tsk->nsproxy = new_ns;
return 0;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index d8e1b270a065..503c2aa845a4 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -84,10 +84,15 @@ torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress test
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
+torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
+torture_param(bool, gp_cond_exp_full, false,
+ "Use conditional/async full-stateexpedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
+torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
+torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
@@ -194,16 +199,24 @@ static int rcu_torture_writer_state;
#define RTWS_DEF_FREE 3
#define RTWS_EXP_SYNC 4
#define RTWS_COND_GET 5
-#define RTWS_COND_GET_EXP 6
-#define RTWS_COND_SYNC 7
-#define RTWS_COND_SYNC_EXP 8
-#define RTWS_POLL_GET 9
-#define RTWS_POLL_GET_EXP 10
-#define RTWS_POLL_WAIT 11
-#define RTWS_POLL_WAIT_EXP 12
-#define RTWS_SYNC 13
-#define RTWS_STUTTER 14
-#define RTWS_STOPPING 15
+#define RTWS_COND_GET_FULL 6
+#define RTWS_COND_GET_EXP 7
+#define RTWS_COND_GET_EXP_FULL 8
+#define RTWS_COND_SYNC 9
+#define RTWS_COND_SYNC_FULL 10
+#define RTWS_COND_SYNC_EXP 11
+#define RTWS_COND_SYNC_EXP_FULL 12
+#define RTWS_POLL_GET 13
+#define RTWS_POLL_GET_FULL 14
+#define RTWS_POLL_GET_EXP 15
+#define RTWS_POLL_GET_EXP_FULL 16
+#define RTWS_POLL_WAIT 17
+#define RTWS_POLL_WAIT_FULL 18
+#define RTWS_POLL_WAIT_EXP 19
+#define RTWS_POLL_WAIT_EXP_FULL 20
+#define RTWS_SYNC 21
+#define RTWS_STUTTER 22
+#define RTWS_STOPPING 23
static const char * const rcu_torture_writer_state_names[] = {
"RTWS_FIXED_DELAY",
"RTWS_DELAY",
@@ -211,13 +224,21 @@ static const char * const rcu_torture_writer_state_names[] = {
"RTWS_DEF_FREE",
"RTWS_EXP_SYNC",
"RTWS_COND_GET",
+ "RTWS_COND_GET_FULL",
"RTWS_COND_GET_EXP",
+ "RTWS_COND_GET_EXP_FULL",
"RTWS_COND_SYNC",
+ "RTWS_COND_SYNC_FULL",
"RTWS_COND_SYNC_EXP",
+ "RTWS_COND_SYNC_EXP_FULL",
"RTWS_POLL_GET",
+ "RTWS_POLL_GET_FULL",
"RTWS_POLL_GET_EXP",
+ "RTWS_POLL_GET_EXP_FULL",
"RTWS_POLL_WAIT",
+ "RTWS_POLL_WAIT_FULL",
"RTWS_POLL_WAIT_EXP",
+ "RTWS_POLL_WAIT_EXP_FULL",
"RTWS_SYNC",
"RTWS_STUTTER",
"RTWS_STOPPING",
@@ -332,13 +353,21 @@ struct rcu_torture_ops {
void (*exp_sync)(void);
unsigned long (*get_gp_state_exp)(void);
unsigned long (*start_gp_poll_exp)(void);
+ void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
bool (*poll_gp_state_exp)(unsigned long oldstate);
void (*cond_sync_exp)(unsigned long oldstate);
+ void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
unsigned long (*get_gp_state)(void);
+ void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
unsigned long (*get_gp_completed)(void);
+ void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
unsigned long (*start_gp_poll)(void);
+ void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
bool (*poll_gp_state)(unsigned long oldstate);
+ bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
+ bool (*poll_need_2gp)(bool poll, bool poll_full);
void (*cond_sync)(unsigned long oldstate);
+ void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
call_rcu_func_t call;
void (*cb_barrier)(void);
void (*fqs)(void);
@@ -489,6 +518,11 @@ static void rcu_sync_torture_init(void)
INIT_LIST_HEAD(&rcu_torture_removed);
}
+static bool rcu_poll_need_2gp(bool poll, bool poll_full)
+{
+ return poll;
+}
+
static struct rcu_torture_ops rcu_ops = {
.ttype = RCU_FLAVOR,
.init = rcu_sync_torture_init,
@@ -502,12 +536,19 @@ static struct rcu_torture_ops rcu_ops = {
.sync = synchronize_rcu,
.exp_sync = synchronize_rcu_expedited,
.get_gp_state = get_state_synchronize_rcu,
+ .get_gp_state_full = get_state_synchronize_rcu_full,
.get_gp_completed = get_completed_synchronize_rcu,
+ .get_gp_completed_full = get_completed_synchronize_rcu_full,
.start_gp_poll = start_poll_synchronize_rcu,
+ .start_gp_poll_full = start_poll_synchronize_rcu_full,
.poll_gp_state = poll_state_synchronize_rcu,
+ .poll_gp_state_full = poll_state_synchronize_rcu_full,
+ .poll_need_2gp = rcu_poll_need_2gp,
.cond_sync = cond_synchronize_rcu,
+ .cond_sync_full = cond_synchronize_rcu_full,
.get_gp_state_exp = get_state_synchronize_rcu,
.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
+ .start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
.poll_gp_state_exp = poll_state_synchronize_rcu,
.cond_sync_exp = cond_synchronize_rcu_expedited,
.call = call_rcu,
@@ -709,6 +750,9 @@ static struct rcu_torture_ops srcud_ops = {
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
+ .get_gp_state = srcu_torture_get_gp_state,
+ .start_gp_poll = srcu_torture_start_gp_poll,
+ .poll_gp_state = srcu_torture_poll_gp_state,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
@@ -1148,15 +1192,35 @@ static int nsynctypes;
*/
static void rcu_torture_write_types(void)
{
- bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_exp1 = gp_exp;
- bool gp_poll_exp1 = gp_poll_exp, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
- bool gp_sync1 = gp_sync;
+ bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
+ bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
+ bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
+ bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;
/* Initialize synctype[] array. If none set, take default. */
- if (!gp_cond1 && !gp_cond_exp1 && !gp_exp1 && !gp_poll_exp &&
- !gp_normal1 && !gp_poll1 && !gp_sync1)
- gp_cond1 = gp_cond_exp1 = gp_exp1 = gp_poll_exp1 =
- gp_normal1 = gp_poll1 = gp_sync1 = true;
+ if (!gp_cond1 &&
+ !gp_cond_exp1 &&
+ !gp_cond_full1 &&
+ !gp_cond_exp_full1 &&
+ !gp_exp1 &&
+ !gp_poll_exp1 &&
+ !gp_poll_exp_full1 &&
+ !gp_normal1 &&
+ !gp_poll1 &&
+ !gp_poll_full1 &&
+ !gp_sync1) {
+ gp_cond1 = true;
+ gp_cond_exp1 = true;
+ gp_cond_full1 = true;
+ gp_cond_exp_full1 = true;
+ gp_exp1 = true;
+ gp_poll_exp1 = true;
+ gp_poll_exp_full1 = true;
+ gp_normal1 = true;
+ gp_poll1 = true;
+ gp_poll_full1 = true;
+ gp_sync1 = true;
+ }
if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
synctype[nsynctypes++] = RTWS_COND_GET;
pr_info("%s: Testing conditional GPs.\n", __func__);
@@ -1169,6 +1233,19 @@ static void rcu_torture_write_types(void)
} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
}
+ if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
+ synctype[nsynctypes++] = RTWS_COND_GET_FULL;
+ pr_info("%s: Testing conditional full-state GPs.\n", __func__);
+ } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
+ pr_alert("%s: gp_cond_full without primitives.\n", __func__);
+ }
+ if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
+ synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
+ pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
+ } else if (gp_cond_exp_full &&
+ (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
+ pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
+ }
if (gp_exp1 && cur_ops->exp_sync) {
synctype[nsynctypes++] = RTWS_EXP_SYNC;
pr_info("%s: Testing expedited GPs.\n", __func__);
@@ -1187,12 +1264,25 @@ static void rcu_torture_write_types(void)
} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
pr_alert("%s: gp_poll without primitives.\n", __func__);
}
+ if (gp_poll_full1 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
+ synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
+ pr_info("%s: Testing polling full-state GPs.\n", __func__);
+ } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
+ pr_alert("%s: gp_poll_full without primitives.\n", __func__);
+ }
if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
pr_info("%s: Testing polling expedited GPs.\n", __func__);
} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
}
+ if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
+ synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
+ pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
+ } else if (gp_poll_exp_full &&
+ (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
+ pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
+ }
if (gp_sync1 && cur_ops->sync) {
synctype[nsynctypes++] = RTWS_SYNC;
pr_info("%s: Testing normal GPs.\n", __func__);
@@ -1202,6 +1292,40 @@ static void rcu_torture_write_types(void)
}
/*
+ * Do the specified rcu_torture_writer() synchronous grace period,
+ * while also testing out the polled APIs. Note well that the single-CPU
+ * grace-period optimizations must be accounted for.
+ */
+static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
+{
+ unsigned long cookie;
+ struct rcu_gp_oldstate cookie_full;
+ bool dopoll;
+ bool dopoll_full;
+ unsigned long r = torture_random(trsp);
+
+ dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
+ dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
+ if (dopoll || dopoll_full)
+ cpus_read_lock();
+ if (dopoll)
+ cookie = cur_ops->get_gp_state();
+ if (dopoll_full)
+ cur_ops->get_gp_state_full(&cookie_full);
+ if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
+ sync();
+ sync();
+ WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
+ "%s: Cookie check 3 failed %pS() online %*pbl.",
+ __func__, sync, cpumask_pr_args(cpu_online_mask));
+ WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
+ "%s: Cookie check 4 failed %pS() online %*pbl",
+ __func__, sync, cpumask_pr_args(cpu_online_mask));
+ if (dopoll || dopoll_full)
+ cpus_read_unlock();
+}
+
+/*
* RCU torture writer kthread. Repeatedly substitutes a new structure
* for that pointed to by rcu_torture_current, freeing the old structure
* after a series of grace periods (the "pipeline").
@@ -1212,8 +1336,10 @@ rcu_torture_writer(void *arg)
bool boot_ended;
bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
unsigned long cookie;
+ struct rcu_gp_oldstate cookie_full;
int expediting = 0;
unsigned long gp_snap;
+ struct rcu_gp_oldstate gp_snap_full;
int i;
int idx;
int oldnice = task_nice(current);
@@ -1261,11 +1387,12 @@ rcu_torture_writer(void *arg)
atomic_inc(&rcu_torture_wcount[i]);
WRITE_ONCE(old_rp->rtort_pipe_count,
old_rp->rtort_pipe_count + 1);
+
+ // Make sure readers block polled grace periods.
if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
idx = cur_ops->readlock();
cookie = cur_ops->get_gp_state();
- WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
- cur_ops->poll_gp_state(cookie),
+ WARN_ONCE(cur_ops->poll_gp_state(cookie),
"%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
__func__,
rcu_torture_writer_state_getname(),
@@ -1277,6 +1404,21 @@ rcu_torture_writer(void *arg)
}
cur_ops->readunlock(idx);
}
+ if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
+ idx = cur_ops->readlock();
+ cur_ops->get_gp_state_full(&cookie_full);
+ WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
+ "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
+ __func__,
+ rcu_torture_writer_state_getname(),
+ rcu_torture_writer_state,
+ cpumask_pr_args(cpu_online_mask));
+ if (cur_ops->get_gp_completed_full) {
+ cur_ops->get_gp_completed_full(&cookie_full);
+ WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
+ }
+ cur_ops->readunlock(idx);
+ }
switch (synctype[torture_random(&rand) % nsynctypes]) {
case RTWS_DEF_FREE:
rcu_torture_writer_state = RTWS_DEF_FREE;
@@ -1284,12 +1426,7 @@ rcu_torture_writer(void *arg)
break;
case RTWS_EXP_SYNC:
rcu_torture_writer_state = RTWS_EXP_SYNC;
- if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
- cookie = cur_ops->get_gp_state();
- cur_ops->exp_sync();
- cur_ops->exp_sync();
- if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
- WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
+ do_rtws_sync(&rand, cur_ops->exp_sync);
rcu_torture_pipe_update(old_rp);
break;
case RTWS_COND_GET:
@@ -1308,6 +1445,22 @@ rcu_torture_writer(void *arg)
cur_ops->cond_sync_exp(gp_snap);
rcu_torture_pipe_update(old_rp);
break;
+ case RTWS_COND_GET_FULL:
+ rcu_torture_writer_state = RTWS_COND_GET_FULL;
+ cur_ops->get_gp_state_full(&gp_snap_full);
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
+ cur_ops->cond_sync_full(&gp_snap_full);
+ rcu_torture_pipe_update(old_rp);
+ break;
+ case RTWS_COND_GET_EXP_FULL:
+ rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
+ cur_ops->get_gp_state_full(&gp_snap_full);
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
+ cur_ops->cond_sync_exp_full(&gp_snap_full);
+ rcu_torture_pipe_update(old_rp);
+ break;
case RTWS_POLL_GET:
rcu_torture_writer_state = RTWS_POLL_GET;
gp_snap = cur_ops->start_gp_poll();
@@ -1317,6 +1470,15 @@ rcu_torture_writer(void *arg)
&rand);
rcu_torture_pipe_update(old_rp);
break;
+ case RTWS_POLL_GET_FULL:
+ rcu_torture_writer_state = RTWS_POLL_GET_FULL;
+ cur_ops->start_gp_poll_full(&gp_snap_full);
+ rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
+ while (!cur_ops->poll_gp_state_full(&gp_snap_full))
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16,
+ &rand);
+ rcu_torture_pipe_update(old_rp);
+ break;
case RTWS_POLL_GET_EXP:
rcu_torture_writer_state = RTWS_POLL_GET_EXP;
gp_snap = cur_ops->start_gp_poll_exp();
@@ -1326,14 +1488,18 @@ rcu_torture_writer(void *arg)
&rand);
rcu_torture_pipe_update(old_rp);
break;
+ case RTWS_POLL_GET_EXP_FULL:
+ rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
+ cur_ops->start_gp_poll_exp_full(&gp_snap_full);
+ rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
+ while (!cur_ops->poll_gp_state_full(&gp_snap_full))
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16,
+ &rand);
+ rcu_torture_pipe_update(old_rp);
+ break;
case RTWS_SYNC:
rcu_torture_writer_state = RTWS_SYNC;
- if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
- cookie = cur_ops->get_gp_state();
- cur_ops->sync();
- cur_ops->sync();
- if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
- WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
+ do_rtws_sync(&rand, cur_ops->sync);
rcu_torture_pipe_update(old_rp);
break;
default:
@@ -1400,6 +1566,7 @@ static int
rcu_torture_fakewriter(void *arg)
{
unsigned long gp_snap;
+ struct rcu_gp_oldstate gp_snap_full;
DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
@@ -1438,6 +1605,16 @@ rcu_torture_fakewriter(void *arg)
torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
cur_ops->cond_sync_exp(gp_snap);
break;
+ case RTWS_COND_GET_FULL:
+ cur_ops->get_gp_state_full(&gp_snap_full);
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ cur_ops->cond_sync_full(&gp_snap_full);
+ break;
+ case RTWS_COND_GET_EXP_FULL:
+ cur_ops->get_gp_state_full(&gp_snap_full);
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ cur_ops->cond_sync_exp_full(&gp_snap_full);
+ break;
case RTWS_POLL_GET:
gp_snap = cur_ops->start_gp_poll();
while (!cur_ops->poll_gp_state(gp_snap)) {
@@ -1445,6 +1622,13 @@ rcu_torture_fakewriter(void *arg)
&rand);
}
break;
+ case RTWS_POLL_GET_FULL:
+ cur_ops->start_gp_poll_full(&gp_snap_full);
+ while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16,
+ &rand);
+ }
+ break;
case RTWS_POLL_GET_EXP:
gp_snap = cur_ops->start_gp_poll_exp();
while (!cur_ops->poll_gp_state_exp(gp_snap)) {
@@ -1452,6 +1636,13 @@ rcu_torture_fakewriter(void *arg)
&rand);
}
break;
+ case RTWS_POLL_GET_EXP_FULL:
+ cur_ops->start_gp_poll_exp_full(&gp_snap_full);
+ while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16,
+ &rand);
+ }
+ break;
case RTWS_SYNC:
cur_ops->sync();
break;
@@ -1715,7 +1906,9 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
*/
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
+ bool checkpolling = !(torture_random(trsp) & 0xfff);
unsigned long cookie;
+ struct rcu_gp_oldstate cookie_full;
int i;
unsigned long started;
unsigned long completed;
@@ -1731,8 +1924,12 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
WARN_ON_ONCE(!rcu_is_watching());
newstate = rcutorture_extend_mask(readstate, trsp);
rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
- if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
- cookie = cur_ops->get_gp_state();
+ if (checkpolling) {
+ if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
+ cookie = cur_ops->get_gp_state();
+ if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
+ cur_ops->get_gp_state_full(&cookie_full);
+ }
started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
@@ -1766,13 +1963,22 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
}
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
- if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
- WARN_ONCE(cur_ops->poll_gp_state(cookie),
- "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
- __func__,
- rcu_torture_writer_state_getname(),
- rcu_torture_writer_state,
- cookie, cur_ops->get_gp_state());
+ if (checkpolling) {
+ if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
+ WARN_ONCE(cur_ops->poll_gp_state(cookie),
+ "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
+ __func__,
+ rcu_torture_writer_state_getname(),
+ rcu_torture_writer_state,
+ cookie, cur_ops->get_gp_state());
+ if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
+ WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
+ "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
+ __func__,
+ rcu_torture_writer_state_getname(),
+ rcu_torture_writer_state,
+ cpumask_pr_args(cpu_online_mask));
+ }
rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
WARN_ON_ONCE(readstate);
// This next splat is expected behavior if leakpointer, especially
@@ -2600,12 +2806,12 @@ static int rcutorture_oom_notify(struct notifier_block *self,
for (i = 0; i < fwd_progress; i++)
ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
- rcu_barrier();
+ cur_ops->cb_barrier();
ncbs = 0;
for (i = 0; i < fwd_progress; i++)
ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
- rcu_barrier();
+ cur_ops->cb_barrier();
ncbs = 0;
for (i = 0; i < fwd_progress; i++)
ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 92c002d65482..33adafdad261 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -117,7 +117,7 @@ void srcu_drive_gp(struct work_struct *wp)
struct srcu_struct *ssp;
ssp = container_of(wp, struct srcu_struct, srcu_work);
- if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
+ if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
return; /* Already running or nothing to do. */
/* Remove recently arrived callbacks and wait for readers. */
@@ -150,17 +150,17 @@ void srcu_drive_gp(struct work_struct *wp)
* straighten that out.
*/
WRITE_ONCE(ssp->srcu_gp_running, false);
- if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
+ if (ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
- unsigned short cookie;
+ unsigned long cookie;
cookie = get_state_synchronize_srcu(ssp);
- if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
+ if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
return;
WRITE_ONCE(ssp->srcu_idx_max, cookie);
if (!READ_ONCE(ssp->srcu_gp_running)) {
@@ -215,7 +215,7 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
barrier();
ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
barrier();
- return ret & USHRT_MAX;
+ return ret;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
*/
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
- bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);
+ unsigned long cur_s = READ_ONCE(ssp->srcu_idx);
barrier();
- return ret;
+ return ULONG_CMP_GE(cur_s, cookie) || ULONG_CMP_LT(cur_s, cookie - 3);
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
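Illustration only, not part of this patch: the polled-SRCU pattern that the wider cookie comparison above is meant to keep working on Tiny SRCU. my_srcu, my_obj and its fields are hypothetical.

	unsigned long cookie;

	list_del_rcu(&my_obj->list);			/* Hide from new readers. */
	cookie = start_poll_synchronize_srcu(&my_srcu);

	/* Later, perhaps from a timer or shrinker: */
	if (poll_state_synchronize_srcu(&my_srcu, cookie))
		kfree(my_obj);				/* Grace period has elapsed. */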
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 83c7e6620d40..f5bf6fb430da 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -560,7 +560,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
/* Complain if the scheduler has not started. */
- RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
+ WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
"synchronize_rcu_tasks called too soon");
// If the grace-period kthread is running, use it.
@@ -1500,6 +1500,7 @@ static void rcu_tasks_trace_pregp_step(struct list_head *hop)
if (rcu_tasks_trace_pertask_prep(t, true))
trc_add_holdout(t, hop);
rcu_read_unlock();
+ cond_resched_tasks_rcu_qs();
}
// Only after all running tasks have been accounted for is it
@@ -1520,6 +1521,7 @@ static void rcu_tasks_trace_pregp_step(struct list_head *hop)
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
}
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+ cond_resched_tasks_rcu_qs();
}
// Re-enable CPU hotplug now that the holdout list is populated.
@@ -1619,6 +1621,7 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
trc_del_holdout(t);
else if (needreport)
show_stalled_task_trace(t, firstreport);
+ cond_resched_tasks_rcu_qs();
}
// Re-enable CPU hotplug now that the holdout list scan has completed.
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index f0561ee16b9c..a33a8d4942c3 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -158,6 +158,10 @@ void synchronize_rcu(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
+static void tiny_rcu_leak_callback(struct rcu_head *rhp)
+{
+}
+
/*
* Post an RCU callback to be invoked after the end of an RCU grace
* period. But since we have but one CPU, that would be after any
@@ -165,9 +169,20 @@ EXPORT_SYMBOL_GPL(synchronize_rcu);
*/
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
+ static atomic_t doublefrees;
unsigned long flags;
- debug_rcu_head_queue(head);
+ if (debug_rcu_head_queue(head)) {
+ if (atomic_inc_return(&doublefrees) < 4) {
+ pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
+ mem_dump_obj(head);
+ }
+
+ if (!__is_kvfree_rcu_offset((unsigned long)head->func))
+ WRITE_ONCE(head->func, tiny_rcu_leak_callback);
+ return;
+ }
+
head->func = func;
head->next = NULL;
@@ -184,6 +199,16 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func)
EXPORT_SYMBOL_GPL(call_rcu);
/*
+ * Store a grace-period-counter "cookie". For more information,
+ * see the Tree RCU header comment.
+ */
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
+}
+EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
+
+/*
* Return a grace-period-counter "cookie". For more information,
* see the Tree RCU header comment.
*/
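Illustration only, not part of this patch: a hypothetical initializer that uses the new function so that the very first poll succeeds without waiting for a grace period (reusing the hypothetical struct foo from the rcutree.h sketch above).

static void foo_init(struct foo *fp)
{
	/* "Already completed": no grace period is outstanding for fp. */
	get_completed_synchronize_rcu_full(&fp->rgos);
}

	/* Later, this first check returns true immediately: */
	if (poll_state_synchronize_rcu_full(&fp->rgos))
		foo_reuse(fp);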
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 79aea7df4345..6bb8e72bc815 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -76,6 +76,7 @@
/* Data structures. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
+ .gpwrap = true,
#ifdef CONFIG_RCU_NOCB_CPU
.cblist.flags = SEGCBLIST_RCU_CORE,
#endif
@@ -1755,6 +1756,8 @@ static noinline void rcu_gp_cleanup(void)
dump_blkd_tasks(rnp, 10);
WARN_ON_ONCE(rnp->qsmask);
WRITE_ONCE(rnp->gp_seq, new_gp_seq);
+ if (!rnp->parent)
+ smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
rdp = this_cpu_ptr(&rcu_data);
if (rnp == rdp->mynode)
needgp = __note_gp_changes(rnp, rdp) || needgp;
@@ -2341,8 +2344,8 @@ void rcu_sched_clock_irq(int user)
rcu_flavor_sched_clock_irq(user);
if (rcu_pending(user))
invoke_rcu_core();
- if (user)
- rcu_tasks_classic_qs(current, false);
+ if (user || rcu_is_cpu_rrupt_from_idle())
+ rcu_note_voluntary_context_switch(current);
lockdep_assert_irqs_disabled();
trace_rcu_utilization(TPS("End scheduler-tick"));
@@ -2832,7 +2835,7 @@ EXPORT_SYMBOL_GPL(call_rcu);
/* Maximum number of jiffies to wait before draining a batch. */
-#define KFREE_DRAIN_JIFFIES (HZ / 50)
+#define KFREE_DRAIN_JIFFIES (5 * HZ)
#define KFREE_N_BATCHES 2
#define FREE_N_CHANNELS 2
@@ -3093,6 +3096,21 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
return !!krcp->head;
}
+static void
+schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
+{
+ long delay, delay_left;
+
+ delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ? 1 : KFREE_DRAIN_JIFFIES;
+ if (delayed_work_pending(&krcp->monitor_work)) {
+ delay_left = krcp->monitor_work.timer.expires - jiffies;
+ if (delay < delay_left)
+ mod_delayed_work(system_wq, &krcp->monitor_work, delay);
+ return;
+ }
+ queue_delayed_work(system_wq, &krcp->monitor_work, delay);
+}
+
/*
* This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
*/
@@ -3150,7 +3168,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
// work to repeat an attempt. Because previous batches are
// still in progress.
if (need_offload_krc(krcp))
- schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+ schedule_delayed_monitor_work(krcp);
raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
@@ -3183,15 +3201,16 @@ static void fill_page_cache_func(struct work_struct *work)
bnode = (struct kvfree_rcu_bulk_data *)
__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (bnode) {
- raw_spin_lock_irqsave(&krcp->lock, flags);
- pushed = put_cached_bnode(krcp, bnode);
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
+ if (!bnode)
+ break;
- if (!pushed) {
- free_page((unsigned long) bnode);
- break;
- }
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+ pushed = put_cached_bnode(krcp, bnode);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+ if (!pushed) {
+ free_page((unsigned long) bnode);
+ break;
}
}
@@ -3338,7 +3357,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
// Set timer to drain after KFREE_DRAIN_JIFFIES.
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
- schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+ schedule_delayed_monitor_work(krcp);
unlock_return:
krc_this_cpu_unlock(krcp, flags);
@@ -3371,7 +3390,7 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
atomic_set(&krcp->backoff_page_cache_fill, 1);
}
- return count;
+ return count == 0 ? SHRINK_EMPTY : count;
}
static unsigned long
@@ -3414,49 +3433,27 @@ void __init kfree_rcu_scheduler_running(void)
raw_spin_lock_irqsave(&krcp->lock, flags);
if (need_offload_krc(krcp))
- schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+ schedule_delayed_monitor_work(krcp);
raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
}
/*
* During early boot, any blocking grace-period wait automatically
- * implies a grace period. Later on, this is never the case for PREEMPTION.
+ * implies a grace period.
*
- * However, because a context switch is a grace period for !PREEMPTION, any
- * blocking grace-period wait automatically implies a grace period if
- * there is only one CPU online at any point time during execution of
- * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
- * occasionally incorrectly indicate that there are multiple CPUs online
- * when there was in fact only one the whole time, as this just adds some
- * overhead: RCU still operates correctly.
+ * Later on, this could in theory be the case for kernels built with
+ * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
+ * is not a common case. Furthermore, this optimization would cause
+ * the rcu_gp_oldstate structure to expand by 50%, so this potential
+ * grace-period optimization is ignored once the scheduler is running.
*/
static int rcu_blocking_is_gp(void)
{
- int ret;
-
- // Invoking preempt_model_*() too early gets a splat.
- if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE ||
- preempt_model_full() || preempt_model_rt())
- return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
+ if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
+ return false;
might_sleep(); /* Check for RCU read-side critical section. */
- preempt_disable();
- /*
- * If the rcu_state.n_online_cpus counter is equal to one,
- * there is only one CPU, and that CPU sees all prior accesses
- * made by any CPU that was online at the time of its access.
- * Furthermore, if this counter is equal to one, its value cannot
- * change until after the preempt_enable() below.
- *
- * Furthermore, if rcu_state.n_online_cpus is equal to one here,
- * all later CPUs (both this one and any that come online later
- * on) are guaranteed to see all accesses prior to this point
- * in the code, without the need for additional memory barriers.
- * Those memory barriers are provided by CPU-hotplug code.
- */
- ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
- preempt_enable();
- return ret;
+ return true;
}
/**
@@ -3499,30 +3496,59 @@ static int rcu_blocking_is_gp(void)
*/
void synchronize_rcu(void)
{
+ unsigned long flags;
+ struct rcu_node *rnp;
+
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu() in RCU read-side critical section");
- if (rcu_blocking_is_gp()) {
- // Note well that this code runs with !PREEMPT && !SMP.
- // In addition, all code that advances grace periods runs at
- // process level. Therefore, this normal GP overlaps with
- // other normal GPs only by being fully nested within them,
- // which allows reuse of ->gp_seq_polled_snap.
- rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
- rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
- if (rcu_init_invoked())
- cond_resched_tasks_rcu_qs();
- return; // Context allows vacuous grace periods.
+ if (!rcu_blocking_is_gp()) {
+ if (rcu_gp_is_expedited())
+ synchronize_rcu_expedited();
+ else
+ wait_rcu_gp(call_rcu);
+ return;
}
- if (rcu_gp_is_expedited())
- synchronize_rcu_expedited();
- else
- wait_rcu_gp(call_rcu);
+
+ // Context allows vacuous grace periods.
+ // Note well that this code runs with !PREEMPT && !SMP.
+ // In addition, all code that advances grace periods runs at
+ // process level. Therefore, this normal GP overlaps with other
+ // normal GPs only by being fully nested within them, which allows
+ // reuse of ->gp_seq_polled_snap.
+ rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
+ rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
+
+ // Update the normal grace-period counters to record
+ // this grace period, but only those used by the boot CPU.
+ // The rcu_scheduler_starting() will take care of the rest of
+ // these counters.
+ local_irq_save(flags);
+ WARN_ON_ONCE(num_online_cpus() > 1);
+ rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
+ for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
+ rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
+ local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
/**
+ * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
+ * @rgosp: Place to put state cookie
+ *
+ * Stores into @rgosp a value that will always be treated by functions
+ * like poll_state_synchronize_rcu_full() as a cookie whose grace period
+ * has already completed.
+ */
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
+ rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
+}
+EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
+
+/**
* get_state_synchronize_rcu - Snapshot current RCU state
*
* Returns a cookie that is used by a later call to cond_synchronize_rcu()
@@ -3541,21 +3567,42 @@ unsigned long get_state_synchronize_rcu(void)
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
/**
- * start_poll_synchronize_rcu - Snapshot and start RCU grace period
+ * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
+ * @rgosp: location to place combined normal/expedited grace-period state
*
- * Returns a cookie that is used by a later call to cond_synchronize_rcu()
- * or poll_state_synchronize_rcu() to determine whether or not a full
- * grace period has elapsed in the meantime. If the needed grace period
- * is not already slated to start, notifies RCU core of the need for that
- * grace period.
+ * Places the normal and expedited grace-period states in @rgosp. This
+ * state value can be passed to a later call to cond_synchronize_rcu_full()
+ * or poll_state_synchronize_rcu_full() to determine whether or not a
+ * grace period (whether normal or expedited) has elapsed in the meantime.
+ * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
+ * long, but is guaranteed to see all grace periods. In contrast, the
+ * combined state occupies less memory, but can sometimes fail to take
+ * grace periods into account.
*
- * Interrupts must be enabled for the case where it is necessary to awaken
- * the grace-period kthread.
+ * This does not guarantee that the needed grace period will actually
+ * start.
*/
-unsigned long start_poll_synchronize_rcu(void)
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ struct rcu_node *rnp = rcu_get_root();
+
+ /*
+ * Any prior manipulation of RCU-protected data must happen
+ * before the loads from ->gp_seq and ->expedited_sequence.
+ */
+ smp_mb(); /* ^^^ */
+ rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
+ rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
+
+/*
+ * Helper function for start_poll_synchronize_rcu() and
+ * start_poll_synchronize_rcu_full().
+ */
+static void start_poll_synchronize_rcu_common(void)
{
unsigned long flags;
- unsigned long gp_seq = get_state_synchronize_rcu();
bool needwake;
struct rcu_data *rdp;
struct rcu_node *rnp;
@@ -3575,17 +3622,57 @@ unsigned long start_poll_synchronize_rcu(void)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (needwake)
rcu_gp_kthread_wake();
+}
+
+/**
+ * start_poll_synchronize_rcu - Snapshot and start RCU grace period
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_rcu()
+ * or poll_state_synchronize_rcu() to determine whether or not a full
+ * grace period has elapsed in the meantime. If the needed grace period
+ * is not already slated to start, notifies RCU core of the need for that
+ * grace period.
+ *
+ * Interrupts must be enabled for the case where it is necessary to awaken
+ * the grace-period kthread.
+ */
+unsigned long start_poll_synchronize_rcu(void)
+{
+ unsigned long gp_seq = get_state_synchronize_rcu();
+
+ start_poll_synchronize_rcu_common();
return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
/**
- * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
+ * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
+ * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
*
+ * Places the normal and expedited grace-period states in @rgosp. This
+ * state value can be passed to a later call to cond_synchronize_rcu_full()
+ * or poll_state_synchronize_rcu_full() to determine whether or not a
+ * grace period (whether normal or expedited) has elapsed in the meantime.
+ * If the needed grace period is not already slated to start, notifies
+ * RCU core of the need for that grace period.
+ *
+ * Interrupts must be enabled for the case where it is necessary to awaken
+ * the grace-period kthread.
+ */
+void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ get_state_synchronize_rcu_full(rgosp);
+
+ start_poll_synchronize_rcu_common();
+}
+EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
+
+/**
+ * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
* @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
*
* If a full RCU grace period has elapsed since the earlier call from
- * which oldstate was obtained, return @true, otherwise return @false.
+ * which @oldstate was obtained, return @true, otherwise return @false.
* If @false is returned, it is the caller's responsibility to invoke this
* function later on until it does return @true. Alternatively, the caller
* can explicitly wait for a grace period, for example, by passing @oldstate
@@ -3594,10 +3681,11 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
* Yes, this function does not take counter wrap into account.
* But counter wrap is harmless. If the counter wraps, we have waited for
* more than a billion grace periods (and way more on a 64-bit system!).
- * Those needing to keep oldstate values for very long time periods
- * (many hours even on 32-bit systems) should check them occasionally
- * and either refresh them or set a flag indicating that the grace period
- * has completed.
+ * Those needing to keep old state values for very long time periods
+ * (many hours even on 32-bit systems) should check them occasionally and
+ * either refresh them or set a flag indicating that the grace period has
+ * completed. Alternatively, they can use get_completed_synchronize_rcu()
+ * to get a guaranteed-completed grace-period state.
*
* This function provides the same memory-ordering guarantees that
* would be provided by a synchronize_rcu() that was invoked at the call
@@ -3616,8 +3704,56 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
/**
- * cond_synchronize_rcu - Conditionally wait for an RCU grace period
+ * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
+ * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
*
+ * If a full RCU grace period has elapsed since the earlier call from
+ * which @rgosp was obtained, return @true, otherwise return @false.
+ * If @false is returned, it is the caller's responsibility to invoke this
+ * function later on until it does return @true. Alternatively, the caller
+ * can explicitly wait for a grace period, for example, by passing @rgosp
+ * to cond_synchronize_rcu_full() or by directly invoking synchronize_rcu().
+ *
+ * Yes, this function does not take counter wrap into account.
+ * But counter wrap is harmless. If the counter wraps, we have waited
+ * for more than a billion grace periods (and way more on a 64-bit
+ * system!). Those needing to keep rcu_gp_oldstate values for very
+ * long time periods (many hours even on 32-bit systems) should check
+ * them occasionally and either refresh them or set a flag indicating
+ * that the grace period has completed. Alternatively, they can use
+ * get_completed_synchronize_rcu_full() to get a guaranteed-completed
+ * grace-period state.
+ *
+ * This function provides the same memory-ordering guarantees that would
+ * be provided by a synchronize_rcu() that was invoked at the call to
+ * the function that provided @rgosp, and that returned at the end of this
+ * function. And this guarantee requires that the root rcu_node structure's
+ * ->gp_seq field be checked instead of that of the rcu_state structure.
+ * The problem is that the just-ending grace-period's callbacks can be
+ * invoked between the time that the root rcu_node structure's ->gp_seq
+ * field is updated and the time that the rcu_state structure's ->gp_seq
+ * field is updated. Therefore, if a single synchronize_rcu() is to
+ * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
+ * then the root rcu_node structure is the one that needs to be polled.
+ */
+bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ struct rcu_node *rnp = rcu_get_root();
+
+ smp_mb(); // Order against root rcu_node structure grace-period cleanup.
+ if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
+ rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
+ rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
+ rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
+ smp_mb(); /* Ensure GP ends before subsequent accesses. */
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
+
+/**
+ * cond_synchronize_rcu - Conditionally wait for an RCU grace period
* @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
*
* If a full RCU grace period has elapsed since the earlier call to
@@ -3641,6 +3777,33 @@ void cond_synchronize_rcu(unsigned long oldstate)
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
+/**
+ * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
+ * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
+ *
+ * If a full RCU grace period has elapsed since the call to
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
+ * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
+ * for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.
+ * But counter wrap is harmless. If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for a couple of additional grace periods should be just fine.
+ *
+ * This function provides the same memory-ordering guarantees that
+ * would be provided by a synchronize_rcu() that was invoked at the call
+ * to the function that provided @rgosp and that returned at the end of
+ * this function.
+ */
+void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ if (!poll_state_synchronize_rcu_full(rgosp))
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
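(Illustrative aside, not part of the patch: the start/cond pairing, assuming kernel
context; everything other than the RCU API calls is hypothetical.)

	struct rcu_gp_oldstate rgos;

	start_poll_synchronize_rcu_full(&rgos);	/* snapshot and, if needed, request a GP */
	/* ... do unrelated work while the grace period runs ... */
	cond_synchronize_rcu_full(&rgos);	/* returns immediately if it already ended */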
+
/*
* Check to see if there is any immediate RCU-related work to be done by
* the current CPU, returning 1 if so and zero otherwise. The checks are
@@ -4312,9 +4475,20 @@ early_initcall(rcu_spawn_gp_kthread);
*/
void rcu_scheduler_starting(void)
{
+ unsigned long flags;
+ struct rcu_node *rnp;
+
WARN_ON(num_online_cpus() != 1);
WARN_ON(nr_context_switches() > 0);
rcu_test_sync_prims();
+
+ // Fix up the ->gp_seq counters.
+ local_irq_save(flags);
+ rcu_for_each_node_breadth_first(rnp)
+ rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
+ local_irq_restore(flags);
+
+ // Switch out of early boot mode.
rcu_scheduler_active = RCU_SCHEDULER_INIT;
rcu_test_sync_prims();
}
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index be667583a554..18e9b4cd78ef 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -828,11 +828,13 @@ static void rcu_exp_handler(void *unused)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
+ bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
return;
- if (rcu_is_cpu_rrupt_from_idle()) {
+ if (rcu_is_cpu_rrupt_from_idle() ||
+ (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
return;
}
@@ -906,6 +908,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
void synchronize_rcu_expedited(void)
{
bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+ unsigned long flags;
struct rcu_exp_work rew;
struct rcu_node *rnp;
unsigned long s;
@@ -924,8 +927,11 @@ void synchronize_rcu_expedited(void)
// them, which allows reuse of ->gp_seq_polled_exp_snap.
rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
- if (rcu_init_invoked())
- cond_resched();
+
+ local_irq_save(flags);
+ WARN_ON_ONCE(num_online_cpus() > 1);
+ rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
+ local_irq_restore(flags);
return; // Context allows vacuous grace periods.
}
@@ -1028,6 +1034,24 @@ unsigned long start_poll_synchronize_rcu_expedited(void)
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
/**
+ * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
+ * @rgosp: Place to put snapshot of grace-period state
+ *
+ * Places the normal and expedited grace-period states in @rgosp. This
+ * state value can be passed to a later call to cond_synchronize_rcu_full()
+ * or poll_state_synchronize_rcu_full() to determine whether or not a
+ * grace period (whether normal or expedited) has elapsed in the meantime.
+ * If the needed expedited grace period is not already slated to start,
+ * initiates that grace period.
+ */
+void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ get_state_synchronize_rcu_full(rgosp);
+ (void)start_poll_synchronize_rcu_expedited();
+}
+EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);
+
+/**
* cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
*
* @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
@@ -1053,3 +1077,30 @@ void cond_synchronize_rcu_expedited(unsigned long oldstate)
synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
+
+/**
+ * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
+ * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
+ *
+ * If a full RCU grace period has elapsed since the call to
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
+ * obtained, just return. Otherwise, invoke synchronize_rcu_expedited()
+ * to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.
+ * But counter wrap is harmless. If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for a couple of additional grace periods should be just fine.
+ *
+ * This function provides the same memory-ordering guarantees that
+ * would be provided by a synchronize_rcu() that was invoked at the call
+ * to the function that provided @rgosp and that returned at the end of
+ * this function.
+ */
+void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ if (!poll_state_synchronize_rcu_full(rgosp))
+ synchronize_rcu_expedited();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);
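(Illustrative aside, not part of the patch: the expedited counterpart, assuming kernel
context and a caller that can tolerate the extra overhead of an expedited grace period.)

	struct rcu_gp_oldstate rgos;

	start_poll_synchronize_rcu_expedited_full(&rgos);
	/* ... */
	cond_synchronize_rcu_expedited_full(&rgos);	/* expedited wait only if still needed */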
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index a8f574d8850d..0a5f0ef41484 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1111,7 +1111,7 @@ int rcu_nocb_cpu_deoffload(int cpu)
if (!ret)
cpumask_clear_cpu(cpu, rcu_nocb_mask);
} else {
- pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
+ pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu);
ret = -EINVAL;
}
}
@@ -1196,7 +1196,7 @@ int rcu_nocb_cpu_offload(int cpu)
if (!ret)
cpumask_set_cpu(cpu, rcu_nocb_mask);
} else {
- pr_info("NOCB: Can't CB-offload an offline CPU\n");
+ pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu);
ret = -EINVAL;
}
}
@@ -1452,8 +1452,8 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
(long)rdp->nocb_gp_seq,
rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
- rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
- show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
+ rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+ show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
}
/* Dump out nocb kthread state for the specified rcu_data structure. */
@@ -1497,7 +1497,7 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
rcu_segcblist_n_cbs(&rdp->cblist),
rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
- rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+ rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
/* It is OK for GP kthreads to have GP state. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 438ecae6bd7e..e3142ee35fc6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -641,7 +641,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
(rdp->grpmask & READ_ONCE(rnp->expmask)) ||
- IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
+ (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
+ ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
(IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
t->rcu_blocked_node);
// Need to defer quiescent state until everything is enabled.
@@ -718,9 +719,6 @@ static void rcu_flavor_sched_clock_irq(int user)
struct task_struct *t = current;
lockdep_assert_irqs_disabled();
- if (user || rcu_is_cpu_rrupt_from_idle()) {
- rcu_note_voluntary_context_switch(current);
- }
if (rcu_preempt_depth() > 0 ||
(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
/* No QS, force context switch if deferred. */
@@ -824,6 +822,7 @@ void rcu_read_unlock_strict(void)
if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
return;
rdp = this_cpu_ptr(&rcu_data);
+ rdp->cpu_no_qs.b.norm = false;
rcu_report_qs_rdp(rdp);
udelay(rcu_unlock_delay);
}
@@ -869,7 +868,7 @@ void rcu_all_qs(void)
if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
return;
- preempt_disable();
+ preempt_disable(); // For CONFIG_PREEMPT_COUNT=y kernels
/* Load rcu_urgent_qs before other flags. */
if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
preempt_enable();
@@ -931,10 +930,13 @@ static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
return false;
}
-// Except that we do need to respond to a request by an expedited grace
-// period for a quiescent state from this CPU. Note that requests from
-// tasks are handled when removing the task from the blocked-tasks list
-// below.
+// Except that we do need to respond to a request by an expedited
+// grace period for a quiescent state from this CPU. Note that in
+// non-preemptible kernels, there can be no context switches within RCU
+// read-side critical sections, which in turn means that the leaf rcu_node
+// structure's blocked-tasks list is always empty. There is therefore no
+// need to actually check it. Instead, a quiescent state from this CPU
+// suffices, and this function is only called from such a quiescent state.
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -972,7 +974,6 @@ static void rcu_flavor_sched_clock_irq(int user)
* neither access nor modify, at least not while the
* corresponding CPU is online.
*/
-
rcu_qs();
}
}
@@ -1238,8 +1239,11 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
cpu != outgoingcpu)
cpumask_set_cpu(cpu, cm);
cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
- if (cpumask_empty(cm))
+ if (cpumask_empty(cm)) {
cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
+ if (outgoingcpu >= 0)
+ cpumask_clear_cpu(outgoingcpu, cm);
+ }
set_cpus_allowed_ptr(t, cm);
mutex_unlock(&rnp->boost_kthread_mutex);
free_cpumask_var(cm);
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index c3fbbcc09327..5653560573e2 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -368,7 +368,7 @@ static void rcu_dump_cpu_stacks(void)
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
if (cpu_is_offline(cpu))
pr_err("Offline CPU %d blocking current GP.\n", cpu);
- else if (!trigger_single_cpu_backtrace(cpu))
+ else
dump_cpu_task(cpu);
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -511,8 +511,7 @@ static void rcu_check_gp_kthread_starvation(void)
pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
} else {
pr_err("Stack dump where RCU GP kthread last ran:\n");
- if (!trigger_single_cpu_backtrace(cpu))
- dump_cpu_task(cpu);
+ dump_cpu_task(cpu);
}
}
wake_up_process(gpk);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ee28253c9ac0..60fdc0faf1c9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -73,6 +73,7 @@
#include <uapi/linux/sched/types.h>
+#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -11183,6 +11184,19 @@ struct cgroup_subsys cpu_cgrp_subsys = {
void dump_cpu_task(int cpu)
{
+ if (cpu == smp_processor_id() && in_hardirq()) {
+ struct pt_regs *regs;
+
+ regs = get_irq_regs();
+ if (regs) {
+ show_regs(regs);
+ return;
+ }
+ }
+
+ if (trigger_single_cpu_backtrace(cpu))
+ return;
+
pr_info("Task dump for CPU %d:\n", cpu);
sched_show_task(cpu_curr(cpu));
}
diff --git a/kernel/smp.c b/kernel/smp.c
index 650810a6f29b..e8cdc025a046 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -370,8 +370,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
if (cpu >= 0) {
if (static_branch_unlikely(&csdlock_debug_extended))
csd_lock_print_extended(csd, cpu);
- if (!trigger_single_cpu_backtrace(cpu))
- dump_cpu_task(cpu);
+ dump_cpu_task(cpu);
if (!cpu_cur_csd) {
pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
arch_send_call_function_single_ipi(cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index aeea9731ef80..39060a5d0905 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3066,10 +3066,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
if (WARN_ON(!work->func))
return false;
- if (!from_cancel) {
- lock_map_acquire(&work->lockdep_map);
- lock_map_release(&work->lockdep_map);
- }
+ lock_map_acquire(&work->lockdep_map);
+ lock_map_release(&work->lockdep_map);
if (start_flush_work(work, &barr, from_cancel)) {
wait_for_completion(&barr.done);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index bcbe60d6c80c..d3e5f36bb01e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -264,8 +264,10 @@ config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
config DEBUG_INFO_DWARF4
bool "Generate DWARF Version 4 debuginfo"
select DEBUG_INFO
+ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
help
- Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
+ Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
+ if using clang without clang's integrated assembler, and gdb 7.0+.
If you have consumers of DWARF debug info that are not ready for
newer revisions of DWARF, you may wish to choose this or have your
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index cfdf63132d5a..4e51466c4e74 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -884,6 +884,7 @@ static int dbgfs_rm_context(char *name)
struct dentry *root, *dir, **new_dirs;
struct damon_ctx **new_ctxs;
int i, j;
+ int ret = 0;
if (damon_nr_running_ctxs())
return -EBUSY;
@@ -898,14 +899,16 @@ static int dbgfs_rm_context(char *name)
new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
GFP_KERNEL);
- if (!new_dirs)
- return -ENOMEM;
+ if (!new_dirs) {
+ ret = -ENOMEM;
+ goto out_dput;
+ }
new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
GFP_KERNEL);
if (!new_ctxs) {
- kfree(new_dirs);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_new_dirs;
}
for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
@@ -925,7 +928,13 @@ static int dbgfs_rm_context(char *name)
dbgfs_ctxs = new_ctxs;
dbgfs_nr_ctxs--;
- return 0;
+ goto out_dput;
+
+out_new_dirs:
+ kfree(new_dirs);
+out_dput:
+ dput(dir);
+ return ret;
}
static ssize_t dbgfs_rm_context_write(struct file *file,
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 7488e27c87c3..bdef9682d0a0 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -2182,12 +2182,12 @@ static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
if (!t)
return -ENOMEM;
+ damon_add_target(ctx, t);
if (damon_target_has_pid(ctx)) {
t->pid = find_get_pid(sys_target->pid);
if (!t->pid)
goto destroy_targets_out;
}
- damon_add_target(ctx, t);
err = damon_sysfs_set_regions(t, sys_target->regions);
if (err)
goto destroy_targets_out;
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 1a97610308cb..279e55b4ed87 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -125,6 +125,9 @@ void frontswap_init(unsigned type, unsigned long *map)
* p->frontswap set to something valid to work properly.
*/
frontswap_map_set(sis, map);
+
+ if (!frontswap_enabled())
+ return;
frontswap_ops->init(type);
}
diff --git a/mm/gup.c b/mm/gup.c
index 5abdaf487460..00926abb4426 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2345,8 +2345,28 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
}
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+/*
+ * Fast-gup relies on pte change detection to avoid concurrent pgtable
+ * operations.
+ *
+ * To pin the page, fast-gup needs to do the following, in order:
+ * (1) pin the page (by prefetching the pte), then (2) check that the
+ * pte has not changed.
+ *
+ * For the rest of the pgtable operations whose updates can race with
+ * fast-gup, we need to do (1) clear the pte, then (2) check whether the
+ * page is pinned.
+ *
+ * The above works for all pte-level operations, including THP split.
+ *
+ * For THP collapse, it's a bit more complicated because fast-gup may be
+ * walking a pgtable page that is being freed (the pte is still valid but
+ * the pmd can already be cleared). To avoid racing in that case, we also
+ * need to check the pmd here to make sure it doesn't change (this
+ * corresponds to pmdp_collapse_flush() in the THP collapse code path).
+ */
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
int nr_start = *nr, ret = 0;
@@ -2392,7 +2412,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
goto pte_unmap;
}
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
+ unlikely(pte_val(pte) != pte_val(*ptep))) {
gup_put_folio(folio, 1, flags);
goto pte_unmap;
}
@@ -2439,8 +2460,9 @@ pte_unmap:
* get_user_pages_fast_only implementation that can pin pages. Thus it's still
* useful to have gup_huge_pmd even if we can't operate on ptes.
*/
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
return 0;
}
@@ -2764,7 +2786,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
PMD_SHIFT, next, flags, pages, nr))
return 0;
- } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
+ } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e9414ee57c5b..f42bb51e023a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2894,11 +2894,9 @@ static void split_huge_pages_all(void)
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
int nr_pages;
- if (!pfn_valid(pfn))
- continue;
- page = pfn_to_page(pfn);
- if (!get_page_unless_zero(page))
+ page = pfn_to_online_page(pfn);
+ if (!page || !get_page_unless_zero(page))
continue;
if (zone != page_zone(page))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e070b8593b37..0bdfc7e1c933 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3420,6 +3420,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
{
int i, nid = page_to_nid(page);
struct hstate *target_hstate;
+ struct page *subpage;
int rc = 0;
target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
@@ -3453,15 +3454,16 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
mutex_lock(&target_hstate->resize_lock);
for (i = 0; i < pages_per_huge_page(h);
i += pages_per_huge_page(target_hstate)) {
+ subpage = nth_page(page, i);
if (hstate_is_gigantic(target_hstate))
- prep_compound_gigantic_page_for_demote(page + i,
+ prep_compound_gigantic_page_for_demote(subpage,
target_hstate->order);
else
- prep_compound_page(page + i, target_hstate->order);
- set_page_private(page + i, 0);
- set_page_refcounted(page + i);
- prep_new_huge_page(target_hstate, page + i, nid);
- put_page(page + i);
+ prep_compound_page(subpage, target_hstate->order);
+ set_page_private(subpage, 0);
+ set_page_refcounted(subpage);
+ prep_new_huge_page(target_hstate, subpage, nid);
+ put_page(subpage);
}
mutex_unlock(&target_hstate->resize_lock);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 01f71786d530..70b7ac66411c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1083,10 +1083,12 @@ static void collapse_huge_page(struct mm_struct *mm,
pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
/*
- * After this gup_fast can't run anymore. This also removes
- * any huge TLB entry from the CPU so we won't allow
- * huge and small TLB entries for the same virtual address
- * to avoid the risk of CPU bugs in that area.
+ * This removes any huge TLB entry from the CPU so we won't allow
+ * huge and small TLB entries for the same virtual address to
+ * avoid the risk of CPU bugs in that area.
+ *
+ * Parallel fast GUP is fine since fast GUP will back off when
+ * it detects PMD is changed.
*/
_pmd = pmdp_collapse_flush(vma, address, pmd);
spin_unlock(pmd_ptl);
diff --git a/mm/madvise.c b/mm/madvise.c
index 5f0f0948a50e..9ff51650f4f0 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -451,8 +451,11 @@ regular_page:
continue;
}
- /* Do not interfere with other mappings of this page */
- if (page_mapcount(page) != 1)
+ /*
+ * Do not interfere with other mappings of this page and
+ * non-LRU page.
+ */
+ if (!PageLRU(page) || page_mapcount(page) != 1)
continue;
VM_BUG_ON_PAGE(PageTransCompound(page), page);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 14439806b5ef..e7ac570dda75 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -345,13 +345,17 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
* not much we can do. We just print a message and ignore otherwise.
*/
+#define FSDAX_INVALID_PGOFF ULONG_MAX
+
/*
* Schedule a process for later kill.
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
*
- * Notice: @fsdax_pgoff is used only when @p is a fsdax page.
- * In other cases, such as anonymous and file-backend page, the address to be
- * killed can be caculated by @p itself.
+ * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
+ * filesystem with a memory failure handler has claimed the
+ * memory_failure event. In all other cases, page->index and
+ * page->mapping are sufficient for mapping the page back to its
+ * corresponding user virtual address.
*/
static void add_to_kill(struct task_struct *tsk, struct page *p,
pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
@@ -367,11 +371,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
tk->addr = page_address_in_vma(p, vma);
if (is_zone_device_page(p)) {
- /*
- * Since page->mapping is not used for fsdax, we need
- * calculate the address based on the vma.
- */
- if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
+ if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
} else
@@ -523,7 +523,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
if (!page_mapped_in_vma(page, vma))
continue;
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, 0, vma, to_kill);
+ add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+ to_kill);
}
}
read_unlock(&tasklist_lock);
@@ -559,7 +560,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
* to be informed of all such data corruptions.
*/
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, 0, vma, to_kill);
+ add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+ to_kill);
}
}
read_unlock(&tasklist_lock);
@@ -743,6 +745,9 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
};
priv.tk.tsk = p;
+ if (!p->mm)
+ return -EFAULT;
+
mmap_read_lock(p->mm);
ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
(void *)&priv);
@@ -1928,7 +1933,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
* Call driver's implementation to handle the memory failure, otherwise
* fall back to generic handler.
*/
- if (pgmap->ops->memory_failure) {
+ if (pgmap_has_memory_failure(pgmap)) {
rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
/*
* Fall back to generic handler too if operation is not
diff --git a/mm/memory.c b/mm/memory.c
index 4ba73f5aa8bb..a78814413ac0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4386,14 +4386,20 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
- ret = 0;
+
/* Re-check under ptl */
- if (likely(!vmf_pte_changed(vmf)))
+ if (likely(!vmf_pte_changed(vmf))) {
do_set_pte(vmf, page, vmf->address);
- else
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, vmf->address, vmf->pte);
+
+ ret = 0;
+ } else {
+ update_mmu_tlb(vma, vmf->address, vmf->pte);
ret = VM_FAULT_NOPAGE;
+ }
- update_mmu_tlb(vma, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
}
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 27fb37d65476..dbf6c7a7a7c9 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
+#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
@@ -193,10 +194,10 @@ again:
bool anon_exclusive;
pte_t swp_pte;
+ flush_cache_page(vma, addr, pte_pfn(*ptep));
anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
if (anon_exclusive) {
- flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush(vma, addr, ptep);
+ pte = ptep_clear_flush(vma, addr, ptep);
if (page_try_share_anon_rmap(page)) {
set_pte_at(mm, addr, ptep, pte);
@@ -206,11 +207,15 @@ again:
goto next;
}
} else {
- ptep_get_and_clear(mm, addr, ptep);
+ pte = ptep_get_and_clear(mm, addr, ptep);
}
migrate->cpages++;
+ /* Set the dirty flag on the folio now the pte is gone. */
+ if (pte_dirty(pte))
+ folio_mark_dirty(page_folio(page));
+
/* Setup special migration page table entry */
if (mpfn & MIGRATE_PFN_WRITE)
entry = make_writable_migration_entry(
@@ -254,13 +259,14 @@ next:
migrate->dst[migrate->npages] = 0;
migrate->src[migrate->npages++] = mpfn;
}
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(ptep - 1, ptl);
/* Only flush the TLB if we actually modified any entries */
if (unmapped)
flush_tlb_range(walk->vma, start, end);
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(ptep - 1, ptl);
+
return 0;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5486d47406e..d04211f0ef0b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4708,6 +4708,30 @@ void fs_reclaim_release(gfp_t gfp_mask)
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
+/*
+ * Zonelists may change due to hotplug during allocation. Detect when zonelists
+ * have been rebuilt so that the allocation can be retried. The reader side does
+ * not take a lock and simply retries the allocation if the zonelist changed.
+ * The writer side is protected by the embedded spin_lock.
+ */
+static DEFINE_SEQLOCK(zonelist_update_seq);
+
+static unsigned int zonelist_iter_begin(void)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return read_seqbegin(&zonelist_update_seq);
+
+ return 0;
+}
+
+static unsigned int check_retry_zonelist(unsigned int seq)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return read_seqretry(&zonelist_update_seq, seq);
+
+ return seq;
+}
+
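(Illustrative aside, not part of the patch: the reader-side retry pattern these helpers
support, mirroring what __alloc_pages_slowpath() does below; the label and variables are
a sketch.)

restart:
	cookie = zonelist_iter_begin();
	/* ... attempt the allocation, possibly entering reclaim or compaction ... */
	if (!page && check_retry_zonelist(cookie))
		goto restart;	/* zonelists were rebuilt by memory hotplug; start over */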
/* Perform direct synchronous page reclaim */
static unsigned long
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
@@ -5001,6 +5025,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
int compaction_retries;
int no_progress_loops;
unsigned int cpuset_mems_cookie;
+ unsigned int zonelist_iter_cookie;
int reserve_flags;
/*
@@ -5011,11 +5036,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
gfp_mask &= ~__GFP_ATOMIC;
-retry_cpuset:
+restart:
compaction_retries = 0;
no_progress_loops = 0;
compact_priority = DEF_COMPACT_PRIORITY;
cpuset_mems_cookie = read_mems_allowed_begin();
+ zonelist_iter_cookie = zonelist_iter_begin();
/*
* The fast path uses conservative alloc_flags to succeed only until
@@ -5187,9 +5213,13 @@ retry:
goto retry;
- /* Deal with possible cpuset update races before we start OOM killing */
- if (check_retry_cpuset(cpuset_mems_cookie, ac))
- goto retry_cpuset;
+ /*
+ * Deal with possible cpuset update races or zonelist updates to avoid
+ * an unnecessary OOM kill.
+ */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+ check_retry_zonelist(zonelist_iter_cookie))
+ goto restart;
/* Reclaim has failed us, start killing things */
page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
@@ -5209,9 +5239,13 @@ retry:
}
nopage:
- /* Deal with possible cpuset update races before we fail */
- if (check_retry_cpuset(cpuset_mems_cookie, ac))
- goto retry_cpuset;
+ /*
+ * Deal with possible cpuset update races or zonelist updates to avoid
+ * an unnecessary OOM kill.
+ */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+ check_retry_zonelist(zonelist_iter_cookie))
+ goto restart;
/*
* Make sure that __GFP_NOFAIL request doesn't leak out and make sure
@@ -5706,6 +5740,18 @@ refill:
/* reset page count bias and offset to start of new frag */
nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = size - fragsz;
+ if (unlikely(offset < 0)) {
+ /*
+ * The caller is trying to allocate a fragment
+ * with fragsz > PAGE_SIZE but the cache isn't big
+ * enough to satisfy the request; this may
+ * happen in low-memory conditions.
+ * We don't release the cache page because
+ * it could make memory pressure worse,
+ * so we simply return NULL here.
+ */
+ return NULL;
+ }
}
nc->pagecnt_bias--;
@@ -6514,9 +6560,8 @@ static void __build_all_zonelists(void *data)
int nid;
int __maybe_unused cpu;
pg_data_t *self = data;
- static DEFINE_SPINLOCK(lock);
- spin_lock(&lock);
+ write_seqlock(&zonelist_update_seq);
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
@@ -6553,7 +6598,7 @@ static void __build_all_zonelists(void *data)
#endif
}
- spin_unlock(&lock);
+ write_sequnlock(&zonelist_update_seq);
}
static noinline void __init
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 9d73dc38e3d7..eb3a68ca92ad 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -288,6 +288,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
* isolate_single_pageblock()
+ * @migratetype: migrate type to set in error recovery.
*
* Free and in-use pages can be as big as MAX_ORDER-1 and contain more than one
* pageblock. When not all pageblocks within a page are isolated at the same
@@ -302,9 +303,9 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* the in-use page then splitting the free page.
*/
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
- gfp_t gfp_flags, bool isolate_before, bool skip_isolation)
+ gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
+ int migratetype)
{
- unsigned char saved_mt;
unsigned long start_pfn;
unsigned long isolate_pageblock;
unsigned long pfn;
@@ -328,13 +329,13 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES),
zone->zone_start_pfn);
- saved_mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
+ if (skip_isolation) {
+ int mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
- if (skip_isolation)
- VM_BUG_ON(!is_migrate_isolate(saved_mt));
- else {
- ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt, flags,
- isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+ VM_BUG_ON(!is_migrate_isolate(mt));
+ } else {
+ ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype,
+ flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
if (ret)
return ret;
@@ -475,7 +476,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
failed:
/* restore the original migratetype */
if (!skip_isolation)
- unset_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt);
+ unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype);
return -EBUSY;
}
@@ -537,7 +538,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false, skip_isolation);
+ ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false,
+ skip_isolation, migratetype);
if (ret)
return ret;
@@ -545,7 +547,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
skip_isolation = true;
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true, skip_isolation);
+ ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true,
+ skip_isolation, migratetype);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
return ret;
diff --git a/mm/secretmem.c b/mm/secretmem.c
index e3e9590c6fb3..3f7154099795 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -285,7 +285,7 @@ static int secretmem_init(void)
secretmem_mnt = kern_mount(&secretmem_fs);
if (IS_ERR(secretmem_mnt))
- ret = PTR_ERR(secretmem_mnt);
+ return PTR_ERR(secretmem_mnt);
/* prevent secretmem mappings from ever getting PROT_EXEC */
secretmem_mnt->mnt_flags |= MNT_NOEXEC;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 07b948288f84..ccc02573588f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -475,6 +475,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
int refcnt;
+ bool rcu_set;
if (unlikely(!s) || !kasan_check_byte(s))
return;
@@ -482,6 +483,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
cpus_read_lock();
mutex_lock(&slab_mutex);
+ rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
+
refcnt = --s->refcount;
if (refcnt)
goto out_unlock;
@@ -492,7 +495,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
out_unlock:
mutex_unlock(&slab_mutex);
cpus_read_unlock();
- if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+ if (!refcnt && !rcu_set)
kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
diff --git a/mm/slub.c b/mm/slub.c
index 862dbd9af4f5..4b98dff9be8e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -310,6 +310,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
*/
static nodemask_t slab_nodes;
+/*
+ * Workqueue used for flush_cpu_slab().
+ */
+static struct workqueue_struct *flushwq;
+
/********************************************************************
* Core slab cache functions
*******************************************************************/
@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
INIT_WORK(&sfw->work, flush_cpu_slab);
sfw->skip = false;
sfw->s = s;
- schedule_work_on(cpu, &sfw->work);
+ queue_work_on(cpu, flushwq, &sfw->work);
}
for_each_online_cpu(cpu) {
@@ -4858,6 +4863,8 @@ void __init kmem_cache_init(void)
void __init kmem_cache_init_late(void)
{
+ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
+ WARN_ON(!flushwq);
}
struct kmem_cache *
@@ -4926,6 +4933,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
/* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_track_caller);
@@ -4957,6 +4966,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
/* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
@@ -5890,7 +5901,8 @@ static char *create_unique_id(struct kmem_cache *s)
char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
char *p = name;
- BUG_ON(!name);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
*p++ = ':';
/*
@@ -5948,6 +5960,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
* for the symlinks.
*/
name = create_unique_id(s);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
}
s->kobj.kset = kset;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e166051566f4..41afa6d45b23 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -151,7 +151,7 @@ void __delete_from_swap_cache(struct folio *folio,
for (i = 0; i < nr; i++) {
void *entry = xas_store(&xas, shadow);
- VM_BUG_ON_FOLIO(entry != folio, folio);
+ VM_BUG_ON_PAGE(entry != folio, entry);
set_page_private(folio_page(folio, i), 0);
xas_next(&xas);
}
diff --git a/mm/util.c b/mm/util.c
index c9439c66d8cf..346e40177bc6 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -619,6 +619,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
if (ret || size <= PAGE_SIZE)
return ret;
+ /* non-sleeping allocations are not supported by vmalloc */
+ if (!gfpflags_allow_blocking(flags))
+ return NULL;
+
/* Don't even allow crazy sizes */
if (unlikely(size > INT_MAX)) {
WARN_ON_ONCE(!(flags & __GFP_NOWARN));
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2b1431352dc..382dbe97329f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2550,8 +2550,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
}
if (unlikely(buffer_heads_over_limit)) {
- if (folio_get_private(folio) && folio_trylock(folio)) {
- if (folio_get_private(folio))
+ if (folio_test_private(folio) && folio_trylock(folio)) {
+ if (folio_test_private(folio))
filemap_release_folio(folio, 0);
folio_unlock(folio);
}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b8f8da7ee3de..41c1ad33d009 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -10,6 +10,7 @@
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
+#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_arp.h>
@@ -700,6 +701,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
int max_header_len = batadv_max_header_len();
int ret;
+ if (hard_iface->net_dev->mtu < ETH_MIN_MTU + max_header_len)
+ return -EINVAL;
+
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
goto out;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 9a0ae59cdc50..4f385d52a1c4 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1040,8 +1040,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
goto free_iterate;
}
- if (repl->valid_hooks != t->valid_hooks)
+ if (repl->valid_hooks != t->valid_hooks) {
+ ret = -EINVAL;
goto free_unlock;
+ }
if (repl->num_counters && repl->num_counters != t->private->nentries) {
ret = -EINVAL;
diff --git a/net/compat.c b/net/compat.c
index fe9be3c56ef7..385f04a6be2f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -52,6 +52,7 @@ int __get_compat_msghdr(struct msghdr *kmsg,
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
kmsg->msg_control_is_user = true;
+ kmsg->msg_get_inq = 0;
kmsg->msg_control_user = compat_ptr(msg->msg_control);
kmsg->msg_controllen = msg->msg_controllen;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 764c4cb3fe8f..5dc3860e9fc7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1611,9 +1611,8 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
switch (keys->control.addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- addr_diff = (__force u32)keys->addrs.v4addrs.dst -
- (__force u32)keys->addrs.v4addrs.src;
- if (addr_diff < 0)
+ if ((__force u32)keys->addrs.v4addrs.dst <
+ (__force u32)keys->addrs.v4addrs.src)
swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
if ((__force u16)keys->ports.dst <
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6b9f19122ec1..0ec2f5906a27 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -18,7 +18,6 @@
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
-#include <linux/sched/mm.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
@@ -1144,13 +1143,7 @@ static int __register_pernet_operations(struct list_head *list,
* setup_net() and cleanup_net() are not possible.
*/
for_each_net(net) {
- struct mem_cgroup *old, *memcg;
-
- memcg = mem_cgroup_or_root(get_mem_cgroup_from_obj(net));
- old = set_active_memcg(memcg);
error = ops_init(ops, net);
- set_active_memcg(old);
- mem_cgroup_put(memcg);
if (error)
goto out_undo;
list_add_tail(&net->exit_list, &net_exit_list);
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 718fb77bb372..7889e1ef7fad 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -200,8 +200,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
int err = 0;
struct net_device *dev = NULL;
- if (len < sizeof(*uaddr))
- return -EINVAL;
+ err = ieee802154_sockaddr_check_size(uaddr, len);
+ if (err < 0)
+ return err;
uaddr = (struct sockaddr_ieee802154 *)_uaddr;
if (uaddr->family != AF_IEEE802154)
@@ -493,7 +494,8 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
ro->bound = 0;
- if (len < sizeof(*addr))
+ err = ieee802154_sockaddr_check_size(addr, len);
+ if (err < 0)
goto out;
if (addr->family != AF_IEEE802154)
@@ -564,8 +566,9 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
struct dgram_sock *ro = dgram_sk(sk);
int err = 0;
- if (len < sizeof(*addr))
- return -EINVAL;
+ err = ieee802154_sockaddr_check_size(addr, len);
+ if (err < 0)
+ return err;
if (addr->family != AF_IEEE802154)
return -EINVAL;
@@ -604,6 +607,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
struct ieee802154_mac_cb *cb;
struct dgram_sock *ro = dgram_sk(sk);
struct ieee802154_addr dst_addr;
+ DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
int hlen, tlen;
int err;
@@ -612,10 +616,20 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
return -EOPNOTSUPP;
}
- if (!ro->connected && !msg->msg_name)
- return -EDESTADDRREQ;
- else if (ro->connected && msg->msg_name)
- return -EISCONN;
+ if (msg->msg_name) {
+ if (ro->connected)
+ return -EISCONN;
+ if (msg->msg_namelen < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ err = ieee802154_sockaddr_check_size(daddr, msg->msg_namelen);
+ if (err < 0)
+ return err;
+ ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
+ } else {
+ if (!ro->connected)
+ return -EDESTADDRREQ;
+ dst_addr = ro->dst_addr;
+ }
if (!ro->bound)
dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
@@ -651,16 +665,6 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
cb = mac_cb_init(skb);
cb->type = IEEE802154_FC_TYPE_DATA;
cb->ackreq = ro->want_ack;
-
- if (msg->msg_name) {
- DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
- daddr, msg->msg_name);
-
- ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
- } else {
- dst_addr = ro->dst_addr;
- }
-
cb->secen = ro->secen;
cb->secen_override = ro->secen_override;
cb->seclevel = ro->seclevel;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 73651d17e51f..e11d6b0b62b7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1004,7 +1004,9 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
} else {
+ rcu_read_lock();
ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
+ rcu_read_unlock();
}
}
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6cdfce6f2867..e373dde1f46f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1761,19 +1761,28 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
if (sk->sk_state == TCP_LISTEN)
return -ENOTCONN;
- skb = tcp_recv_skb(sk, seq, &offset);
- if (!skb)
- return 0;
+ while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
+ u8 tcp_flags;
+ int used;
- __skb_unlink(skb, &sk->sk_receive_queue);
- WARN_ON(!skb_set_owner_sk_safe(skb, sk));
- copied = recv_actor(sk, skb);
- if (copied >= 0) {
- seq += copied;
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
+ tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
+ used = recv_actor(sk, skb);
+ consume_skb(skb);
+ if (used < 0) {
+ if (!copied)
+ copied = used;
+ break;
+ }
+ seq += used;
+ copied += used;
+
+ if (tcp_flags & TCPHDR_FIN) {
++seq;
+ break;
+ }
}
- consume_skb(skb);
WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cd72158e953a..560d9eadeaa5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1821,7 +1821,7 @@ int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
continue;
}
- WARN_ON(!skb_set_owner_sk_safe(skb, sk));
+ WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
used = recv_actor(sk, skb);
if (used <= 0) {
if (!copied)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 2ce0c44d0081..dbb1430d6cc2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1070,13 +1070,13 @@ static int __init inet6_init(void)
for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
+ raw_hashinfo_init(&raw_v6_hashinfo);
+
if (disable_ipv6_mod) {
pr_info("Loaded, but administratively disabled, reboot required to enable\n");
goto out;
}
- raw_hashinfo_init(&raw_v6_hashinfo);
-
err = proto_register(&tcpv6_prot, 1);
if (err)
goto out;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index a9ba41648e36..858fd8a28b5b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1028,8 +1028,11 @@ static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
}
rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
- } else
+ } else {
+ rcu_read_lock();
ip6_mr_forward(net, mrt, skb->dev, skb, c);
+ rcu_read_unlock();
+ }
}
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5265d2b6db12..fc764984d687 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4040,7 +4040,6 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
(!elems->he_cap || !elems->he_operation)) {
- mutex_unlock(&sdata->local->sta_mtx);
sdata_info(sdata,
"HE AP is missing HE capability/operation\n");
ret = false;
@@ -5589,12 +5588,16 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
- if (WARN_ON(!sta))
+ if (WARN_ON(!sta)) {
+ mutex_unlock(&local->sta_mtx);
goto free;
+ }
link_sta = rcu_dereference_protected(sta->link[link->link_id],
lockdep_is_held(&local->sta_mtx));
- if (WARN_ON(!link_sta))
+ if (WARN_ON(!link_sta)) {
+ mutex_unlock(&local->sta_mtx);
goto free;
+ }
changed |= ieee80211_recalc_twt_req(link, link_sta, elems);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5f27e6746762..788a82f9c74d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -10,6 +10,7 @@
#include <linux/random.h>
#include <linux/moduleparam.h>
#include <linux/ieee80211.h>
+#include <linux/minmax.h>
#include <net/mac80211.h>
#include "rate.h"
#include "sta_info.h"
@@ -1550,6 +1551,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct ieee80211_sta_rates *rates;
int i = 0;
+ int max_rates = min_t(int, mp->hw->max_rates, IEEE80211_TX_RATE_TABLE_SIZE);
rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
if (!rates)
@@ -1559,10 +1561,10 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
/* Fill up remaining, keep one entry for max_probe_rate */
- for (; i < (mp->hw->max_rates - 1); i++)
+ for (; i < (max_rates - 1); i++)
minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]);
- if (i < mp->hw->max_rates)
+ if (i < max_rates)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
if (i < IEEE80211_TX_RATE_TABLE_SIZE)
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8e77fd2e9fdf..3f9ddd7f04b6 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -729,7 +729,7 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
if (!sdata) {
skb->dev = NULL;
- } else {
+ } else if (!dropped) {
unsigned int hdr_size =
ieee80211_hdrlen(hdr->frame_control);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index bf7fe6cd9dfc..13249e97a069 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5878,6 +5878,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ goto start_xmit;
+
/* update QoS header to prioritize control port frames if possible,
* priorization also happens for control port frames send over
* AF_PACKET
@@ -5905,6 +5908,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
}
rcu_read_unlock();
+start_xmit:
/* mutex lock is only needed for incrementing the cookie counter */
mutex_lock(&local->mtx);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 53826c663723..efcefb2dd882 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -301,14 +301,14 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
local_bh_disable();
spin_lock(&fq->lock);
+ sdata->vif.txqs_stopped[ac] = false;
+
if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
goto out;
if (sdata->vif.type == NL80211_IFTYPE_AP)
ps = &sdata->bss->ps;
- sdata->vif.txqs_stopped[ac] = false;
-
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata)
continue;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index d398f3810662..f8897a70c11d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -150,9 +150,15 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
to->len, MPTCP_SKB_CB(from)->end_seq);
MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
- kfree_skb_partial(from, fragstolen);
+
+ /* note the fwd memory can reach a negative value after accounting
+ * for the delta, but the later skb free will restore a non
+ * negative one
+ */
atomic_add(delta, &sk->sk_rmem_alloc);
mptcp_rmem_charge(sk, delta);
+ kfree_skb_partial(from, fragstolen);
+
return true;
}
@@ -2656,7 +2662,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
dfrag_clear(sk, dfrag);
}
-static void mptcp_cancel_work(struct sock *sk)
+void mptcp_cancel_work(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -2796,13 +2802,12 @@ static void __mptcp_destroy_sock(struct sock *sk)
sock_put(sk);
}
-static void mptcp_close(struct sock *sk, long timeout)
+bool __mptcp_close(struct sock *sk, long timeout)
{
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
bool do_cancel_work = false;
- lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
@@ -2844,6 +2849,17 @@ cleanup:
} else {
mptcp_reset_timeout(msk, 0);
}
+
+ return do_cancel_work;
+}
+
+static void mptcp_close(struct sock *sk, long timeout)
+{
+ bool do_cancel_work;
+
+ lock_sock(sk);
+
+ do_cancel_work = __mptcp_close(sk, timeout);
release_sock(sk);
if (do_cancel_work)
mptcp_cancel_work(sk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 132d50833df1..8f372b8f059c 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -612,6 +612,8 @@ void mptcp_subflow_reset(struct sock *ssk);
void mptcp_subflow_queue_clean(struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+bool __mptcp_close(struct sock *sk, long timeout);
+void mptcp_cancel_work(struct sock *sk);
bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
const struct mptcp_addr_info *b, bool use_port);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index c7d49fb6e7bd..07dd23d0fe04 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -602,30 +602,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}
-static void mptcp_sock_destruct(struct sock *sk)
-{
- /* if new mptcp socket isn't accepted, it is free'd
- * from the tcp listener sockets request queue, linked
- * from req->sk. The tcp socket is released.
- * This calls the ULP release function which will
- * also remove the mptcp socket, via
- * sock_put(ctx->conn).
- *
- * Problem is that the mptcp socket will be in
- * ESTABLISHED state and will not have the SOCK_DEAD flag.
- * Both result in warnings from inet_sock_destruct.
- */
- if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
- sk->sk_state = TCP_CLOSE;
- WARN_ON_ONCE(sk->sk_socket);
- sock_orphan(sk);
- }
-
- /* We don't need to clear msk->subflow, as it's still NULL at this point */
- mptcp_destroy_common(mptcp_sk(sk), 0);
- inet_sock_destruct(sk);
-}
-
static void mptcp_force_close(struct sock *sk)
{
/* the msk is not yet exposed to user-space */
@@ -768,7 +744,6 @@ create_child:
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
- new_msk->sk_destruct = mptcp_sock_destruct;
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
@@ -1763,13 +1738,19 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
for (msk = head; msk; msk = next) {
struct sock *sk = (struct sock *)msk;
- bool slow;
+ bool slow, do_cancel_work;
+ sock_hold(sk);
slow = lock_sock_fast_nested(sk);
next = msk->dl_next;
msk->first = NULL;
msk->dl_next = NULL;
+
+ do_cancel_work = __mptcp_close(sk, 0);
unlock_sock_fast(sk, slow);
+ if (do_cancel_work)
+ mptcp_cancel_work(sk);
+ sock_put(sk);
}
/* we are still under the listener msk socket lock */
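The mptcp hunks above split the old mptcp_close() into a lock-taking wrapper and a __mptcp_close() helper that assumes the socket lock is already held, so mptcp_subflow_queue_clean() can reuse the close logic under its own fast lock and run the deferred work after unlocking. A minimal userspace sketch of that wrapper/helper split, using a pthread mutex and hypothetical names (not the real mptcp code):
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static int refs = 1;

/* helper: caller already holds sk_lock; reports whether deferred work is needed */
static bool __obj_close(void)
{
        return --refs == 0;
}

/* public wrapper: takes the lock, reuses the helper, runs deferred work unlocked */
static void obj_close(void)
{
        bool do_cancel_work;

        pthread_mutex_lock(&sk_lock);
        do_cancel_work = __obj_close();
        pthread_mutex_unlock(&sk_lock);

        if (do_cancel_work)
                printf("cancel pending work outside the lock\n");
}

int main(void)
{
        obj_close();
        return 0;
}
The only point of the split is that callers which already hold the lock can call the helper directly, while the deferred cleanup always runs with the lock dropped.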
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 0d9332e9cf71..617f744a2e3a 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -33,6 +33,7 @@ MODULE_AUTHOR("Rusty Russell <[email protected]>");
MODULE_DESCRIPTION("ftp connection tracking helper");
MODULE_ALIAS("ip_conntrack_ftp");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
+static DEFINE_SPINLOCK(nf_ftp_lock);
#define MAX_PORTS 8
static u_int16_t ports[MAX_PORTS];
@@ -409,7 +410,8 @@ static int help(struct sk_buff *skb,
}
datalen = skb->len - dataoff;
- spin_lock_bh(&ct->lock);
+ /* seqadj (nat) uses ct->lock internally, nf_nat_ftp would cause deadlock */
+ spin_lock_bh(&nf_ftp_lock);
fb_ptr = skb->data + dataoff;
ends_in_nl = (fb_ptr[datalen - 1] == '\n');
@@ -538,7 +540,7 @@ out_update_nl:
if (ends_in_nl)
update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
out:
- spin_unlock_bh(&ct->lock);
+ spin_unlock_bh(&nf_ftp_lock);
return ret;
}
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 992decbcaa5c..5703846bea3b 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -157,15 +157,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
data = ib_ptr;
data_limit = ib_ptr + datalen;
- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
- while (data < data_limit - (19 + MINMATCHLEN)) {
- if (memcmp(data, "\1DCC ", 5)) {
+ /* Skip any whitespace */
+ while (data < data_limit - 10) {
+ if (*data == ' ' || *data == '\r' || *data == '\n')
+ data++;
+ else
+ break;
+ }
+
+ /* strlen("PRIVMSG x ")=10 */
+ if (data < data_limit - 10) {
+ if (strncasecmp("PRIVMSG ", data, 8))
+ goto out;
+ data += 8;
+ }
+
+ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
+ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
+ */
+ while (data < data_limit - (21 + MINMATCHLEN)) {
+ /* Find first " :", the start of message */
+ if (memcmp(data, " :", 2)) {
data++;
continue;
}
+ data += 2;
+
+ /* then check that place only for the DCC command */
+ if (memcmp(data, "\1DCC ", 5))
+ goto out;
data += 5;
- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
+ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
iph = ip_hdr(skb);
pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
@@ -181,7 +203,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
pr_debug("DCC %s detected\n", dccprotos[i]);
/* we have at least
- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
+ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
* data left (== 14/13 bytes) */
if (parse_dcc(data, data_limit, &dcc_ip,
&dcc_port, &addr_beg_p, &addr_end_p)) {
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index daf06f71d31c..77f5e82d8e3f 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
return ret;
if (ret == 0)
break;
- dataoff += *matchoff;
+ dataoff = *matchoff;
}
*in_header = 0;
}
@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
break;
if (ret == 0)
return ret;
- dataoff += *matchoff;
+ dataoff = *matchoff;
}
if (in_header)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 816052089b33..63c70141b3e5 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2197,7 +2197,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
- struct nft_stats __percpu *stats = NULL;
struct nft_table *table = ctx->table;
struct nft_base_chain *basechain;
struct net *net = ctx->net;
@@ -2212,6 +2211,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
return -EOVERFLOW;
if (nla[NFTA_CHAIN_HOOK]) {
+ struct nft_stats __percpu *stats = NULL;
struct nft_chain_hook hook;
if (flags & NFT_CHAIN_BINDING)
@@ -2243,8 +2243,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (err < 0) {
nft_chain_release_hook(&hook);
kfree(basechain);
+ free_percpu(stats);
return err;
}
+ if (stats)
+ static_branch_inc(&nft_counters_enabled);
} else {
if (flags & NFT_CHAIN_BASE)
return -EINVAL;
@@ -2319,9 +2322,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
goto err_unregister_hook;
}
- if (stats)
- static_branch_inc(&nft_counters_enabled);
-
table->use++;
return 0;
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 0fa2e2030427..ee6840bd5933 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp;
struct tcphdr _tcph;
+ bool found = false;
memset(&ctx, 0, sizeof(ctx));
@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
data->genre = f->genre;
data->version = f->version;
+ found = true;
break;
}
- return true;
+ return found;
}
EXPORT_SYMBOL_GPL(nf_osf_find);
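The nfnetlink_osf hunk above fixes nf_osf_find() reporting success even when the loop matched nothing: a found flag is now set only when a fingerprint actually matches. The same pattern in a tiny standalone C program, with a made-up table and key purely for illustration:
#include <stdbool.h>
#include <stdio.h>

/* return true only if some element matched, instead of unconditionally */
static bool find_match(const int *tbl, int n, int key)
{
        bool found = false;

        for (int i = 0; i < n; i++) {
                if (tbl[i] != key)
                        continue;
                found = true;
                break;
        }
        return found;
}

int main(void)
{
        int tbl[] = { 1, 3, 5 };

        printf("%d %d\n", find_match(tbl, 3, 3), find_match(tbl, 3, 4));
        return 0;
}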
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index d55afb8d14be..5950974ae8f6 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1394,7 +1394,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
err = tcf_ct_flow_table_get(net, params);
if (err)
- goto cleanup;
+ goto cleanup_params;
spin_lock_bh(&c->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -1409,6 +1409,9 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
return res;
+cleanup_params:
+ if (params->tmpl)
+ nf_ct_put(params->tmpl);
cleanup:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
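The act_ct hunk above adds a cleanup_params label so the conntrack template is dropped when tcf_ct_flow_table_get() fails after the template was allocated. A compact sketch of that layered goto-unwind style, with hypothetical malloc()'d objects standing in for the real resources:
#include <stdio.h>
#include <stdlib.h>

static int obj_init(void)
{
        char *params = NULL, *tmpl = NULL;
        int err;

        params = malloc(16);
        if (!params)
                return -1;

        tmpl = malloc(16);
        if (!tmpl) {
                err = -1;
                goto cleanup;
        }

        err = -1;                       /* pretend a later step failed */
        if (err)
                goto cleanup_tmpl;

        return 0;

cleanup_tmpl:
        free(tmpl);                     /* new label: also drop the template */
cleanup:
        free(params);
        return err;
}

int main(void)
{
        printf("obj_init() = %d\n", obj_init());
        return 0;
}
Each failure jumps to the label that releases exactly what has been allocated so far, which is why the new label is placed above the old one and falls through into it.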
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 790d6809be81..51d175f3fbcb 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -2137,6 +2137,7 @@ replay:
}
if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
+ tfilter_put(tp, fh);
NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
err = -EINVAL;
goto errout;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 0b941dd63d26..86675a79da1e 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -67,6 +67,7 @@ struct taprio_sched {
u32 flags;
enum tk_offsets tk_offset;
int clockid;
+ bool offloaded;
atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
* speeds it's sub-nanoseconds per byte
*/
@@ -1279,6 +1280,8 @@ static int taprio_enable_offload(struct net_device *dev,
goto done;
}
+ q->offloaded = true;
+
done:
taprio_offload_free(offload);
@@ -1293,12 +1296,9 @@ static int taprio_disable_offload(struct net_device *dev,
struct tc_taprio_qopt_offload *offload;
int err;
- if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
+ if (!q->offloaded)
return 0;
- if (!ops->ndo_setup_tc)
- return -EOPNOTSUPP;
-
offload = taprio_offload_alloc(0);
if (!offload) {
NL_SET_ERR_MSG(extack,
@@ -1314,6 +1314,8 @@ static int taprio_disable_offload(struct net_device *dev,
goto out;
}
+ q->offloaded = false;
+
out:
taprio_offload_free(offload);
@@ -1949,12 +1951,14 @@ start_error:
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
- struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned int ntx = cl - 1;
- if (!dev_queue)
+ if (ntx >= dev->num_tx_queues)
return NULL;
- return dev_queue->qdisc_sleeping;
+ return q->qdiscs[ntx];
}
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index ebf56cdf17db..df89c2e08cbf 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -2239,7 +2239,7 @@ out:
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
struct smc_buf_desc *buf_desc, bool is_rmb)
{
- int i, rc = 0;
+ int i, rc = 0, cnt = 0;
/* protect against parallel link reconfiguration */
mutex_lock(&lgr->llc_conf_mutex);
@@ -2252,9 +2252,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
rc = -ENOMEM;
goto out;
}
+ cnt++;
}
out:
mutex_unlock(&lgr->llc_conf_mutex);
+ if (!rc && !cnt)
+ rc = -EINVAL;
return rc;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2c127951764a..775836f6785a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1361,7 +1361,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
25599, /* 4.166666... */
17067, /* 2.777777... */
12801, /* 2.083333... */
- 11769, /* 1.851851... */
+ 11377, /* 1.851725... */
10239, /* 1.666666... */
8532, /* 1.388888... */
7680, /* 1.250000... */
@@ -1444,7 +1444,7 @@ static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate)
25599, /* 4.166666... */
17067, /* 2.777777... */
12801, /* 2.083333... */
- 11769, /* 1.851851... */
+ 11377, /* 1.851725... */
10239, /* 1.666666... */
8532, /* 1.388888... */
7680, /* 1.250000... */
diff --git a/scripts/Makefile.debug b/scripts/Makefile.debug
index 9f39b0130551..8cf1cb22dd93 100644
--- a/scripts/Makefile.debug
+++ b/scripts/Makefile.debug
@@ -1,20 +1,19 @@
DEBUG_CFLAGS :=
+debug-flags-y := -g
ifdef CONFIG_DEBUG_INFO_SPLIT
DEBUG_CFLAGS += -gsplit-dwarf
-else
-DEBUG_CFLAGS += -g
endif
-ifndef CONFIG_AS_IS_LLVM
-KBUILD_AFLAGS += -Wa,-gdwarf-2
-endif
-
-ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4
-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5
-DEBUG_CFLAGS += -gdwarf-$(dwarf-version-y)
+debug-flags-$(CONFIG_DEBUG_INFO_DWARF4) += -gdwarf-4
+debug-flags-$(CONFIG_DEBUG_INFO_DWARF5) += -gdwarf-5
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_AS_IS_GNU),yy)
+# Clang does not pass -g or -gdwarf-* option down to GAS.
+# Add -Wa, prefix to explicitly specify the flags.
+KBUILD_AFLAGS += $(addprefix -Wa$(comma), $(debug-flags-y))
endif
+DEBUG_CFLAGS += $(debug-flags-y)
+KBUILD_AFLAGS += $(debug-flags-y)
ifdef CONFIG_DEBUG_INFO_REDUCED
DEBUG_CFLAGS += -fno-var-tracking
@@ -29,5 +28,5 @@ KBUILD_AFLAGS += -gz=zlib
KBUILD_LDFLAGS += --compress-debug-sections=zlib
endif
-KBUILD_CFLAGS += $(DEBUG_CFLAGS)
+KBUILD_CFLAGS += $(DEBUG_CFLAGS)
export DEBUG_CFLAGS
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 79e759aac543..4aa09e0cb86a 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3751,7 +3751,7 @@ sub process {
if ($realfile =~ /\.S$/ &&
$line =~ /^\+\s*(?:[A-Z]+_)?SYM_[A-Z]+_(?:START|END)(?:_[A-Z_]+)?\s*\(\s*\.L/) {
WARN("AVOID_L_PREFIX",
- "Avoid using '.L' prefixed local symbol names for denoting a range of code via 'SYM_*_START/END' annotations; see Documentation/asm-annotations.rst\n" . $herecurr);
+ "Avoid using '.L' prefixed local symbol names for denoting a range of code via 'SYM_*_START/END' annotations; see Documentation/core-api/asm-annotations.rst\n" . $herecurr);
}
# check we are in a valid source file C or perl if not then ignore this hunk
@@ -4695,12 +4695,12 @@ sub process {
}
}
-# avoid BUG() or BUG_ON()
- if ($line =~ /\b(?:BUG|BUG_ON)\b/) {
+# do not use BUG() or variants
+ if ($line =~ /\b(?!AA_|BUILD_|DCCP_|IDA_|KVM_|RWLOCK_|snd_|SPIN_)(?:[a-zA-Z_]*_)?BUG(?:_ON)?(?:_[A-Z_]+)?\s*\(/) {
my $msg_level = \&WARN;
$msg_level = \&CHK if ($file);
&{$msg_level}("AVOID_BUG",
- "Avoid crashing the kernel - try using WARN_ON & recovery code rather than BUG() or BUG_ON()\n" . $herecurr);
+ "Do not crash the kernel unless it is absolutely unavoidable--use WARN_ON_ONCE() plus recovery code (if feasible) instead of BUG() or variants\n" . $herecurr);
}
# avoid LINUX_VERSION_CODE
diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py
index 1337cedca096..bb78c9bde55c 100755
--- a/scripts/clang-tools/run-clang-tools.py
+++ b/scripts/clang-tools/run-clang-tools.py
@@ -12,7 +12,6 @@ compile_commands.json.
import argparse
import json
import multiprocessing
-import os
import subprocess
import sys
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index fa8c010aa683..c396aa104090 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -98,7 +98,6 @@ bool menu_is_empty(struct menu *menu);
bool menu_is_visible(struct menu *menu);
bool menu_has_prompt(struct menu *menu);
const char *menu_get_prompt(struct menu *menu);
-struct menu *menu_get_root_menu(struct menu *menu);
struct menu *menu_get_parent_menu(struct menu *menu);
bool menu_has_help(struct menu *menu);
const char *menu_get_help(struct menu *menu);
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 3d6f7cba8846..62b6313f51c8 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -661,11 +661,6 @@ const char *menu_get_prompt(struct menu *menu)
return NULL;
}
-struct menu *menu_get_root_menu(struct menu *menu)
-{
- return &rootmenu;
-}
-
struct menu *menu_get_parent_menu(struct menu *menu)
{
enum prop_type type;
diff --git a/sound/core/init.c b/sound/core/init.c
index 193dae361fac..5377f94eb211 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -178,10 +178,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
return -ENOMEM;
err = snd_card_init(card, parent, idx, xid, module, extra_size);
- if (err < 0) {
- kfree(card);
- return err;
- }
+ if (err < 0)
+ return err; /* card is freed by error handler */
*card_ret = card;
return 0;
@@ -233,7 +231,7 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
card->managed = true;
err = snd_card_init(card, parent, idx, xid, module, extra_size);
if (err < 0) {
- devres_free(card);
+ devres_free(card); /* in managed mode, we need to free manually */
return err;
}
@@ -297,6 +295,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
mutex_unlock(&snd_card_mutex);
dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n",
idx, snd_ecards_limit - 1, err);
+ if (!card->managed)
+ kfree(card); /* manually free here, as no destructor called */
return err;
}
set_bit(idx, snd_cards_lock); /* lock it */
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index d84ffdf47210..5a478649f338 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -450,6 +450,16 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x51cb,
},
+ /* RaptorLake-M */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51ce,
+ },
+ /* RaptorLake-PX */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51cf,
+ },
#endif
};
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index cae9a975cbcc..1a868dd9dc4b 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -157,10 +157,10 @@ static int hda_codec_driver_remove(struct device *dev)
return codec->bus->core.ext_ops->hdev_detach(&codec->core);
}
- refcount_dec(&codec->pcm_ref);
snd_hda_codec_disconnect_pcms(codec);
snd_hda_jack_tbl_disconnect(codec);
- wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
+ if (!refcount_dec_and_test(&codec->pcm_ref))
+ wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
snd_power_sync_ref(codec->bus->card);
if (codec->patch_ops.free)
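The hda_bind hunk above replaces an unconditional refcount_dec() followed by wait_event() with refcount_dec_and_test(), so the remove path only sleeps when somebody else still holds a PCM reference. A rough single-threaded C11 analogue of the dec-and-test decision (the wait itself is only printed, not implemented):
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pcm_ref = 1;

/* drop our reference; only wait for others if we were not the last holder */
static void release_and_maybe_wait(void)
{
        if (atomic_fetch_sub(&pcm_ref, 1) - 1 != 0) {
                printf("would wait until remaining references are gone\n");
                return;
        }
        printf("last reference dropped, no wait needed\n");
}

int main(void)
{
        release_and_maybe_wait();
        return 0;
}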
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b20694fd69de..6f30c374f896 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2550,6 +2550,8 @@ static const struct pci_device_id azx_ids[] = {
/* 5 Series/3400 */
{ PCI_DEVICE(0x8086, 0x3b56),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ { PCI_DEVICE(0x8086, 0x3b57),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
/* Poulsbo */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 6c209cd26c0c..c239d9dbbaef 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -170,6 +170,8 @@ struct hdmi_spec {
bool dyn_pcm_no_legacy;
/* hdmi interrupt trigger control flag for Nvidia codec */
bool hdmi_intr_trig_ctrl;
+ bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
+
bool intel_hsw_fixup; /* apply Intel platform-specific fixups */
/*
* Non-generic VIA/NVIDIA specific
@@ -679,15 +681,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
int ca, int active_channels,
int conn_type)
{
+ struct hdmi_spec *spec = codec->spec;
union audio_infoframe ai;
memset(&ai, 0, sizeof(ai));
- if (conn_type == 0) { /* HDMI */
+ if ((conn_type == 0) || /* HDMI */
+ /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */
+ (conn_type == 1 && spec->nv_dp_workaround)) {
struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
- hdmi_ai->type = 0x84;
- hdmi_ai->ver = 0x01;
- hdmi_ai->len = 0x0a;
+ if (conn_type == 0) { /* HDMI */
+ hdmi_ai->type = 0x84;
+ hdmi_ai->ver = 0x01;
+ hdmi_ai->len = 0x0a;
+ } else {/* Nvidia DP */
+ hdmi_ai->type = 0x84;
+ hdmi_ai->ver = 0x1b;
+ hdmi_ai->len = 0x11 << 2;
+ }
hdmi_ai->CC02_CT47 = active_channels - 1;
hdmi_ai->CA = ca;
hdmi_checksum_audio_infoframe(hdmi_ai);
@@ -1267,6 +1278,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
set_bit(pcm_idx, &spec->pcm_in_use);
per_pin = get_pin(spec, pin_idx);
per_pin->cvt_nid = per_cvt->cvt_nid;
+ per_pin->silent_stream = false;
hinfo->nid = per_cvt->cvt_nid;
/* flip stripe flag for the assigned stream if supported */
@@ -3617,6 +3629,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
spec->pcm_playback.rates = SUPPORTED_RATES;
spec->pcm_playback.maxbps = SUPPORTED_MAXBPS;
spec->pcm_playback.formats = SUPPORTED_FORMATS;
+ spec->nv_dp_workaround = true;
return 0;
}
@@ -3756,6 +3769,7 @@ static int patch_nvhdmi(struct hda_codec *codec)
spec->chmap.ops.chmap_cea_alloc_validate_get_type =
nvhdmi_chmap_cea_alloc_validate_get_type;
spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+ spec->nv_dp_workaround = true;
codec->link_down_at_suspend = 1;
@@ -3779,6 +3793,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec)
spec->chmap.ops.chmap_cea_alloc_validate_get_type =
nvhdmi_chmap_cea_alloc_validate_get_type;
spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+ spec->nv_dp_workaround = true;
codec->link_down_at_suspend = 1;
@@ -3984,6 +3999,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
generic_hdmi_init_per_pins(codec);
+ codec->depop_delay = 10;
codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
spec->chmap.ops.chmap_cea_alloc_validate_get_type =
nvhdmi_chmap_cea_alloc_validate_get_type;
@@ -3992,6 +4008,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
spec->chmap.ops.chmap_cea_alloc_validate_get_type =
nvhdmi_chmap_cea_alloc_validate_get_type;
spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+ spec->nv_dp_workaround = true;
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 38930cf5aace..f9d46ae4c7b7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7067,6 +7067,8 @@ enum {
ALC294_FIXUP_ASUS_GU502_HP,
ALC294_FIXUP_ASUS_GU502_PINS,
ALC294_FIXUP_ASUS_GU502_VERBS,
+ ALC294_FIXUP_ASUS_G513_PINS,
+ ALC285_FIXUP_ASUS_G533Z_PINS,
ALC285_FIXUP_HP_GPIO_LED,
ALC285_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_GPIO_LED,
@@ -8406,6 +8408,24 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc294_fixup_gu502_hp,
},
+ [ALC294_FIXUP_ASUS_G513_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11050 }, /* front HP mic */
+ { 0x1a, 0x03a11c30 }, /* rear external mic */
+ { 0x21, 0x03211420 }, /* front HP out */
+ { }
+ },
+ },
+ [ALC285_FIXUP_ASUS_G533Z_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x14, 0x90170120 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
+ },
[ALC294_FIXUP_ASUS_COEF_1B] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -9149,6 +9169,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
@@ -9165,6 +9186,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -9292,6 +9314,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -9339,10 +9362,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
- SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9358,14 +9382,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
- SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
- SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -9569,6 +9595,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 15596452ca37..4f19fd9b65d1 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -901,7 +901,10 @@ static void nau8824_jdet_work(struct work_struct *work)
NAU8824_IRQ_KEY_RELEASE_DIS |
NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
- nau8824_sema_release(nau8824);
+ if (nau8824->resume_lock) {
+ nau8824_sema_release(nau8824);
+ nau8824->resume_lock = false;
+ }
}
static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
@@ -966,7 +969,10 @@ static irqreturn_t nau8824_interrupt(int irq, void *data)
/* release semaphore held after resume,
* and cancel jack detection
*/
- nau8824_sema_release(nau8824);
+ if (nau8824->resume_lock) {
+ nau8824_sema_release(nau8824);
+ nau8824->resume_lock = false;
+ }
cancel_work_sync(&nau8824->jdet_work);
} else if (active_irq & NAU8824_KEY_SHORT_PRESS_IRQ) {
int key_status, button_pressed;
@@ -1524,6 +1530,7 @@ static int __maybe_unused nau8824_suspend(struct snd_soc_component *component)
static int __maybe_unused nau8824_resume(struct snd_soc_component *component)
{
struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
+ int ret;
regcache_cache_only(nau8824->regmap, false);
regcache_sync(nau8824->regmap);
@@ -1531,7 +1538,10 @@ static int __maybe_unused nau8824_resume(struct snd_soc_component *component)
/* Hold semaphore to postpone playback happening
* until jack detection done.
*/
- nau8824_sema_acquire(nau8824, 0);
+ nau8824->resume_lock = true;
+ ret = nau8824_sema_acquire(nau8824, 0);
+ if (ret)
+ nau8824->resume_lock = false;
enable_irq(nau8824->irq);
}
@@ -1940,6 +1950,7 @@ static int nau8824_i2c_probe(struct i2c_client *i2c)
nau8824->regmap = devm_regmap_init_i2c(i2c, &nau8824_regmap_config);
if (IS_ERR(nau8824->regmap))
return PTR_ERR(nau8824->regmap);
+ nau8824->resume_lock = false;
nau8824->dev = dev;
nau8824->irq = i2c->irq;
sema_init(&nau8824->jd_sem, 1);
diff --git a/sound/soc/codecs/nau8824.h b/sound/soc/codecs/nau8824.h
index de4bae8281d0..5fcfc43dfc85 100644
--- a/sound/soc/codecs/nau8824.h
+++ b/sound/soc/codecs/nau8824.h
@@ -436,6 +436,7 @@ struct nau8824 {
struct semaphore jd_sem;
int fs;
int irq;
+ int resume_lock;
int micbias_voltage;
int vref_impedance;
int jkdet_polarity;
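The nau8824 hunks above add a resume_lock flag so the jack-detect semaphore is only released by paths that actually acquired it during resume, keeping acquire and release balanced when the timed acquire fails. A loose userspace sketch with a POSIX semaphore; the function names and the trywait stand-in are assumptions, not the driver's real semantics:
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t jd_sem;
static bool resume_lock;        /* only release what we actually acquired */

static void resume_path(void)
{
        resume_lock = true;
        if (sem_trywait(&jd_sem) != 0)
                resume_lock = false;    /* acquire failed: do not release later */
}

static void irq_path(void)
{
        if (resume_lock) {
                sem_post(&jd_sem);
                resume_lock = false;
        }
}

int main(void)
{
        sem_init(&jd_sem, 0, 1);
        resume_path();
        irq_path();
        printf("balanced acquire/release\n");
        return 0;
}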
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 5a844329800f..0f8e6dd214b0 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2494,7 +2494,7 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
/* Select JD-source */
snd_soc_component_update_bits(component, RT5640_JD_CTRL,
- RT5640_JD_MASK, rt5640->jd_src);
+ RT5640_JD_MASK, rt5640->jd_src << RT5640_JD_SFT);
/* Selecting GPIO01 as an interrupt */
snd_soc_component_update_bits(component, RT5640_GPIO_CTRL1,
@@ -2504,12 +2504,8 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
snd_soc_component_update_bits(component, RT5640_GPIO_CTRL3,
RT5640_GP1_PF_MASK, RT5640_GP1_PF_OUT);
- /* Enabling jd2 in general control 1 */
snd_soc_component_write(component, RT5640_DUMMY1, 0x3f41);
- /* Enabling jd2 in general control 2 */
- snd_soc_component_write(component, RT5640_DUMMY2, 0x4001);
-
rt5640_set_ovcd_params(component);
/*
@@ -2518,12 +2514,25 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
* pin 0/1 instead of it being stuck to 1. So we invert the JD polarity
* on systems where the hardware does not already do this.
*/
- if (rt5640->jd_inverted)
- snd_soc_component_write(component, RT5640_IRQ_CTRL1,
- RT5640_IRQ_JD_NOR);
- else
- snd_soc_component_write(component, RT5640_IRQ_CTRL1,
- RT5640_IRQ_JD_NOR | RT5640_JD_P_INV);
+ if (rt5640->jd_inverted) {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD1_IN4P)
+ snd_soc_component_write(component, RT5640_IRQ_CTRL1,
+ RT5640_IRQ_JD_NOR);
+ else if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK | RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR | RT5640_JD2_EN);
+ } else {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD1_IN4P)
+ snd_soc_component_write(component, RT5640_IRQ_CTRL1,
+ RT5640_IRQ_JD_NOR | RT5640_JD_P_INV);
+ else if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK | RT5640_JD2_P_MASK |
+ RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR | RT5640_JD2_P_INV |
+ RT5640_JD2_EN);
+ }
rt5640->jack = jack;
if (rt5640->jack->status & SND_JACK_MICROPHONE) {
@@ -2725,10 +2734,8 @@ static int rt5640_probe(struct snd_soc_component *component)
if (device_property_read_u32(component->dev,
"realtek,jack-detect-source", &val) == 0) {
- if (val <= RT5640_JD_SRC_GPIO4)
- rt5640->jd_src = val << RT5640_JD_SFT;
- else if (val == RT5640_JD_SRC_HDA_HEADER)
- rt5640->jd_src = RT5640_JD_SRC_HDA_HEADER;
+ if (val <= RT5640_JD_SRC_HDA_HEADER)
+ rt5640->jd_src = val;
else
dev_warn(component->dev, "Warning: Invalid jack-detect-source value: %d, leaving jack-detect disabled\n",
val);
@@ -2809,12 +2816,31 @@ static int rt5640_resume(struct snd_soc_component *component)
regcache_sync(rt5640->regmap);
if (rt5640->jack) {
- if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
+ if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
snd_soc_component_update_bits(component,
RT5640_DUMMY2, 0x1100, 0x1100);
- else
- snd_soc_component_write(component, RT5640_DUMMY2,
- 0x4001);
+ } else {
+ if (rt5640->jd_inverted) {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(
+ component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK |
+ RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR |
+ RT5640_JD2_EN);
+
+ } else {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(
+ component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK |
+ RT5640_JD2_P_MASK |
+ RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR |
+ RT5640_JD2_P_INV |
+ RT5640_JD2_EN);
+ }
+ }
queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
}
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 505c93514051..f58b88e3325b 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -1984,6 +1984,20 @@
#define RT5640_M_MONO_ADC_R_SFT 12
#define RT5640_MCLK_DET (0x1 << 11)
+/* General Control 1 (0xfb) */
+#define RT5640_IRQ_JD2_MASK (0x1 << 12)
+#define RT5640_IRQ_JD2_SFT 12
+#define RT5640_IRQ_JD2_BP (0x0 << 12)
+#define RT5640_IRQ_JD2_NOR (0x1 << 12)
+#define RT5640_JD2_P_MASK (0x1 << 10)
+#define RT5640_JD2_P_SFT 10
+#define RT5640_JD2_P_NOR (0x0 << 10)
+#define RT5640_JD2_P_INV (0x1 << 10)
+#define RT5640_JD2_MASK (0x1 << 8)
+#define RT5640_JD2_SFT 8
+#define RT5640_JD2_DIS (0x0 << 8)
+#define RT5640_JD2_EN (0x1 << 8)
+
/* Codec Private Register definition */
/* MIC Over current threshold scale factor (0x15) */
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index bb653b664146..b6765235a4b3 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -495,6 +495,8 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
},
};
+static const struct regmap_config tas2770_i2c_regmap;
+
static int tas2770_codec_probe(struct snd_soc_component *component)
{
struct tas2770_priv *tas2770 =
@@ -508,6 +510,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
}
tas2770_reset(tas2770);
+ regmap_reinit_cache(tas2770->regmap, &tas2770_i2c_regmap);
return 0;
}
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 14be29530fb5..3f128ced4180 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -698,6 +698,10 @@ static int imx_card_parse_of(struct imx_card_data *data)
of_node_put(cpu);
of_node_put(codec);
of_node_put(platform);
+
+ cpu = NULL;
+ codec = NULL;
+ platform = NULL;
}
return 0;
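The imx-card hunk above resets the cpu/codec/platform node pointers right after of_node_put() inside the loop, so a shared error path cannot drop the same references twice when a later iteration fails. The idea in plain C, where free(NULL) plays the role of the harmless second put:
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *cpu = NULL, *codec = NULL;

        for (int i = 0; i < 2; i++) {
                cpu = malloc(8);
                codec = malloc(8);
                if (!cpu || !codec)
                        goto err;

                /* done with this iteration's references */
                free(cpu);
                free(codec);
                cpu = NULL;     /* reset so a later goto err is a no-op */
                codec = NULL;
        }
        printf("no double free\n");
        return 0;

err:
        free(cpu);              /* safe: either live or NULL */
        free(codec);
        return 1;
}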
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index a49bfaab6b21..2ff30b40a1e4 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -270,6 +270,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AFF")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B00")
},
.driver_data = (void *)(SOF_SDW_TGL_HDMI |
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 8c8f9a851f89..eb71df9da831 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -758,8 +758,7 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
* The endpoint needs to be closed via snd_usb_endpoint_close() later.
*
* Note that this function doesn't configure the endpoint. The substream
- * needs to set it up later via snd_usb_endpoint_set_params() and
- * snd_usb_endpoint_prepare().
+ * needs to set it up later via snd_usb_endpoint_configure().
*/
struct snd_usb_endpoint *
snd_usb_endpoint_open(struct snd_usb_audio *chip,
@@ -1293,13 +1292,12 @@ out_of_memory:
/*
* snd_usb_endpoint_set_params: configure an snd_usb_endpoint
*
- * It's called either from hw_params callback.
* Determine the number of URBs to be used on this endpoint.
* An endpoint must be configured before it can be started.
* An endpoint that is already running can not be reconfigured.
*/
-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep)
+static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep)
{
const struct audioformat *fmt = ep->cur_audiofmt;
int err;
@@ -1382,18 +1380,18 @@ static int init_sample_rate(struct snd_usb_audio *chip,
}
/*
- * snd_usb_endpoint_prepare: Prepare the endpoint
+ * snd_usb_endpoint_configure: Configure the endpoint
*
* This function sets up the EP to be fully usable state.
- * It's called either from prepare callback.
+ * It's called either from hw_params or prepare callback.
* The function checks need_setup flag, and performs nothing unless needed,
* so it's safe to call this multiple times.
*
* This returns zero if unchanged, 1 if the configuration has changed,
* or a negative error code.
*/
-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep)
{
bool iface_first;
int err = 0;
@@ -1414,6 +1412,9 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
if (err < 0)
goto unlock;
}
+ err = snd_usb_endpoint_set_params(chip, ep);
+ if (err < 0)
+ goto unlock;
goto done;
}
@@ -1441,6 +1442,10 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
if (err < 0)
goto unlock;
+ err = snd_usb_endpoint_set_params(chip, ep);
+ if (err < 0)
+ goto unlock;
+
err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
if (err < 0)
goto unlock;
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index e67ea28faa54..6a9af04cf175 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -17,10 +17,8 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
bool is_sync_ep);
void snd_usb_endpoint_close(struct snd_usb_audio *chip,
struct snd_usb_endpoint *ep);
-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep);
-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep);
int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);
bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b604f7e95e82..d45d1d7e6664 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
if (stop_endpoints(subs, false))
sync_pending_stops(subs);
if (subs->sync_endpoint) {
- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
if (err < 0)
return err;
}
- err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
+ err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
if (err < 0)
return err;
snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
} else {
if (subs->sync_endpoint) {
- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
if (err < 0)
return err;
}
@@ -551,13 +551,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
subs->cur_audiofmt = fmt;
mutex_unlock(&chip->mutex);
- if (subs->sync_endpoint) {
- ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
- if (ret < 0)
- goto unlock;
- }
-
- ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
+ ret = configure_endpoints(chip, subs);
unlock:
if (ret < 0)
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 235dc85c91c3..ef4775c6db01 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -457,7 +457,8 @@
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
-#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/include/linux/gfp.h b/tools/include/linux/gfp.h
index b238dbc9eb85..6a10ff5f5be9 100644
--- a/tools/include/linux/gfp.h
+++ b/tools/include/linux/gfp.h
@@ -3,26 +3,7 @@
#define _TOOLS_INCLUDE_LINUX_GFP_H
#include <linux/types.h>
-
-#define __GFP_BITS_SHIFT 26
-#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-#define __GFP_HIGH 0x20u
-#define __GFP_IO 0x40u
-#define __GFP_FS 0x80u
-#define __GFP_NOWARN 0x200u
-#define __GFP_ZERO 0x8000u
-#define __GFP_ATOMIC 0x80000u
-#define __GFP_ACCOUNT 0x100000u
-#define __GFP_DIRECT_RECLAIM 0x400000u
-#define __GFP_KSWAPD_RECLAIM 0x2000000u
-
-#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
-
-#define GFP_ZONEMASK 0x0fu
-#define GFP_ATOMIC (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#include <linux/gfp_types.h>
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
diff --git a/tools/include/linux/gfp_types.h b/tools/include/linux/gfp_types.h
new file mode 100644
index 000000000000..5f9f1ed190a0
--- /dev/null
+++ b/tools/include/linux/gfp_types.h
@@ -0,0 +1 @@
+#include "../../../include/linux/gfp_types.h"
diff --git a/tools/include/nolibc/arch-riscv.h b/tools/include/nolibc/arch-riscv.h
index 95e2b7924925..ba04771cb3a3 100644
--- a/tools/include/nolibc/arch-riscv.h
+++ b/tools/include/nolibc/arch-riscv.h
@@ -190,7 +190,7 @@ __asm__ (".section .text\n"
".option norelax\n"
"lla gp, __global_pointer$\n"
".option pop\n"
- "ld a0, 0(sp)\n" // argc (a0) was in the stack
+ "lw a0, 0(sp)\n" // argc (a0) was in the stack
"add a1, sp, "SZREG"\n" // argv (a1) = sp
"slli a2, a0, "PTRLOG"\n" // envp (a2) = SZREG*argc ...
"add a2, a2, "SZREG"\n" // + SZREG (skip null)
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index 08491070387b..ce3ee03aa679 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -692,12 +692,12 @@ void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
{
#ifndef my_syscall6
/* Function not implemented. */
- return -ENOSYS;
+ return (void *)-ENOSYS;
#else
int n;
-#if defined(__i386__)
+#if defined(__NR_mmap2)
n = __NR_mmap2;
offset >>= 12;
#else
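The nolibc hunk above stops sys_mmap() from returning a bare -ENOSYS out of a void *-returning function and keys the mmap2 path off __NR_mmap2 being defined rather than hard-coding i386. A small standalone sketch of encoding an errno value in a pointer return; the -4095 threshold mirrors the usual Linux error-pointer convention but the function itself is made up:
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* return either a valid pointer or a negative errno encoded in the pointer */
static void *fake_mmap(int fail)
{
        static char page[4096];

        if (fail)
                return (void *)(intptr_t)-ENOSYS;
        return page;
}

int main(void)
{
        void *p = fake_mmap(1);

        if ((uintptr_t)p >= (uintptr_t)-4095L)
                printf("mmap failed: errno %ld\n", (long)-(intptr_t)p);
        else
                printf("mapped at %p\n", p);
        return 0;
}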
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 6b1bafe267a4..8ec5b9f344e0 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -441,6 +441,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
perf_evlist__for_each_entry(evlist, evsel) {
bool overwrite = evsel->attr.write_backward;
+ enum fdarray_flags flgs;
struct perf_mmap *map;
int *output, fd, cpu;
@@ -504,8 +505,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
revent = !overwrite ? POLLIN : 0;
- if (!evsel->system_wide &&
- perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
+ flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
+ if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
perf_mmap__put(map);
return -1;
}
diff --git a/tools/memory-model/Documentation/litmus-tests.txt b/tools/memory-model/Documentation/litmus-tests.txt
index 8a9d5d2787f9..26554b1c5575 100644
--- a/tools/memory-model/Documentation/litmus-tests.txt
+++ b/tools/memory-model/Documentation/litmus-tests.txt
@@ -946,22 +946,39 @@ Limitations of the Linux-kernel memory model (LKMM) include:
carrying a dependency, then the compiler can break that dependency
by substituting a constant of that value.
- Conversely, LKMM sometimes doesn't recognize that a particular
- optimization is not allowed, and as a result, thinks that a
- dependency is not present (because the optimization would break it).
- The memory model misses some pretty obvious control dependencies
- because of this limitation. A simple example is:
+ Conversely, LKMM will sometimes overestimate the amount of
+ reordering compilers and CPUs can carry out, leading it to miss
+ some pretty obvious cases of ordering. A simple example is:
r1 = READ_ONCE(x);
if (r1 == 0)
smp_mb();
WRITE_ONCE(y, 1);
- There is a control dependency from the READ_ONCE to the WRITE_ONCE,
- even when r1 is nonzero, but LKMM doesn't realize this and thinks
- that the write may execute before the read if r1 != 0. (Yes, that
- doesn't make sense if you think about it, but the memory model's
- intelligence is limited.)
+ The WRITE_ONCE() does not depend on the READ_ONCE(), and as a
+ result, LKMM does not claim ordering. However, even though no
+ dependency is present, the WRITE_ONCE() will not be executed before
+ the READ_ONCE(). There are two reasons for this:
+
+ The presence of the smp_mb() in one of the branches
+ prevents the compiler from moving the WRITE_ONCE()
+ up before the "if" statement, since the compiler has
+ to assume that r1 will sometimes be 0 (but see the
+ comment below);
+
+ CPUs do not execute stores before po-earlier conditional
+ branches, even in cases where the store occurs after the
+ two arms of the branch have recombined.
+
+ It is clear that it is not dangerous in the slightest for LKMM to
+ make weaker guarantees than architectures. In fact, it is
+ desirable, as it gives compilers room for making optimizations.
+ For instance, suppose that a 0 value in r1 would trigger undefined
+ behavior elsewhere. Then a clever compiler might deduce that r1
+ can never be 0 in the if condition. As a result, said clever
+ compiler might deem it safe to optimize away the smp_mb(),
+ eliminating the branch and any ordering an architecture would
+ guarantee otherwise.
2. Multiple access sizes for a single variable are not supported,
and neither are misaligned or partially overlapping accesses.
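The litmus-tests.txt hunk above reasons about a store that follows a conditional smp_mb(). For readers who want to poke at the code shape outside herd7, here is a rough single-threaded C11 transcription of that snippet: READ_ONCE()/WRITE_ONCE() are approximated with relaxed atomics and smp_mb() with a seq_cst fence, which is only an approximation of the LKMM primitives, and a single run obviously cannot demonstrate anything about ordering.
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;

/* mirrors: r1 = READ_ONCE(x); if (r1 == 0) smp_mb(); WRITE_ONCE(y, 1); */
static void writer(void)
{
        int r1 = atomic_load_explicit(&x, memory_order_relaxed);

        if (r1 == 0)
                atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&y, 1, memory_order_relaxed);
}

int main(void)
{
        writer();
        printf("x=%d y=%d\n",
               atomic_load_explicit(&x, memory_order_relaxed),
               atomic_load_explicit(&y, memory_order_relaxed));
        return 0;
}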
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f87ef43eb820..0f711f88894c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -3371,6 +3371,8 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
return 0;
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+ if (cpu.cpu == -1)
+ continue;
/* Return ENODEV is input cpu is greater than max cpu */
if ((unsigned long)cpu.cpu > mask->nbits)
return -ENODEV;
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index dfb6173b2a82..9e9a2b67de19 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -114,8 +114,7 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
for (i = 0; i < nsyscalls; ++i)
for (j = 0; j < expected_nr_events[i]; ++j) {
- int foo = syscalls[i]();
- ++foo;
+ syscalls[i]();
}
md = &evlist->mmap[0];
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 6a001fcfed68..4952abe716f3 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -332,7 +332,7 @@ out_delete_evlist:
out:
if (err == -EACCES)
return TEST_SKIP;
- if (err < 0)
+ if (err < 0 || errs != 0)
return TEST_FAIL;
return TEST_OK;
}
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 00c7285ce1ac..301f95427159 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -61,7 +61,7 @@ test_register_capture() {
echo "Register capture test [Skipped missing registers]"
return
fi
- if ! perf record -o - --intr-regs=di,r8,dx,cx -e cpu/br_inst_retired.near_call/p \
+ if ! perf record -o - --intr-regs=di,r8,dx,cx -e br_inst_retired.near_call:p \
-c 1000 --per-thread true 2> /dev/null \
| perf script -F ip,sym,iregs -i - 2> /dev/null \
| egrep -q "DI:"
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
new file mode 100755
index 000000000000..d724855d097c
--- /dev/null
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -0,0 +1,83 @@
+#!/bin/sh
+# perf stat --bpf-counters --for-each-cgroup test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+test_cgroups=
+if [ "$1" = "-v" ]; then
+ verbose="1"
+fi
+
+# skip if --bpf-counters --for-each-cgroup is not supported
+check_bpf_counter()
+{
+ if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
+ if [ "${verbose}" = "1" ]; then
+ echo "Skipping: --bpf-counters --for-each-cgroup not supported"
+ perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
+ fi
+ exit 2
+ fi
+}
+
+# find two cgroups to measure
+find_cgroups()
+{
+ # try usual systemd slices first
+ if [ -d /sys/fs/cgroup/system.slice -a -d /sys/fs/cgroup/user.slice ]; then
+ test_cgroups="system.slice,user.slice"
+ return
+ fi
+
+ # try root and self cgroups
+ local self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+ if [ -z ${self_cgrp} ]; then
+ # cgroup v2 doesn't specify perf_event
+ self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+ fi
+
+ if [ -z ${self_cgrp} ]; then
+ test_cgroups="/"
+ else
+ test_cgroups="/,${self_cgrp}"
+ fi
+}
+
+# As cgroup events are cpu-wide, we cannot simply compare the result.
+# Just check if it runs without failure and has non-zero results.
+check_system_wide_counted()
+{
+ local output
+
+ output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1 2>&1)
+ if echo ${output} | grep -q -F "<not "; then
+ echo "Some system-wide events are not counted"
+ if [ "${verbose}" = "1" ]; then
+ echo ${output}
+ fi
+ exit 1
+ fi
+}
+
+check_cpu_list_counted()
+{
+ local output
+
+ output=$(perf stat -C 1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
+ if echo ${output} | grep -q -F "<not "; then
+ echo "Some CPU events are not counted"
+ if [ "${verbose}" = "1" ]; then
+ echo ${output}
+ fi
+ exit 1
+ fi
+}
+
+check_bpf_counter
+find_cgroups
+
+check_system_wide_counted
+check_cpu_list_counted
+
+exit 0
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index c644f94a6500..ec801cffae6b 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -12,7 +12,8 @@ if ! [ -x "$(command -v cc)" ]; then
fi
# skip the test if the hardware doesn't support branch stack sampling
-perf record -b -o- -B true > /dev/null 2>&1 || exit 2
+# and if the architecture doesn't support filter types: any,save_type,u
+perf record -b -o- -B --branch-filter any,save_type,u true > /dev/null 2>&1 || exit 2
TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 4fd8d703ff19..8ab035b55875 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -43,10 +43,11 @@ static bool is_ignored_symbol(const char *name, char type)
/* Symbol names that begin with the following are ignored.*/
static const char * const ignored_prefixes[] = {
"$", /* local symbols for ARM, MIPS, etc. */
- ".LASANPC", /* s390 kasan local symbols */
+ ".L", /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
"__crc_", /* modversions */
"__efistub_", /* arm64 EFI stub namespace */
- "__kvm_nvhe_", /* arm64 non-VHE KVM namespace */
+ "__kvm_nvhe_$", /* arm64 local symbols in non-VHE KVM namespace */
+ "__kvm_nvhe_.L", /* arm64 local symbols in non-VHE KVM namespace */
"__AArch64ADRPThunk_", /* arm64 lld */
"__ARMV5PILongThunk_", /* arm lld */
"__ARMV7PILongThunk_",
diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c
index 9d4c45184e71..56455da30341 100644
--- a/tools/perf/tests/wp.c
+++ b/tools/perf/tests/wp.c
@@ -2,7 +2,9 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <errno.h>
#include <sys/ioctl.h>
+#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
#include <linux/kernel.h>
#include "tests.h"
@@ -137,8 +139,7 @@ static int test__wp_rw(struct test_suite *test __maybe_unused,
#endif
}
-static int test__wp_modify(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
+static int test__wp_modify(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
#if defined(__s390x__)
return TEST_SKIP;
@@ -160,6 +161,11 @@ static int test__wp_modify(struct test_suite *test __maybe_unused,
new_attr.disabled = 1;
ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr);
if (ret < 0) {
+ if (errno == ENOTTY) {
+ test->test_cases[subtest].skip_reason = "missing kernel support";
+ ret = TEST_SKIP;
+ }
+
pr_debug("ioctl(PERF_EVENT_IOC_MODIFY_ATTRIBUTES) failed\n");
close(fd);
return ret;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 9dfae1bda9cc..485e1a343165 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -269,7 +269,7 @@ CFLAGS_expr-flex.o += $(flex_flags)
bison_flags := -DYYENABLE_NLS=0
BISON_GE_35 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 35)
ifeq ($(BISON_GE_35),1)
- bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum
+ bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum -Wno-unused-but-set-variable -Wno-unknown-warning-option
else
bison_flags += -w
endif
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 22dcfe07e886..906476a839e1 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -498,7 +498,7 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
{
union perf_mem_data_src data_src = { 0 };
- bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
+ bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
if (record->op == ARM_SPE_LD)
data_src.mem_op = PERF_MEM_OP_LOAD;
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 63b9db657442..3c2df7522f6f 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist)
perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
- FD(cgrp_switch, cpu.cpu));
+ FD(cgrp_switch, i));
if (IS_ERR(link)) {
pr_err("Failed to attach cgroup program\n");
err = PTR_ERR(link);
@@ -115,15 +115,15 @@ static int bperf_load_program(struct evlist *evlist)
evsel->cgrp = NULL;
/* open single copy of the events w/o cgroup */
- err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
+ err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
if (err) {
pr_err("Failed to open first cgroup events\n");
goto out;
}
map_fd = bpf_map__fd(skel->maps.events);
- perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
- int fd = FD(evsel, cpu.cpu);
+ perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
+ int fd = FD(evsel, j);
__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
err = bpf_map_update_elem(map_fd, &idx, &fd,
@@ -269,7 +269,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
goto out;
}
- perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+ perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
counts = perf_counts(evsel->counts, i, 0);
counts->val = values[cpu.cpu].counter;
counts->ena = values[cpu.cpu].enabled;
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 292c430768b5..9aa8cdd93de4 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -48,6 +48,7 @@ const volatile __u32 num_cpus = 1;
int enabled = 0;
int use_cgroup_v2 = 0;
+int perf_subsys_id = -1;
static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
{
@@ -58,7 +59,15 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
int level;
int cnt;
- cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup);
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+ cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
level = BPF_CORE_READ(cgrp, level);
for (cnt = 0; i < MAX_LEVELS; i++) {
@@ -176,7 +185,7 @@ static int bperf_cgroup_count(void)
}
// This will be attached to cgroup-switches event for each cpu
-SEC("perf_events")
+SEC("perf_event")
int BPF_PROG(on_cgrp_switch)
{
return bperf_cgroup_count();
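
The two BPF hunks in this file (and the off_cpu.bpf.c change below) resolve perf_event_cgrp_id at load time instead of hard-coding the compile-time constant. A minimal sketch of that CO-RE pattern in isolation, assuming a skeleton built against vmlinux.h and libbpf's bpf_core_read.h:

#include "vmlinux.h"
#include <bpf/bpf_core_read.h>

/* Cached subsystem id, resolved once on first use. */
static int perf_subsys_id = -1;

static inline int resolve_perf_subsys_id(void)
{
	if (perf_subsys_id == -1) {
#if __has_builtin(__builtin_preserve_enum_value)
		/* Ask the running kernel's BTF for the enumerator's value. */
		perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
						     perf_event_cgrp_id);
#else
		/* Older compilers: fall back to the compile-time constant. */
		perf_subsys_id = perf_event_cgrp_id;
#endif
	}
	return perf_subsys_id;
}

The caller then indexes css_set->subsys[] with the resolved id, so the lookup keeps working even if the running kernel numbers the cgroup subsystems differently from the build headers.
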
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index c4ba2bcf179f..38e3b287dbb2 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -94,6 +94,8 @@ const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
const volatile bool uses_cgroup_v1 = false;
+int perf_subsys_id = -1;
+
/*
* Old kernel used to call it task_struct->state and now it's '__state'.
* Use BPF CO-RE "ignored suffix rule" to deal with it like below:
@@ -119,11 +121,19 @@ static inline __u64 get_cgroup_id(struct task_struct *t)
{
struct cgroup *cgrp;
- if (uses_cgroup_v1)
- cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_event_cgrp_id], cgroup);
- else
- cgrp = BPF_CORE_READ(t, cgroups, dfl_cgrp);
+ if (!uses_cgroup_v1)
+ return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
+
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+ cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
return BPF_CORE_READ(cgrp, kn, id);
}
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index ed28a0dbcb7f..d81b54563e96 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -253,6 +253,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
Elf_Data *d;
Elf_Scn *scn;
Elf_Ehdr *ehdr;
+ Elf_Phdr *phdr;
Elf_Shdr *shdr;
uint64_t eh_frame_base_offset;
char *strsym = NULL;
@@ -288,6 +289,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
/*
+ * setup program header
+ */
+ phdr = elf_newphdr(e, 1);
+ phdr[0].p_type = PT_LOAD;
+ phdr[0].p_offset = 0;
+ phdr[0].p_vaddr = 0;
+ phdr[0].p_paddr = 0;
+ phdr[0].p_filesz = csize;
+ phdr[0].p_memsz = csize;
+ phdr[0].p_flags = PF_X | PF_R;
+ phdr[0].p_align = 8;
+
+ /*
* setup text section
*/
scn = elf_newscn(e);
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index ae138afe6c56..b5c909546e3f 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#if GEN_ELF_CLASS == ELFCLASS64
#define elf_newehdr elf64_newehdr
+#define elf_newphdr elf64_newphdr
#define elf_getshdr elf64_getshdr
#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Phdr Elf64_Phdr
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
#else
#define elf_newehdr elf32_newehdr
+#define elf_newphdr elf32_newphdr
#define elf_getshdr elf32_getshdr
#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Phdr Elf32_Phdr
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
index 284f8eabd3b9..7c9f9150bad5 100644
--- a/tools/perf/util/parse-events-hybrid.c
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -33,7 +33,8 @@ static void config_hybrid_attr(struct perf_event_attr *attr,
* If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
*/
attr->type = type;
- attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
+ attr->config = (attr->config & PERF_HW_EVENT_MASK) |
+ ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
}
static int create_event_hybrid(__u32 config_type, int *idx,
@@ -48,13 +49,25 @@ static int create_event_hybrid(__u32 config_type, int *idx,
__u64 config = attr->config;
config_hybrid_attr(attr, config_type, pmu->type);
+
+ /*
+ * Some hybrid hardware cache events are only available on one CPU
+ * PMU. For example, the 'L1-dcache-load-misses' is only available
+ * on cpu_core, while the 'L1-icache-loads' is only available on
+ * cpu_atom. We need to remove "not supported" hybrid cache events.
+ */
+ if (attr->type == PERF_TYPE_HW_CACHE
+ && !is_event_supported(attr->type, attr->config))
+ return 0;
+
evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms);
- if (evsel)
+ if (evsel) {
evsel->pmu_name = strdup(pmu->name);
- else
+ if (!evsel->pmu_name)
+ return -ENOMEM;
+ } else
return -ENOMEM;
-
attr->type = type;
attr->config = config;
return 0;
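
For reference, the masking added to config_hybrid_attr() relies on the extended-type layout of perf_event_attr.config: the hardware event ID sits in the low 32 bits and the PMU type ID in the high 32 bits. A minimal sketch of that encoding, assuming PERF_PMU_TYPE_SHIFT and PERF_HW_EVENT_MASK as provided by the perf_event uapi header:

#include <linux/perf_event.h>

/* Keep the low 32 bits (event ID), place the PMU type in the high 32 bits. */
static __u64 encode_hybrid_config(__u64 config, __u32 pmu_type)
{
	return (config & PERF_HW_EVENT_MASK) |
	       ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
}

Masking first is what the fix above is about: without it, a config value that already carries stale high bits would corrupt the PMU type field.
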
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f05e15acd33f..f3b2c2a87456 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -28,6 +28,7 @@
#include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h"
#include "tracepoint.h"
+#include "thread_map.h"
#define MAX_NAME_LEN 100
@@ -157,6 +158,44 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
+bool is_event_supported(u8 type, u64 config)
+{
+ bool ret = true;
+ int open_return;
+ struct evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .disabled = 1,
+ };
+ struct perf_thread_map *tmap = thread_map__new_by_tid(0);
+
+ if (tmap == NULL)
+ return false;
+
+ evsel = evsel__new(&attr);
+ if (evsel) {
+ open_return = evsel__open(evsel, NULL, tmap);
+ ret = open_return >= 0;
+
+ if (open_return == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->core.attr.exclude_kernel = 1;
+ ret = evsel__open(evsel, NULL, tmap) >= 0;
+ }
+ evsel__delete(evsel);
+ }
+
+ perf_thread_map__put(tmap);
+ return ret;
+}
+
const char *event_type(int type)
{
switch (type) {
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 7e6a601d9cd0..07df7bb7b042 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -19,6 +19,7 @@ struct option;
struct perf_pmu;
bool have_tracepoints(struct list_head *evlist);
+bool is_event_supported(u8 type, u64 config);
const char *event_type(int type);
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index ba1ab5134685..c4d5d87fae2f 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -22,7 +22,6 @@
#include "probe-file.h"
#include "string2.h"
#include "strlist.h"
-#include "thread_map.h"
#include "tracepoint.h"
#include "pfm.h"
#include "pmu-hybrid.h"
@@ -239,44 +238,6 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
strlist__delete(sdtlist);
}
-static bool is_event_supported(u8 type, unsigned int config)
-{
- bool ret = true;
- int open_return;
- struct evsel *evsel;
- struct perf_event_attr attr = {
- .type = type,
- .config = config,
- .disabled = 1,
- };
- struct perf_thread_map *tmap = thread_map__new_by_tid(0);
-
- if (tmap == NULL)
- return false;
-
- evsel = evsel__new(&attr);
- if (evsel) {
- open_return = evsel__open(evsel, NULL, tmap);
- ret = open_return >= 0;
-
- if (open_return == -EACCES) {
- /*
- * This happens if the paranoid value
- * /proc/sys/kernel/perf_event_paranoid is set to 2
- * Re-run with exclude_kernel set; we don't do that
- * by default as some ARM machines do not support it.
- *
- */
- evsel->core.attr.exclude_kernel = 1;
- ret = evsel__open(evsel, NULL, tmap) >= 0;
- }
- evsel__delete(evsel);
- }
-
- perf_thread_map__put(tmap);
- return ret;
-}
-
int print_hwcache_events(const char *event_glob, bool name_only)
{
unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index c92326c2233a..0f5ba28339cf 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -3,4 +3,4 @@ perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
-CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-error=deprecated-declarations
+CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-deprecated-declarations
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 75bec32d4f57..647b7dff8ef3 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
* unusual. One significant peculiarity is that the mapping (start -> pgoff)
* is not the same for the kernel map and the modules map. That happens because
* the data is copied adjacently whereas the original kcore has gaps. Finally,
- * kallsyms and modules files are compared with their copies to check that
- * modules have not been loaded or unloaded while the copies were taking place.
+ * the kallsyms file is compared with its copy to check that modules have not been
+ * loaded or unloaded while the copies were taking place.
*
* Return: %0 on success, %-1 on failure.
*/
@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
goto out_extract_close;
}
- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
- goto out_extract_close;
-
if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
goto out_extract_close;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 812424dbf2d5..538790758e24 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
bool is_kernel)
{
struct build_id bid;
+ struct nsinfo *nsi;
+ struct nscookie nc;
int rc;
- if (is_kernel)
+ if (is_kernel) {
rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
- else
- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+ goto out;
+ }
+
+ nsi = nsinfo__new(event->pid);
+ nsinfo__mountns_enter(nsi, &nc);
+ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+ nsinfo__mountns_exit(&nc);
+ nsinfo__put(nsi);
+
+out:
if (rc == 0) {
memcpy(event->build_id, bid.data, sizeof(bid.data));
event->build_id_size = (u8) bid.size;
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
index 4d1a947367f9..01ceb98c15a0 100644
--- a/tools/testing/nvdimm/test/ndtest.c
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -134,39 +134,6 @@ static struct ndtest_mapping region1_mapping[] = {
},
};
-static struct ndtest_mapping region2_mapping[] = {
- {
- .dimm = 0,
- .position = 0,
- .start = 0,
- .size = DIMM_SIZE,
- },
-};
-
-static struct ndtest_mapping region3_mapping[] = {
- {
- .dimm = 1,
- .start = 0,
- .size = DIMM_SIZE,
- }
-};
-
-static struct ndtest_mapping region4_mapping[] = {
- {
- .dimm = 2,
- .start = 0,
- .size = DIMM_SIZE,
- }
-};
-
-static struct ndtest_mapping region5_mapping[] = {
- {
- .dimm = 3,
- .start = 0,
- .size = DIMM_SIZE,
- }
-};
-
static struct ndtest_region bus0_regions[] = {
{
.type = ND_DEVICE_NAMESPACE_PMEM,
@@ -182,34 +149,6 @@ static struct ndtest_region bus0_regions[] = {
.size = DIMM_SIZE * 2,
.range_index = 2,
},
- {
- .type = ND_DEVICE_NAMESPACE_BLK,
- .num_mappings = ARRAY_SIZE(region2_mapping),
- .mapping = region2_mapping,
- .size = DIMM_SIZE,
- .range_index = 3,
- },
- {
- .type = ND_DEVICE_NAMESPACE_BLK,
- .num_mappings = ARRAY_SIZE(region3_mapping),
- .mapping = region3_mapping,
- .size = DIMM_SIZE,
- .range_index = 4,
- },
- {
- .type = ND_DEVICE_NAMESPACE_BLK,
- .num_mappings = ARRAY_SIZE(region4_mapping),
- .mapping = region4_mapping,
- .size = DIMM_SIZE,
- .range_index = 5,
- },
- {
- .type = ND_DEVICE_NAMESPACE_BLK,
- .num_mappings = ARRAY_SIZE(region5_mapping),
- .mapping = region5_mapping,
- .size = DIMM_SIZE,
- .range_index = 6,
- },
};
static struct ndtest_mapping region6_mapping[] = {
@@ -501,21 +440,6 @@ static int ndtest_create_region(struct ndtest_priv *p,
nd_set->altcookie = nd_set->cookie1;
ndr_desc->nd_set = nd_set;
- if (region->type == ND_DEVICE_NAMESPACE_BLK) {
- mappings[0].start = 0;
- mappings[0].size = DIMM_SIZE;
- mappings[0].nvdimm = p->config->dimms[ndimm].nvdimm;
-
- ndr_desc->mapping = &mappings[0];
- ndr_desc->num_mappings = 1;
- ndr_desc->num_lanes = 1;
- ndbr_desc.enable = ndtest_blk_region_enable;
- ndbr_desc.do_io = ndtest_blk_do_io;
- region->region = nvdimm_blk_region_create(p->bus, ndr_desc);
-
- goto done;
- }
-
for (i = 0; i < region->num_mappings; i++) {
ndimm = region->mapping[i].dimm;
mappings[i].start = region->mapping[i].start;
@@ -527,7 +451,6 @@ static int ndtest_create_region(struct ndtest_priv *p,
ndr_desc->num_mappings = region->num_mappings;
region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
-done:
if (!region->region) {
dev_err(&p->pdev.dev, "Error registering region %pR\n",
ndr_desc->res);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index c2064a35688b..1fc89b8ef433 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -13,6 +13,7 @@ TARGETS += damon
TARGETS += drivers/dma-buf
TARGETS += drivers/s390x/uvdevice
TARGETS += drivers/net/bonding
+TARGETS += drivers/net/team
TARGETS += efivarfs
TARGETS += exec
TARGETS += filesystems
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index ab6c54b12098..1d866658e541 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for net selftests
-TEST_PROGS := bond-break-lacpdu-tx.sh
+TEST_PROGS := bond-break-lacpdu-tx.sh \
+ dev_addr_lists.sh \
+ bond-arp-interval-causes-panic.sh
+
+TEST_FILES := lag_lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
new file mode 100755
index 000000000000..71c00bfafbc9
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# cause kernel oops in bond_rr_gen_slave_id
+DEBUG=${DEBUG:-0}
+
+set -e
+test ${DEBUG} -ne 0 && set -x
+
+finish()
+{
+ ip netns delete server || true
+ ip netns delete client || true
+ ip link del link1_1 || true
+}
+
+trap finish EXIT
+
+client_ip4=192.168.1.198
+server_ip4=192.168.1.254
+
+# setup kernel so it reboots after causing the panic
+echo 180 >/proc/sys/kernel/panic
+
+# build namespaces
+ip link add dev link1_1 type veth peer name link1_2
+
+ip netns add "server"
+ip link set dev link1_2 netns server up name eth0
+ip netns exec server ip addr add ${server_ip4}/24 dev eth0
+
+ip netns add "client"
+ip link set dev link1_1 netns client down name eth0
+ip netns exec client ip link add dev bond0 down type bond mode 1 \
+ miimon 100 all_slaves_active 1
+ip netns exec client ip link set dev eth0 down master bond0
+ip netns exec client ip link set dev bond0 up
+ip netns exec client ip addr add ${client_ip4}/24 dev bond0
+ip netns exec client ping -c 5 $server_ip4 >/dev/null
+
+ip netns exec client ip link set dev eth0 down nomaster
+ip netns exec client ip link set dev bond0 down
+ip netns exec client ip link set dev bond0 type bond mode 0 \
+ arp_interval 1000 arp_ip_target "+${server_ip4}"
+ip netns exec client ip link set dev eth0 down master bond0
+ip netns exec client ip link set dev bond0 up
+ip netns exec client ping -c 5 $server_ip4 >/dev/null
+
+exit 0
diff --git a/tools/testing/selftests/drivers/net/bonding/config b/tools/testing/selftests/drivers/net/bonding/config
index dc1c22de3c92..70638fa50b2c 100644
--- a/tools/testing/selftests/drivers/net/bonding/config
+++ b/tools/testing/selftests/drivers/net/bonding/config
@@ -1 +1,2 @@
CONFIG_BONDING=y
+CONFIG_MACVLAN=y
diff --git a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
new file mode 100755
index 000000000000..e6fa24eded5b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bond device handling of addr lists (dev->uc, mc)
+#
+
+ALL_TESTS="
+ bond_cleanup_mode1
+ bond_cleanup_mode4
+ bond_listen_lacpdu_multicast_case_down
+ bond_listen_lacpdu_multicast_case_up
+"
+
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
+
+source "$lib_dir"/lag_lib.sh
+
+
+destroy()
+{
+ local ifnames=(dummy1 dummy2 bond1 mv0)
+ local ifname
+
+ for ifname in "${ifnames[@]}"; do
+ ip link del "$ifname" &>/dev/null
+ done
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ destroy
+}
+
+
+# bond driver control paths vary between modes that have a primary slave
+# (bond_uses_primary()) and others. Test both kinds of modes.
+
+bond_cleanup_mode1()
+{
+ RET=0
+
+ test_LAG_cleanup "bonding" "active-backup"
+}
+
+bond_cleanup_mode4() {
+ RET=0
+
+ test_LAG_cleanup "bonding" "802.3ad"
+}
+
+bond_listen_lacpdu_multicast()
+{
+ # Initial state of bond device, up | down
+ local init_state=$1
+ local lacpdu_mc="01:80:c2:00:00:02"
+
+ ip link add dummy1 type dummy
+ ip link add bond1 "$init_state" type bond mode 802.3ad
+ ip link set dev dummy1 master bond1
+ if [ "$init_state" = "down" ]; then
+ ip link set dev bond1 up
+ fi
+
+ grep_bridge_fdb "$lacpdu_mc" bridge fdb show brport dummy1 >/dev/null
+ check_err $? "LACPDU multicast address not present on slave (1)"
+
+ ip link set dev bond1 down
+
+ not grep_bridge_fdb "$lacpdu_mc" bridge fdb show brport dummy1 >/dev/null
+ check_err $? "LACPDU multicast address still present on slave"
+
+ ip link set dev bond1 up
+
+ grep_bridge_fdb "$lacpdu_mc" bridge fdb show brport dummy1 >/dev/null
+ check_err $? "LACPDU multicast address not present on slave (2)"
+
+ cleanup
+
+ log_test "bonding LACPDU multicast address to slave (from bond $init_state)"
+}
+
+# The LACPDU mc addr is added by different paths depending on the initial state
+# of the bond when enslaving a device. Test both cases.
+
+bond_listen_lacpdu_multicast_case_down()
+{
+ RET=0
+
+ bond_listen_lacpdu_multicast "down"
+}
+
+bond_listen_lacpdu_multicast_case_up()
+{
+ RET=0
+
+ bond_listen_lacpdu_multicast "up"
+}
+
+
+trap cleanup EXIT
+
+tests_run
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
new file mode 100644
index 000000000000..16c7fb858ac1
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test that a link aggregation device (bonding, team) removes the hardware
+# addresses that it adds on its underlying devices.
+test_LAG_cleanup()
+{
+ local driver=$1
+ local mode=$2
+ local ucaddr="02:00:00:12:34:56"
+ local addr6="fe80::78:9abc/64"
+ local mcaddr="33:33:ff:78:9a:bc"
+ local name
+
+ ip link add dummy1 type dummy
+ ip link add dummy2 type dummy
+ if [ "$driver" = "bonding" ]; then
+ name="bond1"
+ ip link add "$name" up type bond mode "$mode"
+ ip link set dev dummy1 master "$name"
+ ip link set dev dummy2 master "$name"
+ elif [ "$driver" = "team" ]; then
+ name="team0"
+ teamd -d -c '
+ {
+ "device": "'"$name"'",
+ "runner": {
+ "name": "'"$mode"'"
+ },
+ "ports": {
+ "dummy1":
+ {},
+ "dummy2":
+ {}
+ }
+ }
+ '
+ ip link set dev "$name" up
+ else
+ check_err 1
+ log_test test_LAG_cleanup ": unknown driver \"$driver\""
+ return
+ fi
+
+ # Used to test dev->uc handling
+ ip link add mv0 link "$name" up address "$ucaddr" type macvlan
+ # Used to test dev->mc handling
+ ip address add "$addr6" dev "$name"
+ ip link set dev "$name" down
+ ip link del "$name"
+
+ not grep_bridge_fdb "$ucaddr" bridge fdb show >/dev/null
+ check_err $? "macvlan unicast address still present on a slave"
+
+ not grep_bridge_fdb "$mcaddr" bridge fdb show >/dev/null
+ check_err $? "IPv6 solicited-node multicast mac address still present on a slave"
+
+ cleanup
+
+ log_test "$driver cleanup mode $mode"
+}
diff --git a/tools/testing/selftests/drivers/net/team/Makefile b/tools/testing/selftests/drivers/net/team/Makefile
new file mode 100644
index 000000000000..642d8df1c137
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/team/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for net selftests
+
+TEST_PROGS := dev_addr_lists.sh
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/team/config b/tools/testing/selftests/drivers/net/team/config
new file mode 100644
index 000000000000..265b6882cc21
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/team/config
@@ -0,0 +1,3 @@
+CONFIG_NET_TEAM=y
+CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+CONFIG_MACVLAN=y
diff --git a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
new file mode 100755
index 000000000000..debda7262956
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test team device handling of addr lists (dev->uc, mc)
+#
+
+ALL_TESTS="
+ team_cleanup
+"
+
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
+
+source "$lib_dir"/../bonding/lag_lib.sh
+
+
+destroy()
+{
+ local ifnames=(dummy0 dummy1 team0 mv0)
+ local ifname
+
+ for ifname in "${ifnames[@]}"; do
+ ip link del "$ifname" &>/dev/null
+ done
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ destroy
+}
+
+
+team_cleanup()
+{
+ RET=0
+
+ test_LAG_cleanup "team" "lacp"
+}
+
+
+require_command teamd
+
+trap cleanup EXIT
+
+tests_run
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 4c122f1b1737..6448cb9f710f 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -48,6 +48,8 @@ LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
+LIBKVM_STRING += lib/string_override.c
+
LIBKVM_x86_64 += lib/x86_64/apic.c
LIBKVM_x86_64 += lib/x86_64/handlers.S
LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
@@ -220,7 +222,8 @@ LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
-LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
+LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
@@ -231,6 +234,12 @@ $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
+# Compile the string overrides as freestanding to prevent the compiler from
+# generating self-referential code, e.g. without "freestanding" the compiler may
+# "optimize" memcmp() by invoking memcmp(), thus causing infinite recursion.
+$(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
+
x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 1c2749b1481a..76c583a07ea2 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -31,8 +31,9 @@
* These limitations are worked around in this test by using a large enough
* region of memory for each vCPU such that the number of translations cached in
* the TLB and the number of pages held in pagevecs are a small fraction of the
- * overall workload. And if either of those conditions are not true this test
- * will fail rather than silently passing.
+ * overall workload. And if either of those conditions is not true (for example
+ * under nested virtualization, where the TLB size is unlimited), this test
+ * will print a warning rather than silently passing.
*/
#include <inttypes.h>
#include <limits.h>
@@ -172,17 +173,23 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
vcpu_idx, no_pfn, pages);
/*
- * Test that at least 90% of memory has been marked idle (the rest might
- * not be marked idle because the pages have not yet made it to an LRU
- * list or the translations are still cached in the TLB). 90% is
+ * Check that at least 90% of memory has been marked idle (the rest
+ * might not be marked idle because the pages have not yet made it to an
+ * LRU list or the translations are still cached in the TLB). 90% is
* arbitrary; high enough that we ensure most memory access went through
* access tracking but low enough as to not make the test too brittle
* over time and across architectures.
+ *
+ * Note that when run under nested virtualization, this check will trigger
+ * much more frequently: the TLB size is unlimited and, since no flush
+ * happens, many more pages are cached there and the guest won't see the
+ * "idle" bit cleared.
*/
- TEST_ASSERT(still_idle < pages / 10,
- "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
- PRIu64 ").\n",
- vcpu_idx, still_idle, pages);
+ if (still_idle >= pages / 10)
+ printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
+ " out of %" PRIu64 "), this will affect performance results"
+ ".\n",
+ vcpu_idx, still_idle, pages);
close(page_idle_fd);
close(pagemap_fd);
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 99fa1410964c..790c6d1ecb34 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -617,6 +617,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot);
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t addr, uint64_t size);
+bool kvm_vm_has_ept(struct kvm_vm *vm);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index 71ade6100fd3..2bd25b191d15 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -22,7 +22,7 @@ static void test_dump_stack(void)
* Build and run this command:
*
* addr2line -s -e /proc/$PPID/exe -fpai {backtrace addresses} | \
- * grep -v test_dump_stack | cat -n 1>&2
+ * cat -n 1>&2
*
* Note that the spacing is different and there's no newline.
*/
@@ -36,18 +36,24 @@ static void test_dump_stack(void)
n * (((sizeof(void *)) * 2) + 1) +
/* Null terminator: */
1];
- char *c;
+ char *c = cmd;
n = backtrace(stack, n);
- c = &cmd[0];
- c += sprintf(c, "%s", addr2line);
/*
- * Skip the first 3 frames: backtrace, test_dump_stack, and
- * test_assert. We hope that backtrace isn't inlined and the other two
- * we've declared noinline.
+ * Skip the first 2 frames, which should be test_dump_stack() and
+ * test_assert(); both of which are declared noinline. Bail if the
+ * resulting stack trace would be empty. Otherwise, addr2line will block
+ * waiting for addresses to be passed in via stdin.
*/
+ if (n <= 2) {
+ fputs(" (stack trace empty)\n", stderr);
+ return;
+ }
+
+ c += sprintf(c, "%s", addr2line);
for (i = 2; i < n; i++)
c += sprintf(c, " %lx", ((unsigned long) stack[i]) - 1);
+
c += sprintf(c, "%s", pipeline);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-result"
diff --git a/tools/testing/selftests/kvm/lib/string_override.c b/tools/testing/selftests/kvm/lib/string_override.c
new file mode 100644
index 000000000000..632398adc229
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/string_override.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+
+/*
+ * Override the "basic" built-in string helpers so that they can be used in
+ * guest code. KVM selftests don't support dynamic loading in guest code and
+ * will jump into the weeds if the compiler decides to insert an out-of-line
+ * call via the PLT.
+ */
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) {
+ if ((res = *su1 - *su2) != 0)
+ break;
+ }
+ return res;
+}
+
+void *memcpy(void *dest, const void *src, size_t count)
+{
+ char *tmp = dest;
+ const char *s = src;
+
+ while (count--)
+ *tmp++ = *s++;
+ return dest;
+}
+
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = s;
+
+ while (count--)
+ *xs++ = c;
+ return s;
+}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 80a568c439b8..d21049c38fc5 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -5,6 +5,8 @@
* Copyright (C) 2018, Google LLC.
*/
+#include <asm/msr-index.h>
+
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
@@ -542,9 +544,27 @@ void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
}
+bool kvm_vm_has_ept(struct kvm_vm *vm)
+{
+ struct kvm_vcpu *vcpu;
+ uint64_t ctrl;
+
+ vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
+ TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");
+
+ ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
+ if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+ return false;
+
+ ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
+ return ctrl & SECONDARY_EXEC_ENABLE_EPT;
+}
+
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
+ TEST_REQUIRE(kvm_vm_has_ept(vm));
+
vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index fac248a43666..6f88da7e60be 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -227,7 +227,7 @@ int main(int argc, char *argv[])
ucall_init(vm, NULL);
pthread_create(&migration_thread, NULL, migration_worker,
- (void *)(unsigned long)gettid());
+ (void *)(unsigned long)syscall(SYS_gettid));
for (i = 0; !done; i++) {
vcpu_run(vcpu);
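
The gettid() change above works around the fact that glibc only gained a gettid() wrapper in version 2.30; older toolchains must issue the raw syscall. A common wrapper looks roughly like this (a sketch, not part of the patch):

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

/* gettid() fallback for libcs that predate glibc 2.30. */
static pid_t sys_gettid(void)
{
	return (pid_t)syscall(SYS_gettid);
}
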
diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
index b1905d280ef5..e0004bd26536 100644
--- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
@@ -14,6 +14,9 @@
#include "kvm_util.h"
#include "processor.h"
+/* VMCALL and VMMCALL are both 3-byte opcodes. */
+#define HYPERCALL_INSN_SIZE 3
+
static bool ud_expected;
static void guest_ud_handler(struct ex_regs *regs)
@@ -22,7 +25,7 @@ static void guest_ud_handler(struct ex_regs *regs)
GUEST_DONE();
}
-extern unsigned char svm_hypercall_insn;
+extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t svm_do_sched_yield(uint8_t apic_id)
{
uint64_t ret;
@@ -39,7 +42,7 @@ static uint64_t svm_do_sched_yield(uint8_t apic_id)
return ret;
}
-extern unsigned char vmx_hypercall_insn;
+extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t vmx_do_sched_yield(uint8_t apic_id)
{
uint64_t ret;
@@ -56,30 +59,20 @@ static uint64_t vmx_do_sched_yield(uint8_t apic_id)
return ret;
}
-static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn)
-{
- uint32_t exp = 0, obs = 0;
-
- memcpy(&exp, exp_insn, sizeof(exp));
- memcpy(&obs, obs_insn, sizeof(obs));
-
- GUEST_ASSERT_EQ(exp, obs);
-}
-
static void guest_main(void)
{
- unsigned char *native_hypercall_insn, *hypercall_insn;
+ uint8_t *native_hypercall_insn, *hypercall_insn;
uint8_t apic_id;
apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
if (is_intel_cpu()) {
- native_hypercall_insn = &vmx_hypercall_insn;
- hypercall_insn = &svm_hypercall_insn;
+ native_hypercall_insn = vmx_hypercall_insn;
+ hypercall_insn = svm_hypercall_insn;
svm_do_sched_yield(apic_id);
} else if (is_amd_cpu()) {
- native_hypercall_insn = &svm_hypercall_insn;
- hypercall_insn = &vmx_hypercall_insn;
+ native_hypercall_insn = svm_hypercall_insn;
+ hypercall_insn = vmx_hypercall_insn;
vmx_do_sched_yield(apic_id);
} else {
GUEST_ASSERT(0);
@@ -87,8 +80,13 @@ static void guest_main(void)
return;
}
+ /*
+ * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
+ * occurs). Verify that a #UD is NOT expected and that KVM patched in
+ * the native hypercall.
+ */
GUEST_ASSERT(!ud_expected);
- assert_hypercall_insn(native_hypercall_insn, hypercall_insn);
+ GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
GUEST_DONE();
}
diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
index 02868ac3bc71..6632bfff486b 100644
--- a/tools/testing/selftests/landlock/Makefile
+++ b/tools/testing/selftests/landlock/Makefile
@@ -1,6 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+#
+# First run: make -C ../../../.. headers_install
CFLAGS += -Wall -O2 $(KHDR_INCLUDES)
+LDLIBS += -lcap
+
+LOCAL_HDRS += common.h
src_test := $(wildcard *_test.c)
@@ -8,14 +13,10 @@ TEST_GEN_PROGS := $(src_test:.c=)
TEST_GEN_PROGS_EXTENDED := true
-OVERRIDE_TARGETS := 1
-top_srcdir := ../../../..
-include ../lib.mk
-
-khdr_dir = $(top_srcdir)/usr/include
+# Static linking for short targets:
+$(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
-$(OUTPUT)/true: true.c
- $(LINK.c) $< $(LDLIBS) -o $@ -static
+include ../lib.mk
-$(OUTPUT)/%_test: %_test.c $(khdr_dir)/linux/landlock.h ../kselftest_harness.h common.h
- $(LINK.c) $< $(LDLIBS) -o $@ -lcap -I$(khdr_dir)
+# Static linking for targets with $(OUTPUT)/ prefix:
+$(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index d44c72b3abe3..9d4cb94cf437 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -42,6 +42,10 @@ endif
selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
top_srcdir = $(selfdir)/../../..
+ifeq ($(KHDR_INCLUDES),)
+KHDR_INCLUDES := -isystem $(top_srcdir)/usr/include
+endif
+
# The following are built by lib.mk common compile rules.
# TEST_CUSTOM_PROGS should be used by tests that require
# custom build rule and prevent common build rule use.
diff --git a/tools/testing/selftests/net/forwarding/router_multicast.sh b/tools/testing/selftests/net/forwarding/router_multicast.sh
index 57e90c873a2c..5a58b1ec8aef 100755
--- a/tools/testing/selftests/net/forwarding/router_multicast.sh
+++ b/tools/testing/selftests/net/forwarding/router_multicast.sh
@@ -28,7 +28,7 @@
# +------------------+ +------------------+
#
-ALL_TESTS="mcast_v4 mcast_v6 rpf_v4 rpf_v6"
+ALL_TESTS="mcast_v4 mcast_v6 rpf_v4 rpf_v6 unres_v4 unres_v6"
NUM_NETIFS=6
source lib.sh
source tc_common.sh
@@ -406,6 +406,96 @@ rpf_v6()
log_test "RPF IPv6"
}
+unres_v4()
+{
+ # Send a multicast packet not corresponding to an installed route,
+ # causing the kernel to queue the packet for resolution and emit an
+ # IGMPMSG_NOCACHE notification. smcrouted will react to this
+ # notification by consulting its (*, G) list and installing an (S, G)
+ # route, which will be used to forward the queued packet.
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 1 flower \
+ dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $h3 ingress protocol ip pref 1 handle 1 flower \
+ dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action drop
+
+ # Forwarding should fail before installing a matching (*, G).
+ $MZ $h1 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+ -A 198.51.100.2 -B 225.1.2.3 -q
+
+ tc_check_packets "dev $h2 ingress" 1 0
+ check_err $? "Multicast received on first host when should not"
+ tc_check_packets "dev $h3 ingress" 1 0
+ check_err $? "Multicast received on second host when should not"
+
+ # Create (*, G). Will not be installed in the kernel.
+ create_mcast_sg $rp1 0.0.0.0 225.1.2.3 $rp2 $rp3
+
+ $MZ $h1 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+ -A 198.51.100.2 -B 225.1.2.3 -q
+
+ tc_check_packets "dev $h2 ingress" 1 1
+ check_err $? "Multicast not received on first host"
+ tc_check_packets "dev $h3 ingress" 1 1
+ check_err $? "Multicast not received on second host"
+
+ delete_mcast_sg $rp1 0.0.0.0 225.1.2.3 $rp2 $rp3
+
+ tc filter del dev $h3 ingress protocol ip pref 1 handle 1 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 1 flower
+
+ log_test "Unresolved queue IPv4"
+}
+
+unres_v6()
+{
+ # Send a multicast packet not corresponding to an installed route,
+ # causing the kernel to queue the packet for resolution and emit an
+ # MRT6MSG_NOCACHE notification. smcrouted will react to this
+ # notification by consulting its (*, G) list and installing an (S, G)
+ # route, which will be used to forward the queued packet.
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 1 flower \
+ dst_ip ff0e::3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $h3 ingress protocol ipv6 pref 1 handle 1 flower \
+ dst_ip ff0e::3 ip_proto udp dst_port 12345 action drop
+
+ # Forwarding should fail before installing a matching (*, G).
+ $MZ $h1 -6 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 33:33:00:00:00:03 \
+ -A 2001:db8:1::2 -B ff0e::3 -q
+
+ tc_check_packets "dev $h2 ingress" 1 0
+ check_err $? "Multicast received on first host when should not"
+ tc_check_packets "dev $h3 ingress" 1 0
+ check_err $? "Multicast received on second host when should not"
+
+ # Create (*, G). Will not be installed in the kernel.
+ create_mcast_sg $rp1 :: ff0e::3 $rp2 $rp3
+
+ $MZ $h1 -6 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 33:33:00:00:00:03 \
+ -A 2001:db8:1::2 -B ff0e::3 -q
+
+ tc_check_packets "dev $h2 ingress" 1 1
+ check_err $? "Multicast not received on first host"
+ tc_check_packets "dev $h3 ingress" 1 1
+ check_err $? "Multicast not received on second host"
+
+ delete_mcast_sg $rp1 :: ff0e::3 $rp2 $rp3
+
+ tc filter del dev $h3 ingress protocol ipv6 pref 1 handle 1 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 1 flower
+
+ log_test "Unresolved queue IPv6"
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
index e714bae473fb..81f31179ac88 100755
--- a/tools/testing/selftests/net/forwarding/sch_red.sh
+++ b/tools/testing/selftests/net/forwarding/sch_red.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# This test sends one stream of traffic from H1 through a TBF shaper, to a RED
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 072d709c96b4..65aea27d761c 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -328,7 +328,7 @@ static void test_extra_filter(const struct test_params p)
if (bind(fd1, addr, sockaddr_size()))
error(1, errno, "failed to bind recv socket 1");
- if (!bind(fd2, addr, sockaddr_size()) && errno != EADDRINUSE)
+ if (!bind(fd2, addr, sockaddr_size()) || errno != EADDRINUSE)
error(1, errno, "bind socket 2 should fail with EADDRINUSE");
free(addr);
diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
index a6991877e50c..e908009576c7 100755
--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
@@ -91,7 +91,7 @@ src
start 1
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp
race_repeat 3
@@ -116,7 +116,7 @@ src
start 10
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp6
race_repeat 3
@@ -141,7 +141,7 @@ src
start 1
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp
race_repeat 0
@@ -163,7 +163,7 @@ src mac
start 10
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp6
race_repeat 0
@@ -185,7 +185,7 @@ src mac proto
start 10
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp6
race_repeat 0
@@ -207,7 +207,7 @@ src addr4
start 1
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp
race_repeat 3
@@ -227,7 +227,7 @@ src addr6 port
start 10
count 5
src_delta 2000
-tools sendip nc
+tools sendip socat nc
proto udp6
race_repeat 3
@@ -247,7 +247,7 @@ src mac proto addr4
start 1
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp
race_repeat 0
@@ -264,7 +264,7 @@ src mac
start 1
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp
race_repeat 0
@@ -286,7 +286,7 @@ src mac addr4
start 1
count 5
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp
race_repeat 0
@@ -337,7 +337,7 @@ src addr4
start 1
count 5
src_delta 2000
-tools sendip nc
+tools sendip socat nc
proto udp
race_repeat 3
@@ -363,7 +363,7 @@ src mac
start 1
count 1
src_delta 2000
-tools sendip nc bash
+tools sendip socat nc bash
proto udp
race_repeat 0
@@ -541,6 +541,24 @@ setup_send_udp() {
dst_port=
src_addr4=
}
+ elif command -v socat -v >/dev/null; then
+ send_udp() {
+ if [ -n "${src_addr4}" ]; then
+ B ip addr add "${src_addr4}" dev veth_b
+ __socatbind=",bind=${src_addr4}"
+ if [ -n "${src_port}" ];then
+ __socatbind="${__socatbind}:${src_port}"
+ fi
+ fi
+
+ ip addr add "${dst_addr4}" dev veth_a 2>/dev/null
+ [ -z "${dst_port}" ] && dst_port=12345
+
+ echo "test4" | B socat -t 0.01 STDIN UDP4-DATAGRAM:${dst_addr4}:${dst_port}"${__socatbind}"
+
+ src_addr4=
+ src_port=
+ }
elif command -v nc >/dev/null; then
if nc -u -w0 1.1.1.1 1 2>/dev/null; then
# OpenBSD netcat
@@ -606,6 +624,29 @@ setup_send_udp6() {
dst_port=
src_addr6=
}
+ elif command -v socat -v >/dev/null; then
+ send_udp6() {
+ ip -6 addr add "${dst_addr6}" dev veth_a nodad \
+ 2>/dev/null
+
+ __socatbind6=
+
+ if [ -n "${src_addr6}" ]; then
+ if [ -n "${src_addr6} != "${src_addr6_added} ]; then
+ B ip addr add "${src_addr6}" dev veth_b nodad
+
+ src_addr6_added=${src_addr6}
+ fi
+
+ __socatbind6=",bind=[${src_addr6}]"
+
+ if [ -n "${src_port}" ] ;then
+ __socatbind6="${__socatbind6}:${src_port}"
+ fi
+ fi
+
+ echo "test6" | B socat -t 0.01 STDIN UDP6-DATAGRAM:[${dst_addr6}]:${dst_port}"${__socatbind6}"
+ }
elif command -v nc >/dev/null && nc -u -w0 1.1.1.1 1 2>/dev/null; then
# GNU netcat might not work with IPv6, try next tool
send_udp6() {
diff --git a/tools/testing/selftests/nolibc/.gitignore b/tools/testing/selftests/nolibc/.gitignore
new file mode 100644
index 000000000000..4696df589d68
--- /dev/null
+++ b/tools/testing/selftests/nolibc/.gitignore
@@ -0,0 +1,4 @@
+/initramfs/
+/nolibc-test
+/run.out
+/sysroot/
diff --git a/tools/testing/selftests/nolibc/Makefile b/tools/testing/selftests/nolibc/Makefile
new file mode 100644
index 000000000000..69ea659caca9
--- /dev/null
+++ b/tools/testing/selftests/nolibc/Makefile
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for nolibc tests
+include ../../../scripts/Makefile.include
+
+# we're in ".../tools/testing/selftests/nolibc"
+ifeq ($(srctree),)
+srctree := $(patsubst %/tools/testing/selftests/,%,$(dir $(CURDIR)))
+endif
+
+ifeq ($(ARCH),)
+include $(srctree)/scripts/subarch.include
+ARCH = $(SUBARCH)
+endif
+
+# kernel image names by architecture
+IMAGE_i386 = arch/x86/boot/bzImage
+IMAGE_x86 = arch/x86/boot/bzImage
+IMAGE_arm64 = arch/arm64/boot/Image
+IMAGE_arm = arch/arm/boot/zImage
+IMAGE_mips = vmlinuz
+IMAGE_riscv = arch/riscv/boot/Image
+IMAGE = $(IMAGE_$(ARCH))
+IMAGE_NAME = $(notdir $(IMAGE))
+
+# default kernel configurations that appear to be usable
+DEFCONFIG_i386 = defconfig
+DEFCONFIG_x86 = defconfig
+DEFCONFIG_arm64 = defconfig
+DEFCONFIG_arm = multi_v7_defconfig
+DEFCONFIG_mips = malta_defconfig
+DEFCONFIG_riscv = defconfig
+DEFCONFIG = $(DEFCONFIG_$(ARCH))
+
+# optional tests to run (default = all)
+TEST =
+
+# QEMU_ARCH: arch names used by qemu
+QEMU_ARCH_i386 = i386
+QEMU_ARCH_x86 = x86_64
+QEMU_ARCH_arm64 = aarch64
+QEMU_ARCH_arm = arm
+QEMU_ARCH_mips = mipsel # works with malta_defconfig
+QEMU_ARCH_riscv = riscv64
+QEMU_ARCH = $(QEMU_ARCH_$(ARCH))
+
+# QEMU_ARGS : some arch-specific args to pass to qemu
+QEMU_ARGS_i386 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_x86 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_arm64 = -M virt -cpu cortex-a53 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_arm = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mips = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_riscv = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS = $(QEMU_ARGS_$(ARCH))
+
+# OUTPUT is only set when run from the main makefile, otherwise
+# it defaults to this nolibc directory.
+OUTPUT ?= $(CURDIR)/
+
+ifeq ($(V),1)
+Q=
+else
+Q=@
+endif
+
+CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables
+LDFLAGS := -s
+
+help:
+ @echo "Supported targets under selftests/nolibc:"
+ @echo " all call the \"run\" target below"
+ @echo " help this help"
+ @echo " sysroot create the nolibc sysroot here (uses \$$ARCH)"
+ @echo " nolibc-test build the executable (uses \$$CC and \$$CROSS_COMPILE)"
+ @echo " initramfs prepare the initramfs with nolibc-test"
+ @echo " defconfig create a fresh new default config (uses \$$ARCH)"
+ @echo " kernel (re)build the kernel with the initramfs (uses \$$ARCH)"
+ @echo " run runs the kernel in QEMU after building it (uses \$$ARCH, \$$TEST)"
+ @echo " rerun runs a previously prebuilt kernel in QEMU (uses \$$ARCH, \$$TEST)"
+ @echo " clean clean the sysroot, initramfs, build and output files"
+ @echo ""
+ @echo "The output file is \"run.out\". Test ranges may be passed using \$$TEST."
+ @echo ""
+ @echo "Currently using the following variables:"
+ @echo " ARCH = $(ARCH)"
+ @echo " CROSS_COMPILE = $(CROSS_COMPILE)"
+ @echo " CC = $(CC)"
+ @echo " OUTPUT = $(OUTPUT)"
+ @echo " TEST = $(TEST)"
+ @echo " QEMU_ARCH = $(if $(QEMU_ARCH),$(QEMU_ARCH),UNKNOWN_ARCH) [determined from \$$ARCH]"
+ @echo " IMAGE_NAME = $(if $(IMAGE_NAME),$(IMAGE_NAME),UNKNOWN_ARCH) [determined from \$$ARCH]"
+ @echo ""
+
+all: run
+
+sysroot: sysroot/$(ARCH)/include
+
+sysroot/$(ARCH)/include:
+ $(QUIET_MKDIR)mkdir -p sysroot
+ $(Q)$(MAKE) -C ../../../include/nolibc ARCH=$(ARCH) OUTPUT=$(CURDIR)/sysroot/ headers_standalone
+ $(Q)mv sysroot/sysroot sysroot/$(ARCH)
+
+nolibc-test: nolibc-test.c sysroot/$(ARCH)/include
+ $(QUIET_CC)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ \
+ -nostdlib -static -Isysroot/$(ARCH)/include $< -lgcc
+
+initramfs: nolibc-test
+ $(QUIET_MKDIR)mkdir -p initramfs
+ $(call QUIET_INSTALL, initramfs/init)
+ $(Q)cp nolibc-test initramfs/init
+
+defconfig:
+ $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) mrproper $(DEFCONFIG) prepare
+
+kernel: initramfs
+ $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(IMAGE_NAME) CONFIG_INITRAMFS_SOURCE=$(CURDIR)/initramfs
+
+# run the tests after building the kernel
+run: kernel
+ $(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(srctree)/$(IMAGE)" -serial stdio $(QEMU_ARGS) > "$(CURDIR)/run.out"
+ $(Q)grep -w FAIL "$(CURDIR)/run.out" && echo "See all results in $(CURDIR)/run.out" || echo "$$(grep -c ^[0-9].*OK $(CURDIR)/run.out) test(s) passed."
+
+# re-run the tests from an existing kernel
+rerun:
+ $(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(srctree)/$(IMAGE)" -serial stdio $(QEMU_ARGS) > "$(CURDIR)/run.out"
+ $(Q)grep -w FAIL "$(CURDIR)/run.out" && echo "See all results in $(CURDIR)/run.out" || echo "$$(grep -c ^[0-9].*OK $(CURDIR)/run.out) test(s) passed."
+
+clean:
+ $(call QUIET_CLEAN, sysroot)
+ $(Q)rm -rf sysroot
+ $(call QUIET_CLEAN, nolibc-test)
+ $(Q)rm -f nolibc-test
+ $(call QUIET_CLEAN, initramfs)
+ $(Q)rm -rf initramfs
+ $(call QUIET_CLEAN, run.out)
+ $(Q)rm -rf run.out
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
new file mode 100644
index 000000000000..78bced95ac63
--- /dev/null
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+/* platform-specific include files coming from the compiler */
+#include <limits.h>
+
+/* libc-specific include files
+ * The program may be built in 3 ways:
+ * $(CC) -nostdlib -include /path/to/nolibc.h => NOLIBC already defined
+ * $(CC) -nostdlib -I/path/to/nolibc/sysroot => _NOLIBC_* guards are present
+ * $(CC) with default libc => NOLIBC* never defined
+ */
+#ifndef NOLIBC
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _NOLIBC_STDIO_H
+/* standard libcs need more includes */
+#include <linux/reboot.h>
+#include <sys/io.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/reboot.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/sysmacros.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <unistd.h>
+#endif
+#endif
+
+/* will be used by nolibc by getenv() */
+char **environ;
+
+/* definition of a series of tests */
+struct test {
+ const char *name; // test name
+ int (*func)(int min, int max); // handler
+};
+
+#ifndef _NOLIBC_STDLIB_H
+char *itoa(int i)
+{
+ static char buf[12];
+ int ret;
+
+ ret = snprintf(buf, sizeof(buf), "%d", i);
+ return (ret >= 0 && ret < sizeof(buf)) ? buf : "#err";
+}
+#endif
+
+#define CASE_ERR(err) \
+ case err: return #err
+
+/* returns the error name (e.g. "ENOENT") for common errors, "SUCCESS" for 0,
+ * or the decimal value for less common ones.
+ */
+const char *errorname(int err)
+{
+ switch (err) {
+ case 0: return "SUCCESS";
+ CASE_ERR(EPERM);
+ CASE_ERR(ENOENT);
+ CASE_ERR(ESRCH);
+ CASE_ERR(EINTR);
+ CASE_ERR(EIO);
+ CASE_ERR(ENXIO);
+ CASE_ERR(E2BIG);
+ CASE_ERR(ENOEXEC);
+ CASE_ERR(EBADF);
+ CASE_ERR(ECHILD);
+ CASE_ERR(EAGAIN);
+ CASE_ERR(ENOMEM);
+ CASE_ERR(EACCES);
+ CASE_ERR(EFAULT);
+ CASE_ERR(ENOTBLK);
+ CASE_ERR(EBUSY);
+ CASE_ERR(EEXIST);
+ CASE_ERR(EXDEV);
+ CASE_ERR(ENODEV);
+ CASE_ERR(ENOTDIR);
+ CASE_ERR(EISDIR);
+ CASE_ERR(EINVAL);
+ CASE_ERR(ENFILE);
+ CASE_ERR(EMFILE);
+ CASE_ERR(ENOTTY);
+ CASE_ERR(ETXTBSY);
+ CASE_ERR(EFBIG);
+ CASE_ERR(ENOSPC);
+ CASE_ERR(ESPIPE);
+ CASE_ERR(EROFS);
+ CASE_ERR(EMLINK);
+ CASE_ERR(EPIPE);
+ CASE_ERR(EDOM);
+ CASE_ERR(ERANGE);
+ CASE_ERR(ENOSYS);
+ default:
+ return itoa(err);
+ }
+}
+
+static int pad_spc(int llen, int cnt, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ int ret;
+
+ for (len = 0; len < cnt - llen; len++)
+ putchar(' ');
+
+ va_start(args, fmt);
+ ret = vfprintf(stdout, fmt, args);
+ va_end(args);
+ return ret < 0 ? ret : ret + len;
+}
+
+/* The tests below are intended to be used by the macros, which evaluate
+ * expression <expr>, print the status to stdout, and update the "ret"
+ * variable to count failures. The functions themselves return the number
+ * of failures, thus either 0 or 1.
+ */
+
+#define EXPECT_ZR(cond, expr) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_zr(expr, llen); } while (0)
+
+static int expect_zr(int expr, int llen)
+{
+ int ret = !(expr == 0);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_NZ(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_nz(expr, llen; } while (0)
+
+static int expect_nz(int expr, int llen)
+{
+ int ret = !(expr != 0);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_EQ(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_eq(expr, llen, val); } while (0)
+
+static int expect_eq(int expr, int llen, int val)
+{
+ int ret = !(expr == val);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_NE(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_ne(expr, llen, val); } while (0)
+
+static int expect_ne(int expr, int llen, int val)
+{
+ int ret = !(expr != val);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_GE(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_ge(expr, llen, val); } while (0)
+
+static int expect_ge(int expr, int llen, int val)
+{
+ int ret = !(expr >= val);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_GT(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_gt(expr, llen, val); } while (0)
+
+static int expect_gt(int expr, int llen, int val)
+{
+ int ret = !(expr > val);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_LE(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_le(expr, llen, val); } while (0)
+
+static int expect_le(int expr, int llen, int val)
+{
+ int ret = !(expr <= val);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_LT(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_lt(expr, llen, val); } while (0)
+
+static int expect_lt(int expr, int llen, int val)
+{
+ int ret = !(expr < val);
+
+ llen += printf(" = %d ", expr);
+ pad_spc(llen, 40, ret ? "[FAIL]\n" : " [OK]\n");
+ return ret;
+}
+
+
+#define EXPECT_SYSZR(cond, expr) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_syszr(expr, llen); } while (0)
+
+static int expect_syszr(int expr, int llen)
+{
+ int ret = 0;
+
+ if (expr) {
+ ret = 1;
+ llen += printf(" = %d %s ", expr, errorname(errno));
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += printf(" = %d ", expr);
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_SYSEQ(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_syseq(expr, llen, val); } while (0)
+
+static int expect_syseq(int expr, int llen, int val)
+{
+ int ret = 0;
+
+ if (expr != val) {
+ ret = 1;
+ llen += printf(" = %d %s ", expr, errorname(errno));
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += printf(" = %d ", expr);
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_SYSNE(cond, expr, val) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_sysne(expr, llen, val); } while (0)
+
+static int expect_sysne(int expr, int llen, int val)
+{
+ int ret = 0;
+
+ if (expr == val) {
+ ret = 1;
+ llen += printf(" = %d %s ", expr, errorname(errno));
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += printf(" = %d ", expr);
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_SYSER(cond, expr, expret, experr) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_syserr(expr, expret, experr, llen); } while (0)
+
+static int expect_syserr(int expr, int expret, int experr, int llen)
+{
+ int ret = 0;
+ int _errno = errno;
+
+ llen += printf(" = %d %s ", expr, errorname(_errno));
+ if (expr != expret || _errno != experr) {
+ ret = 1;
+ llen += printf(" != (%d %s) ", expret, errorname(experr));
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_PTRZR(cond, expr) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_ptrzr(expr, llen); } while (0)
+
+static int expect_ptrzr(const void *expr, int llen)
+{
+ int ret = 0;
+
+ llen += printf(" = <%p> ", expr);
+ if (expr) {
+ ret = 1;
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_PTRNZ(cond, expr) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_ptrnz(expr, llen); } while (0)
+
+static int expect_ptrnz(const void *expr, int llen)
+{
+ int ret = 0;
+
+ llen += printf(" = <%p> ", expr);
+ if (!expr) {
+ ret = 1;
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_STRZR(cond, expr) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_strzr(expr, llen); } while (0)
+
+static int expect_strzr(const char *expr, int llen)
+{
+ int ret = 0;
+
+ llen += printf(" = <%s> ", expr);
+ if (expr) {
+ ret = 1;
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_STRNZ(cond, expr) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_strnz(expr, llen); } while (0)
+
+static int expect_strnz(const char *expr, int llen)
+{
+ int ret = 0;
+
+ llen += printf(" = <%s> ", expr);
+ if (!expr) {
+ ret = 1;
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_STREQ(cond, expr, cmp) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_streq(expr, llen, cmp); } while (0)
+
+static int expect_streq(const char *expr, int llen, const char *cmp)
+{
+ int ret = 0;
+
+ llen += printf(" = <%s> ", expr);
+ if (strcmp(expr, cmp) != 0) {
+ ret = 1;
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+#define EXPECT_STRNE(cond, expr, cmp) \
+ do { if (!cond) pad_spc(llen, 40, "[SKIPPED]\n"); else ret += expect_strne(expr, llen, cmp); } while (0)
+
+static int expect_strne(const char *expr, int llen, const char *cmp)
+{
+ int ret = 0;
+
+ llen += printf(" = <%s> ", expr);
+ if (strcmp(expr, cmp) == 0) {
+ ret = 1;
+ llen += pad_spc(llen, 40, "[FAIL]\n");
+ } else {
+ llen += pad_spc(llen, 40, " [OK]\n");
+ }
+ return ret;
+}
+
+
+/* declare tests based on line numbers. There must be exactly one test per line. */
+#define CASE_TEST(name) \
+ case __LINE__: llen += printf("%d %s", test, #name);
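(Illustration only, not part of the patch: the switch statements below compute "test + __LINE__ + 1" on the line of the switch itself, and each CASE_TEST() expands to "case __LINE__:", so test number 0 selects the case on the line right after the switch, 1 the line after that, and so on; that is why each test must occupy exactly one line and why blank lines between cases would leave holes in the numbering. A minimal stand-alone sketch:)

#include <stdio.h>

#define CASE_TEST(name) \
	case __LINE__: printf("%d %s\n", test, #name);

int main(void)
{
	int test;

	for (test = 0; test <= 2; test++) {
		switch (test + __LINE__ + 1) {
		CASE_TEST(first);  break;  /* selected when test == 0 */
		CASE_TEST(second); break;  /* selected when test == 1 */
		case __LINE__: return 0;   /* selected when test == 2 */
		}
	}
	return 0;
}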
+
+
+/* used by some syscall tests below */
+int test_getdents64(const char *dir)
+{
+ char buffer[4096];
+ int fd, ret;
+ int err;
+
+ ret = fd = open(dir, O_RDONLY | O_DIRECTORY, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = getdents64(fd, (void *)buffer, sizeof(buffer));
+ err = errno;
+ close(fd);
+
+ errno = err;
+ return ret;
+}
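(A hedged stand-alone sketch, not part of the patch: test_getdents64() above only checks that the raw syscall succeeds and never parses the buffer it fills. The records follow the kernel's linux_dirent64 layout and are walked by advancing d_reclen bytes at a time; the local struct definition and the use of the generic syscall() wrapper below are assumptions for portability, not taken from this file.)

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* local mirror of the kernel's linux_dirent64 record layout */
struct linux_dirent64 {
	unsigned long long d_ino;
	long long          d_off;
	unsigned short     d_reclen;  /* length of this whole record */
	unsigned char      d_type;
	char               d_name[];  /* NUL-terminated file name */
};

int main(void)
{
	char buffer[4096];
	int fd, ret, off;

	fd = open("/", O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return 1;

	/* a single buffer-full is enough for this illustration */
	ret = syscall(SYS_getdents64, fd, buffer, sizeof(buffer));
	for (off = 0; ret > 0 && off < ret; ) {
		struct linux_dirent64 *d = (void *)(buffer + off);

		printf("%s\n", d->d_name);
		off += d->d_reclen;  /* records are variable-length */
	}
	close(fd);
	return ret < 0;
}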
+
+/* Run syscall tests between IDs <min> and <max>.
+ * Return 0 on success, non-zero on failure.
+ */
+int run_syscall(int min, int max)
+{
+ struct stat stat_buf;
+ int proc;
+ int test;
+ int tmp;
+ int ret = 0;
+ void *p1, *p2;
+
+ /* <proc> indicates whether or not /proc is mounted */
+ proc = stat("/proc", &stat_buf) == 0;
+
+ for (test = min; test >= 0 && test <= max; test++) {
+ int llen = 0; // line length
+
+ /* avoid leaving empty lines below, as they would insert holes
+ * into the test numbers.
+ */
+ switch (test + __LINE__ + 1) {
+ CASE_TEST(getpid); EXPECT_SYSNE(1, getpid(), -1); break;
+ CASE_TEST(getppid); EXPECT_SYSNE(1, getppid(), -1); break;
+#ifdef NOLIBC
+ CASE_TEST(gettid); EXPECT_SYSNE(1, gettid(), -1); break;
+#endif
+ CASE_TEST(getpgid_self); EXPECT_SYSNE(1, getpgid(0), -1); break;
+ CASE_TEST(getpgid_bad); EXPECT_SYSER(1, getpgid(-1), -1, ESRCH); break;
+ CASE_TEST(kill_0); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
+ CASE_TEST(kill_CONT); EXPECT_SYSZR(1, kill(getpid(), SIGCONT)); break;
+ CASE_TEST(kill_BADPID); EXPECT_SYSER(1, kill(INT_MAX, 0), -1, ESRCH); break;
+ CASE_TEST(sbrk); if ((p1 = p2 = sbrk(4096)) != (void *)-1) p2 = sbrk(-4096); EXPECT_SYSZR(1, (p2 == (void *)-1) || p2 == p1); break;
+ CASE_TEST(brk); EXPECT_SYSZR(1, brk(sbrk(0))); break;
+ CASE_TEST(chdir_root); EXPECT_SYSZR(1, chdir("/")); break;
+ CASE_TEST(chdir_dot); EXPECT_SYSZR(1, chdir(".")); break;
+ CASE_TEST(chdir_blah); EXPECT_SYSER(1, chdir("/blah"), -1, ENOENT); break;
+ CASE_TEST(chmod_net); EXPECT_SYSZR(proc, chmod("/proc/self/net", 0555)); break;
+ CASE_TEST(chmod_self); EXPECT_SYSER(proc, chmod("/proc/self", 0555), -1, EPERM); break;
+ CASE_TEST(chown_self); EXPECT_SYSER(proc, chown("/proc/self", 0, 0), -1, EPERM); break;
+ CASE_TEST(chroot_root); EXPECT_SYSZR(1, chroot("/")); break;
+ CASE_TEST(chroot_blah); EXPECT_SYSER(1, chroot("/proc/self/blah"), -1, ENOENT); break;
+ CASE_TEST(chroot_exe); EXPECT_SYSER(proc, chroot("/proc/self/exe"), -1, ENOTDIR); break;
+ CASE_TEST(close_m1); EXPECT_SYSER(1, close(-1), -1, EBADF); break;
+ CASE_TEST(close_dup); EXPECT_SYSZR(1, close(dup(0))); break;
+ CASE_TEST(dup_0); tmp = dup(0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
+ CASE_TEST(dup_m1); tmp = dup(-1); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
+ CASE_TEST(dup2_0); tmp = dup2(0, 100); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
+ CASE_TEST(dup2_m1); tmp = dup2(-1, 100); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
+ CASE_TEST(dup3_0); tmp = dup3(0, 100, 0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
+ CASE_TEST(dup3_m1); tmp = dup3(-1, 100, 0); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
+ CASE_TEST(execve_root); EXPECT_SYSER(1, execve("/", (char*[]){ [0] = "/", [1] = NULL }, NULL), -1, EACCES); break;
+ CASE_TEST(getdents64_root); EXPECT_SYSNE(1, test_getdents64("/"), -1); break;
+ CASE_TEST(getdents64_null); EXPECT_SYSER(1, test_getdents64("/dev/null"), -1, ENOTDIR); break;
+ CASE_TEST(gettimeofday_null); EXPECT_SYSZR(1, gettimeofday(NULL, NULL)); break;
+#ifdef NOLIBC
+ CASE_TEST(gettimeofday_bad1); EXPECT_SYSER(1, gettimeofday((void *)1, NULL), -1, EFAULT); break;
+ CASE_TEST(gettimeofday_bad2); EXPECT_SYSER(1, gettimeofday(NULL, (void *)1), -1, EFAULT); break;
+#endif
+ CASE_TEST(ioctl_tiocinq); EXPECT_SYSZR(1, ioctl(0, TIOCINQ, &tmp)); break;
+ CASE_TEST(link_root1); EXPECT_SYSER(1, link("/", "/"), -1, EEXIST); break;
+ CASE_TEST(link_blah); EXPECT_SYSER(1, link("/proc/self/blah", "/blah"), -1, ENOENT); break;
+ CASE_TEST(link_dir); EXPECT_SYSER(1, link("/", "/blah"), -1, EPERM); break;
+ CASE_TEST(link_cross); EXPECT_SYSER(proc, link("/proc/self/net", "/blah"), -1, EXDEV); break;
+ CASE_TEST(lseek_m1); EXPECT_SYSER(1, lseek(-1, 0, SEEK_SET), -1, EBADF); break;
+ CASE_TEST(lseek_0); EXPECT_SYSER(1, lseek(0, 0, SEEK_SET), -1, ESPIPE); break;
+ CASE_TEST(mkdir_root); EXPECT_SYSER(1, mkdir("/", 0755), -1, EEXIST); break;
+ CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", 0), -1); if (tmp != -1) close(tmp); break;
+ CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", 0), -1, ENOENT); if (tmp != -1) close(tmp); break;
+ CASE_TEST(poll_null); EXPECT_SYSZR(1, poll(NULL, 0, 0)); break;
+ CASE_TEST(poll_stdout); EXPECT_SYSNE(1, ({ struct pollfd fds = { 1, POLLOUT, 0}; poll(&fds, 1, 0); }), -1); break;
+ CASE_TEST(poll_fault); EXPECT_SYSER(1, poll((void *)1, 1, 0), -1, EFAULT); break;
+ CASE_TEST(read_badf); EXPECT_SYSER(1, read(-1, &tmp, 1), -1, EBADF); break;
+ CASE_TEST(sched_yield); EXPECT_SYSZR(1, sched_yield()); break;
+ CASE_TEST(select_null); EXPECT_SYSZR(1, ({ struct timeval tv = { 0 }; select(0, NULL, NULL, NULL, &tv); })); break;
+ CASE_TEST(select_stdout); EXPECT_SYSNE(1, ({ fd_set fds; FD_ZERO(&fds); FD_SET(1, &fds); select(2, NULL, &fds, NULL, NULL); }), -1); break;
+ CASE_TEST(select_fault); EXPECT_SYSER(1, select(1, (void *)1, NULL, NULL, 0), -1, EFAULT); break;
+ CASE_TEST(stat_blah); EXPECT_SYSER(1, stat("/proc/self/blah", &stat_buf), -1, ENOENT); break;
+ CASE_TEST(stat_fault); EXPECT_SYSER(1, stat(NULL, &stat_buf), -1, EFAULT); break;
+ CASE_TEST(symlink_root); EXPECT_SYSER(1, symlink("/", "/"), -1, EEXIST); break;
+ CASE_TEST(unlink_root); EXPECT_SYSER(1, unlink("/"), -1, EISDIR); break;
+ CASE_TEST(unlink_blah); EXPECT_SYSER(1, unlink("/proc/self/blah"), -1, ENOENT); break;
+ CASE_TEST(wait_child); EXPECT_SYSER(1, wait(&tmp), -1, ECHILD); break;
+ CASE_TEST(waitpid_min); EXPECT_SYSER(1, waitpid(INT_MIN, &tmp, WNOHANG), -1, ESRCH); break;
+ CASE_TEST(waitpid_child); EXPECT_SYSER(1, waitpid(getpid(), &tmp, WNOHANG), -1, ECHILD); break;
+ CASE_TEST(write_badf); EXPECT_SYSER(1, write(-1, &tmp, 1), -1, EBADF); break;
+ CASE_TEST(write_zero); EXPECT_SYSZR(1, write(1, &tmp, 0)); break;
+ case __LINE__:
+ return ret; /* must be last */
+ /* note: do not set any defaults so as to permit holes above */
+ }
+ }
+ return ret;
+}
+
+int run_stdlib(int min, int max)
+{
+ int test;
+ int tmp;
+ int ret = 0;
+ void *p1, *p2;
+
+ for (test = min; test >= 0 && test <= max; test++) {
+ int llen = 0; // line length
+
+ /* avoid leaving empty lines below, as they would insert holes
+ * into the test numbers.
+ */
+ switch (test + __LINE__ + 1) {
+ CASE_TEST(getenv_TERM); EXPECT_STRNZ(1, getenv("TERM")); break;
+ CASE_TEST(getenv_blah); EXPECT_STRZR(1, getenv("blah")); break;
+ CASE_TEST(strcmp_blah_blah); EXPECT_EQ(1, strcmp("blah", "blah"), 0); break;
+ CASE_TEST(strcmp_blah_blah2); EXPECT_NE(1, strcmp("blah", "blah2"), 0); break;
+ CASE_TEST(strncmp_blah_blah); EXPECT_EQ(1, strncmp("blah", "blah", 10), 0); break;
+ CASE_TEST(strncmp_blah_blah4); EXPECT_EQ(1, strncmp("blah", "blah4", 4), 0); break;
+ CASE_TEST(strncmp_blah_blah5); EXPECT_NE(1, strncmp("blah", "blah5", 5), 0); break;
+ CASE_TEST(strncmp_blah_blah6); EXPECT_NE(1, strncmp("blah", "blah6", 6), 0); break;
+ CASE_TEST(strchr_foobar_o); EXPECT_STREQ(1, strchr("foobar", 'o'), "oobar"); break;
+ CASE_TEST(strchr_foobar_z); EXPECT_STRZR(1, strchr("foobar", 'z')); break;
+ CASE_TEST(strrchr_foobar_o); EXPECT_STREQ(1, strrchr("foobar", 'o'), "obar"); break;
+ CASE_TEST(strrchr_foobar_z); EXPECT_STRZR(1, strrchr("foobar", 'z')); break;
+ case __LINE__:
+ return ret; /* must be last */
+ /* note: do not set any defaults so as to permit holes above */
+ }
+ }
+ return ret;
+}
+
+/* prepare what needs to be prepared for pid 1 (stdio, /dev, /proc, etc) */
+int prepare(void)
+{
+ struct stat stat_buf;
+
+ /* It's possible that /dev doesn't even exist or was not mounted, so
+ * we'll try to create it, mount it, or create minimal entries into it.
+ * We want at least /dev/null and /dev/console.
+ */
+ if (stat("/dev/.", &stat_buf) == 0 || mkdir("/dev", 0755) == 0) {
+ if (stat("/dev/console", &stat_buf) != 0 ||
+ stat("/dev/null", &stat_buf) != 0) {
+ /* try devtmpfs first, otherwise fall back to manual creation */
+ if (mount("/dev", "/dev", "devtmpfs", 0, 0) != 0) {
+ mknod("/dev/console", 0600 | S_IFCHR, makedev(5, 1));
+ mknod("/dev/null", 0666 | S_IFCHR, makedev(1, 3));
+ }
+ }
+ }
+
+ /* If no /dev/console was found before calling init, stdio is closed, so
+ * we need to reopen it from /dev/console. If the setup above failed, it
+ * will still fail here and we cannot emit a message anyway.
+ */
+ if (close(dup(1)) == -1) {
+ int fd = open("/dev/console", O_RDWR);
+
+ if (fd >= 0) {
+ if (fd != 0)
+ dup2(fd, 0);
+ if (fd != 1)
+ dup2(fd, 1);
+ if (fd != 2)
+ dup2(fd, 2);
+ if (fd > 2)
+ close(fd);
+ puts("\nSuccessfully reopened /dev/console.");
+ }
+ }
+
+ /* try to mount /proc if not mounted; silently ignore failures */
+ if (stat("/proc/.", &stat_buf) == 0 || mkdir("/proc", 0755) == 0) {
+ if (stat("/proc/self", &stat_buf) != 0)
+ mount("/proc", "/proc", "proc", 0, 0);
+ }
+
+ return 0;
+}
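(For reference, and not stated in the patch itself: the mknod() calls in prepare() use the conventional fixed character-device numbers, major 5, minor 1 for /dev/console and major 1, minor 3 for /dev/null, as listed in Documentation/admin-guide/devices.txt.)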
+
+/* This is the definition of known test names, with their functions */
+static struct test test_names[] = {
+ /* add new tests here */
+ { .name = "syscall", .func = run_syscall },
+ { .name = "stdlib", .func = run_stdlib },
+ { 0 }
+};
+
+int main(int argc, char **argv, char **envp)
+{
+ int min = 0;
+ int max = __INT_MAX__;
+ int ret = 0;
+ int err;
+ int idx;
+ char *test;
+
+ environ = envp;
+
+ /* when called as init, it's possible that no console was opened, for
+ * example if no /dev file system was provided. We'll check that fd #1
+ * is open, and if not we'll attempt to create and open /dev/console
+ * and /dev/null, which we'll use for later tests.
+ */
+ if (getpid() == 1)
+ prepare();
+
+ /* the definition of a series of tests comes from either argv[1] or the
+ * "NOLIBC_TEST" environment variable. It's made of a comma-delimited
+ * series of test names and optional ranges:
+ * syscall:5-15[:.*],stdlib:8-10
+ */
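(Illustration only, not part of the patch: two invocation examples for the selection syntax described above. The binary name nolibc-test is assumed from the usual selftest build; adjust to your setup.)

./nolibc-test syscall:0-9,stdlib        # syscall tests 0..9, then every stdlib test
NOLIBC_TEST=syscall:5-:3 ./nolibc-test  # tests 5..max, then test 3 again (no argv[1])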
+ test = argv[1];
+ if (!test)
+ test = getenv("NOLIBC_TEST");
+
+ if (test) {
+ char *comma, *colon, *dash, *value;
+
+ do {
+ comma = strchr(test, ',');
+ if (comma)
+ *(comma++) = '\0';
+
+ colon = strchr(test, ':');
+ if (colon)
+ *(colon++) = '\0';
+
+ for (idx = 0; test_names[idx].name; idx++) {
+ if (strcmp(test, test_names[idx].name) == 0)
+ break;
+ }
+
+ if (test_names[idx].name) {
+ /* The test was named, so it will be called at least
+ * once. We may have an optional range at <colon>
+ * here, which defaults to the full range.
+ */
+ do {
+ min = 0; max = __INT_MAX__;
+ value = colon;
+ if (value && *value) {
+ colon = strchr(value, ':');
+ if (colon)
+ *(colon++) = '\0';
+
+ dash = strchr(value, '-');
+ if (dash)
+ *(dash++) = '\0';
+
+ /* support :val: :min-max: :min-: :-max: */
+ if (*value)
+ min = atoi(value);
+ if (!dash)
+ max = min;
+ else if (*dash)
+ max = atoi(dash);
+
+ value = colon;
+ }
+
+ /* now's time to call the test */
+ printf("Running test '%s'\n", test_names[idx].name);
+ err = test_names[idx].func(min, max);
+ ret += err;
+ printf("Errors during this test: %d\n\n", err);
+ } while (colon && *colon);
+ } else
+ printf("Ignoring unknown test name '%s'\n", test);
+
+ test = comma;
+ } while (test && *test);
+ } else {
+ /* no test mentioned, run everything */
+ for (idx = 0; test_names[idx].name; idx++) {
+ printf("Running test '%s'\n", test_names[idx].name);
+ err = test_names[idx].func(min, max);
+ ret += err;
+ printf("Errors during this test: %d\n\n", err);
+ }
+ }
+
+ printf("Total number of errors: %d\n", ret);
+
+ if (getpid() == 1) {
+ /* we're running as init, so there's no other process on the
+ * system and we were most likely started from a VM for a quick
+ * check. Exiting will provoke a kernel panic that may be
+ * reported as an error by QEMU or the hypervisor, while
+ * powering off cleanly will often be reported as a success.
+ * This allows the output of this program to be used for
+ * bisecting kernels.
+ */
+ printf("Leaving init with final status: %d\n", !!ret);
+ if (ret == 0)
+ reboot(LINUX_REBOOT_CMD_POWER_OFF);
+#if defined(__x86_64__)
+ /* QEMU started with "-device isa-debug-exit -no-reboot" will
+ * exit with status code 2N+1 when N is written to 0x501. We
+ * hard-code the syscall here as it's arch-dependent.
+ */
+#if defined(_NOLIBC_SYS_H)
+ else if (my_syscall3(__NR_ioperm, 0x501, 1, 1) == 0)
+#else
+ else if (ioperm(0x501, 1, 1) == 0)
+#endif
+ asm volatile ("outb %%al, %%dx" :: "d"(0x501), "a"(0));
+ /* if it does nothing, fall back to the regular panic */
+#endif
+ }
+
+ printf("Exiting with status %d\n", !!ret);
+ return !!ret;
+}
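(A note on the isa-debug-exit fallback above, with assumed QEMU options: a guest started with something like "-device isa-debug-exit,iobase=0x501,iosize=1 -no-reboot" makes the write of N = 0 to port 0x501 terminate QEMU with exit status 2*0 + 1 = 1, which a wrapper script can tell apart from the status produced by a panic or reset. The device property names are the usual QEMU ones and are given here as an assumption, not taken from this patch.)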
diff --git a/tools/testing/selftests/timens/Makefile b/tools/testing/selftests/timens/Makefile
index f0d51d4d2c87..3a5936cc10ab 100644
--- a/tools/testing/selftests/timens/Makefile
+++ b/tools/testing/selftests/timens/Makefile
@@ -1,4 +1,4 @@
-TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex vfork_exec
+TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex
TEST_GEN_PROGS_EXTENDED := gettime_perf
CFLAGS := -Wall -Werror -pthread
diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
deleted file mode 100644
index e6ccd900f30a..000000000000
--- a/tools/testing/selftests/timens/vfork_exec.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <time.h>
-#include <unistd.h>
-#include <string.h>
-
-#include "log.h"
-#include "timens.h"
-
-#define OFFSET (36000)
-
-int main(int argc, char *argv[])
-{
- struct timespec now, tst;
- int status, i;
- pid_t pid;
-
- if (argc > 1) {
- if (sscanf(argv[1], "%ld", &now.tv_sec) != 1)
- return pr_perror("sscanf");
-
- for (i = 0; i < 2; i++) {
- _gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec) > 5)
- return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
- }
- return 0;
- }
-
- nscheck();
-
- ksft_set_plan(1);
-
- clock_gettime(CLOCK_MONOTONIC, &now);
-
- if (unshare_timens())
- return 1;
-
- if (_settime(CLOCK_MONOTONIC, OFFSET))
- return 1;
-
- for (i = 0; i < 2; i++) {
- _gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec) > 5)
- return pr_fail("%ld %ld\n",
- now.tv_sec, tst.tv_sec);
- }
-
- pid = vfork();
- if (pid < 0)
- return pr_perror("fork");
-
- if (pid == 0) {
- char now_str[64];
- char *cargv[] = {"exec", now_str, NULL};
- char *cenv[] = {NULL};
-
- // Check that we are still in the source timens.
- for (i = 0; i < 2; i++) {
- _gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec) > 5)
- return pr_fail("%ld %ld\n",
- now.tv_sec, tst.tv_sec);
- }
-
- /* Check for proper vvar offsets after execve. */
- snprintf(now_str, sizeof(now_str), "%ld", now.tv_sec + OFFSET);
- execve("/proc/self/exe", cargv, cenv);
- return pr_perror("execve");
- }
-
- if (waitpid(pid, &status, 0) != pid)
- return pr_perror("waitpid");
-
- if (status)
- ksft_exit_fail();
-
- ksft_test_result_pass("exec\n");
- ksft_exit_pass();
- return 0;
-}
diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
index fda76282d34b..e95bd56b332f 100644
--- a/tools/testing/selftests/wireguard/qemu/Makefile
+++ b/tools/testing/selftests/wireguard/qemu/Makefile
@@ -343,8 +343,10 @@ $(KERNEL_BZIMAGE): $(TOOLCHAIN_PATH)/.installed $(KERNEL_BUILD_PATH)/.config $(B
.PHONY: $(KERNEL_BZIMAGE)
$(TOOLCHAIN_PATH)/$(CHOST)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config $(TOOLCHAIN_PATH)/.installed
+ifneq ($(ARCH),um)
rm -rf $(TOOLCHAIN_PATH)/$(CHOST)/include/linux
$(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(TOOLCHAIN_PATH)/$(CHOST) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install
+endif
touch $@
$(TOOLCHAIN_PATH)/.installed: $(TOOLCHAIN_TAR)
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 363b98228301..5d3440f474dd 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -14,6 +14,7 @@ struct virtio_device {
u64 features;
struct list_head vqs;
spinlock_t vqs_list_lock;
+ const struct virtio_config_ops *config;
};
struct virtqueue {
@@ -23,7 +24,9 @@ struct virtqueue {
struct virtio_device *vdev;
unsigned int index;
unsigned int num_free;
+ unsigned int num_max;
void *priv;
+ bool reset;
};
/* Interfaces exported by virtio_ring. */
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index f2640e505c4e..2a8a70e2a950 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -3,6 +3,11 @@
#include <linux/virtio.h>
#include <uapi/linux/virtio_config.h>
+struct virtio_config_ops {
+ int (*disable_vq_and_reset)(struct virtqueue *vq);
+ int (*enable_vq_after_reset)(struct virtqueue *vq);
+};
+
/*
* __virtio_test_bit - helper to test feature bits. For use by transports.
* Devices should normally use virtio_has_feature,