-rw-r--r--.gitignore6
-rw-r--r--.mailmap3
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-livepatch8
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt3
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst2
-rw-r--r--Documentation/arch/riscv/hwprobe.rst2
-rw-r--r--Documentation/arch/riscv/vm-layout.rst11
-rw-r--r--Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml8
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml56
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-imx-dma.txt50
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-qdma.txt58
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-qdma.yaml132
-rw-r--r--Documentation/devicetree/bindings/dma/sprd,sc9860-dma.yaml92
-rw-r--r--Documentation/devicetree/bindings/dma/sprd-dma.txt44
-rw-r--r--Documentation/devicetree/bindings/dma/stm32/st,stm32-dma.yaml (renamed from Documentation/devicetree/bindings/dma/st,stm32-dma.yaml)4
-rw-r--r--Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml135
-rw-r--r--Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml (renamed from Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml)4
-rw-r--r--Documentation/devicetree/bindings/dma/stm32/st,stm32-mdma.yaml (renamed from Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml)4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml20
-rw-r--r--Documentation/devicetree/bindings/i3c/i3c.yaml5
-rw-r--r--Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml11
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml5
-rw-r--r--Documentation/devicetree/bindings/mailbox/mediatek,gce-props.yaml52
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,cpucp-mbox.yaml49
-rw-r--r--Documentation/devicetree/bindings/phy/airoha,en7581-pcie-phy.yaml69
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,imx8qm-hsio.yaml164
-rw-r--r--Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml6
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml7
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml5
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip,rk3399-emmc-phy.yaml64
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt43
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml77
-rw-r--r--Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml68
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max17201.yaml58
-rw-r--r--Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml15
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml160
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml89
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml16
-rw-r--r--Documentation/devicetree/bindings/spi/st,stm32-spi.yaml2
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml4
-rw-r--r--Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/dlg,da9062-watchdog.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml1
-rw-r--r--Documentation/gpu/amdgpu/display/dcn-blocks.rst35
-rw-r--r--Documentation/gpu/amdgpu/display/display-manager.rst4
-rw-r--r--Documentation/kbuild/kconfig-language.rst13
-rw-r--r--Documentation/kbuild/makefiles.rst6
-rw-r--r--Documentation/locking/hwspinlock.rst11
-rw-r--r--Documentation/networking/xsk-tx-metadata.rst16
-rw-r--r--Documentation/process/changes.rst13
-rw-r--r--Documentation/rust/arch-support.rst2
-rw-r--r--Documentation/rust/general-information.rst8
-rw-r--r--Documentation/rust/quick-start.rst143
-rw-r--r--Documentation/rust/testing.rst5
-rw-r--r--Documentation/virt/uml/user_mode_linux_howto_v2.rst2
-rw-r--r--MAINTAINERS75
-rw-r--r--Makefile66
-rw-r--r--arch/arm/Kconfig6
-rwxr-xr-xarch/arm/boot/install.sh2
-rw-r--r--arch/arm/common/locomo.c4
-rw-r--r--arch/arm/common/sa1111.c4
-rw-r--r--arch/arm/include/asm/hardware/locomo.h2
-rw-r--r--arch/arm/include/asm/hardware/sa1111.h2
-rw-r--r--arch/arm64/Kconfig23
-rw-r--r--arch/arm64/Makefile10
-rwxr-xr-xarch/arm64/boot/install.sh2
-rw-r--r--arch/arm64/include/asm/pgtable.h22
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c2
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/kernel/vdso/Makefile2
-rw-r--r--arch/arm64/kernel/vdso32/Makefile2
-rw-r--r--arch/arm64/tools/sysreg4
-rw-r--r--arch/loongarch/Kconfig1
-rwxr-xr-xarch/m68k/install.sh2
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/include/asm/mips-cm.h4
-rw-r--r--arch/mips/include/asm/smp.h1
-rw-r--r--arch/mips/kernel/smp-cps.c5
-rw-r--r--arch/mips/loongson64/smp.c35
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c1
-rwxr-xr-xarch/nios2/boot/install.sh2
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/parisc-device.h2
-rw-r--r--arch/parisc/include/asm/unistd.h54
-rw-r--r--arch/parisc/include/asm/vdso.h2
-rwxr-xr-xarch/parisc/install.sh2
-rw-r--r--arch/parisc/kernel/cache.c6
-rw-r--r--arch/parisc/kernel/drivers.c4
-rw-r--r--arch/parisc/kernel/unaligned.c2
-rw-r--r--arch/parisc/kernel/vdso32/Makefile24
-rw-r--r--arch/parisc/kernel/vdso32/vdso32.lds.S3
-rw-r--r--arch/parisc/kernel/vdso32/vdso32_generic.c32
-rw-r--r--arch/parisc/kernel/vdso64/Makefile25
-rw-r--r--arch/parisc/kernel/vdso64/vdso64.lds.S2
-rw-r--r--arch/parisc/kernel/vdso64/vdso64_generic.c24
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/include/asm/ps3.h6
-rw-r--r--arch/powerpc/include/asm/vio.h6
-rw-r--r--arch/powerpc/kvm/powerpc.c4
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c4
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c2
-rw-r--r--arch/powerpc/platforms/pseries/vio.c6
-rw-r--r--arch/powerpc/sysdev/xive/native.c4
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c3
-rw-r--r--arch/riscv/Kconfig9
-rw-r--r--arch/riscv/Kconfig.vendor19
-rw-r--r--arch/riscv/boot/Makefile1
-rwxr-xr-xarch/riscv/boot/install.sh2
-rw-r--r--arch/riscv/configs/defconfig26
-rw-r--r--arch/riscv/errata/andes/errata.c3
-rw-r--r--arch/riscv/errata/sifive/errata.c3
-rw-r--r--arch/riscv/errata/thead/errata.c3
-rw-r--r--arch/riscv/include/asm/acpi.h15
-rw-r--r--arch/riscv/include/asm/bitops.h2
-rw-r--r--arch/riscv/include/asm/cpufeature.h103
-rw-r--r--arch/riscv/include/asm/hwcap.h25
-rw-r--r--arch/riscv/include/asm/hwprobe.h2
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/thread_info.h1
-rw-r--r--arch/riscv/include/asm/vendor_extensions.h104
-rw-r--r--arch/riscv/include/asm/vendor_extensions/andes.h19
-rw-r--r--arch/riscv/include/uapi/asm/hwprobe.h1
-rw-r--r--arch/riscv/kernel/Makefile3
-rw-r--r--arch/riscv/kernel/acpi.c17
-rw-r--r--arch/riscv/kernel/acpi_numa.c131
-rw-r--r--arch/riscv/kernel/cacheinfo.c35
-rw-r--r--arch/riscv/kernel/cpu.c35
-rw-r--r--arch/riscv/kernel/cpufeature.c143
-rw-r--r--arch/riscv/kernel/entry.S21
-rw-r--r--arch/riscv/kernel/probes/Makefile1
-rw-r--r--arch/riscv/kernel/probes/ftrace.c65
-rw-r--r--arch/riscv/kernel/setup.c4
-rw-r--r--arch/riscv/kernel/signal.c2
-rw-r--r--arch/riscv/kernel/smpboot.c2
-rw-r--r--arch/riscv/kernel/stacktrace.c4
-rw-r--r--arch/riscv/kernel/sys_hwprobe.c48
-rw-r--r--arch/riscv/kernel/vendor_extensions.c56
-rw-r--r--arch/riscv/kernel/vendor_extensions/Makefile3
-rw-r--r--arch/riscv/kernel/vendor_extensions/andes.c18
-rw-r--r--arch/s390/Kconfig11
-rw-r--r--arch/s390/appldata/appldata_base.c10
-rw-r--r--arch/s390/boot/Makefile3
-rw-r--r--arch/s390/boot/alternative.c3
-rw-r--r--arch/s390/boot/boot.h4
-rwxr-xr-xarch/s390/boot/install.sh2
-rw-r--r--arch/s390/boot/ipl_parm.c3
-rw-r--r--arch/s390/boot/startup.c14
-rw-r--r--arch/s390/boot/uv.c8
-rw-r--r--arch/s390/boot/uv.h13
-rw-r--r--arch/s390/boot/vmem.c11
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/defconfig1
-rw-r--r--arch/s390/include/asm/abs_lowcore.h8
-rw-r--r--arch/s390/include/asm/alternative-asm.h57
-rw-r--r--arch/s390/include/asm/alternative.h154
-rw-r--r--arch/s390/include/asm/atomic_ops.h3
-rw-r--r--arch/s390/include/asm/ccwdev.h2
-rw-r--r--arch/s390/include/asm/facility.h1
-rw-r--r--arch/s390/include/asm/kmsan.h6
-rw-r--r--arch/s390/include/asm/lowcore.h32
-rw-r--r--arch/s390/include/asm/nospec-branch.h9
-rw-r--r--arch/s390/include/asm/page.h2
-rw-r--r--arch/s390/include/asm/processor.h30
-rw-r--r--arch/s390/include/asm/runtime-const.h77
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/spinlock.h2
-rw-r--r--arch/s390/include/asm/thread_info.h1
-rw-r--r--arch/s390/include/asm/uaccess.h9
-rw-r--r--arch/s390/include/asm/uv.h32
-rw-r--r--arch/s390/kernel/Makefile3
-rw-r--r--arch/s390/kernel/abs_lowcore.c1
-rw-r--r--arch/s390/kernel/alternative.c75
-rw-r--r--arch/s390/kernel/alternative.h0
-rw-r--r--arch/s390/kernel/asm-offsets.c5
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/early.c9
-rw-r--r--arch/s390/kernel/entry.S251
-rw-r--r--arch/s390/kernel/head64.S8
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/machine_kexec.c2
-rw-r--r--arch/s390/kernel/nospec-branch.c16
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c14
-rw-r--r--arch/s390/kernel/processor.c20
-rw-r--r--arch/s390/kernel/reipl.S26
-rw-r--r--arch/s390/kernel/setup.c7
-rw-r--r--arch/s390/kernel/smp.c141
-rw-r--r--arch/s390/kernel/topology.c2
-rw-r--r--arch/s390/kernel/uv.c35
-rw-r--r--arch/s390/kernel/vmlinux.lds.S5
-rw-r--r--arch/s390/lib/spinlock.c4
-rw-r--r--arch/s390/mm/cmm.c6
-rw-r--r--arch/s390/mm/dump_pagetables.c30
-rw-r--r--arch/s390/mm/maccess.c4
-rw-r--r--arch/s390/pci/pci_irq.c110
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig2
-rw-r--r--arch/sh/configs/sdk7786_defconfig2
-rw-r--r--arch/sh/configs/shx3_defconfig2
-rw-r--r--arch/sh/drivers/push-switch.c1
-rw-r--r--arch/sh/mm/Kconfig4
-rw-r--r--arch/sh/mm/init.c28
-rwxr-xr-xarch/sparc/boot/install.sh2
-rw-r--r--arch/sparc/include/asm/vio.h6
-rw-r--r--arch/sparc/kernel/vio.c4
-rw-r--r--arch/um/Kconfig8
-rw-r--r--arch/um/drivers/Kconfig20
-rw-r--r--arch/um/drivers/Makefile10
-rw-r--r--arch/um/drivers/chan.h3
-rw-r--r--arch/um/drivers/chan_kern.c81
-rw-r--r--arch/um/drivers/chan_user.c20
-rw-r--r--arch/um/drivers/harddog_kern.c1
-rw-r--r--arch/um/drivers/line.c2
-rw-r--r--arch/um/drivers/pcap_kern.c113
-rw-r--r--arch/um/drivers/pcap_user.c137
-rw-r--r--arch/um/drivers/pcap_user.h21
-rw-r--r--arch/um/drivers/port_kern.c14
-rw-r--r--arch/um/drivers/ubd_kern.c3
-rw-r--r--arch/um/drivers/vector_kern.c19
-rw-r--r--arch/um/drivers/vector_kern.h1
-rw-r--r--arch/um/drivers/xterm.c2
-rw-r--r--arch/um/drivers/xterm_kern.c13
-rw-r--r--arch/um/include/asm/mmu.h10
-rw-r--r--arch/um/include/asm/mmu_context.h2
-rw-r--r--arch/um/include/asm/pgtable.h32
-rw-r--r--arch/um/include/asm/tlbflush.h46
-rw-r--r--arch/um/include/shared/as-layout.h2
-rw-r--r--arch/um/include/shared/common-offsets.h5
-rw-r--r--arch/um/include/shared/kern_util.h1
-rw-r--r--arch/um/include/shared/os.h33
-rw-r--r--arch/um/include/shared/skas/mm_id.h2
-rw-r--r--arch/um/include/shared/skas/skas.h2
-rw-r--r--arch/um/include/shared/skas/stub-data.h36
-rw-r--r--arch/um/include/shared/timetravel.h9
-rw-r--r--arch/um/include/shared/user.h8
-rw-r--r--arch/um/kernel/exec.c9
-rw-r--r--arch/um/kernel/irq.c80
-rw-r--r--arch/um/kernel/ksyms.c2
-rw-r--r--arch/um/kernel/mem.c1
-rw-r--r--arch/um/kernel/process.c69
-rw-r--r--arch/um/kernel/reboot.c15
-rw-r--r--arch/um/kernel/skas/Makefile9
-rw-r--r--arch/um/kernel/skas/clone.c48
-rw-r--r--arch/um/kernel/skas/mmu.c54
-rw-r--r--arch/um/kernel/skas/process.c18
-rw-r--r--arch/um/kernel/skas/stub.c69
-rw-r--r--arch/um/kernel/time.c187
-rw-r--r--arch/um/kernel/tlb.c545
-rw-r--r--arch/um/kernel/trap.c15
-rw-r--r--arch/um/kernel/um_arch.c3
-rw-r--r--arch/um/os-Linux/file.c94
-rw-r--r--arch/um/os-Linux/signal.c118
-rw-r--r--arch/um/os-Linux/skas/mem.c245
-rw-r--r--arch/um/os-Linux/skas/process.c124
-rw-r--r--arch/um/os-Linux/start_up.c1
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Makefile.um1
-rwxr-xr-xarch/x86/boot/install.sh2
-rw-r--r--arch/x86/entry/vdso/Makefile3
-rw-r--r--arch/x86/entry/vdso/vdso.lds.S2
-rw-r--r--arch/x86/entry/vdso/vgetrandom-chacha.S178
-rw-r--r--arch/x86/entry/vdso/vgetrandom.c17
-rw-r--r--arch/x86/include/asm/vdso/getrandom.h55
-rw-r--r--arch/x86/include/asm/vdso/vsyscall.h2
-rw-r--r--arch/x86/include/asm/vvar.h16
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h5
-rw-r--r--arch/x86/kernel/itmt.c2
-rw-r--r--arch/x86/platform/pvh/enlighten.c3
-rw-r--r--arch/x86/um/Makefile5
-rw-r--r--arch/x86/um/asm/mm_context.h70
-rw-r--r--arch/x86/um/ldt.c380
-rw-r--r--arch/x86/um/shared/sysdep/stub.h2
-rw-r--r--arch/x86/um/shared/sysdep/stub_32.h45
-rw-r--r--arch/x86/um/shared/sysdep/stub_64.h41
-rw-r--r--arch/x86/um/stub_32.S56
-rw-r--r--arch/x86/um/stub_64.S50
-rw-r--r--arch/x86/um/tls_32.c1
-rw-r--r--arch/x86/xen/enlighten_pvh.c107
-rw-r--r--arch/x86/xen/multicalls.c19
-rw-r--r--arch/x86/xen/setup.c5
-rw-r--r--arch/x86/xen/smp_pv.c1
-rw-r--r--arch/x86/xen/xen-ops.h3
-rw-r--r--drivers/acpi/bus.c4
-rw-r--r--drivers/acpi/numa/Kconfig5
-rw-r--r--drivers/acpi/numa/srat.c32
-rw-r--r--drivers/amba/bus.c6
-rw-r--r--drivers/auxdisplay/Kconfig2
-rw-r--r--drivers/auxdisplay/arm-charlcd.c2
-rw-r--r--drivers/auxdisplay/charlcd.h2
-rw-r--r--drivers/auxdisplay/hd44780.c2
-rw-r--r--drivers/auxdisplay/hd44780_common.c1
-rw-r--r--drivers/auxdisplay/ht16k33.c1
-rw-r--r--drivers/auxdisplay/line-display.c11
-rw-r--r--drivers/base/arch_numa.c2
-rw-r--r--drivers/base/arch_topology.c145
-rw-r--r--drivers/base/auxiliary.c10
-rw-r--r--drivers/base/base.h16
-rw-r--r--drivers/base/core.c17
-rw-r--r--drivers/base/cpu.c4
-rw-r--r--drivers/base/dd.c30
-rw-r--r--drivers/base/devres.c34
-rw-r--r--drivers/base/driver.c10
-rw-r--r--drivers/base/firmware_loader/Kconfig7
-rw-r--r--drivers/base/firmware_loader/main.c90
-rw-r--r--drivers/base/isa.c2
-rw-r--r--drivers/base/module.c6
-rw-r--r--drivers/base/platform.c26
-rw-r--r--drivers/base/regmap/regcache-maple.c3
-rw-r--r--drivers/bcma/main.c6
-rw-r--r--drivers/block/rbd.c35
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c4
-rw-r--r--drivers/bus/mhi/ep/main.c4
-rw-r--r--drivers/bus/mhi/host/init.c4
-rw-r--r--drivers/bus/mips_cdmm.c6
-rw-r--r--drivers/bus/moxtet.c4
-rw-r--r--drivers/bus/sunxi-rsb.c2
-rw-r--r--drivers/bus/ti-sysc.c4
-rw-r--r--drivers/cdrom/cdrom.c4
-rw-r--r--drivers/cdx/cdx.c4
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/clk/clk-conf.c4
-rw-r--r--drivers/clk/clk-si5351.c43
-rw-r--r--drivers/clk/clk.c12
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c4
-rw-r--r--drivers/clk/qcom/common.c4
-rw-r--r--drivers/clk/samsung/clk-exynos4.c13
-rw-r--r--drivers/clk/sophgo/clk-sg2042-pll.c2
-rw-r--r--drivers/clk/sunxi/clk-simple-gates.c4
-rw-r--r--drivers/clk/sunxi/clk-sun8i-bus-gates.c4
-rw-r--r--drivers/clk/thead/Kconfig1
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c4
-rw-r--r--drivers/cxl/core/port.c2
-rw-r--r--drivers/cxl/cxl.h5
-rw-r--r--drivers/dax/bus.c17
-rw-r--r--drivers/dca/dca-sysfs.c20
-rw-r--r--drivers/dio/dio-driver.c4
-rw-r--r--drivers/dma/Kconfig34
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/altera-msgdma.c16
-rw-r--r--drivers/dma/dmaengine.c16
-rw-r--r--drivers/dma/dmatest.c1
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c1
-rw-r--r--drivers/dma/fsl-edma-common.c24
-rw-r--r--drivers/dma/fsl-edma-common.h4
-rw-r--r--drivers/dma/fsl-edma-main.c34
-rw-r--r--drivers/dma/idxd/bus.c6
-rw-r--r--drivers/dma/idxd/compat.c1
-rw-r--r--drivers/dma/idxd/dma.c2
-rw-r--r--drivers/dma/idxd/init.c1
-rw-r--r--drivers/dma/imx-sdma.c5
-rw-r--r--drivers/dma/ioat/init.c1
-rw-r--r--drivers/dma/mcf-edma-main.c2
-rw-r--r--drivers/dma/moxart-dma.c5
-rw-r--r--drivers/dma/qcom/gpi.c11
-rw-r--r--drivers/dma/qcom/hidma.c1
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c1
-rw-r--r--drivers/dma/sh/rz-dmac.c2
-rw-r--r--drivers/dma/stm32/Kconfig47
-rw-r--r--drivers/dma/stm32/Makefile5
-rw-r--r--drivers/dma/stm32/stm32-dma.c (renamed from drivers/dma/stm32-dma.c)2
-rw-r--r--drivers/dma/stm32/stm32-dma3.c1847
-rw-r--r--drivers/dma/stm32/stm32-dmamux.c (renamed from drivers/dma/stm32-dmamux.c)0
-rw-r--r--drivers/dma/stm32/stm32-mdma.c (renamed from drivers/dma/stm32-mdma.c)2
-rw-r--r--drivers/dma/ti/cppi41.c1
-rw-r--r--drivers/dma/ti/k3-psil.c1
-rw-r--r--drivers/dma/ti/k3-udma-glue.c1
-rw-r--r--drivers/dma/ti/k3-udma.c6
-rw-r--r--drivers/dma/ti/omap-dma.c1
-rw-r--r--drivers/dma/virt-dma.c1
-rw-r--r--drivers/eisa/eisa-bus.c4
-rw-r--r--drivers/firewire/core-device.c6
-rw-r--r--drivers/firmware/arm_ffa/bus.c2
-rw-r--r--drivers/firmware/arm_scmi/bus.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/screen_info.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c20
-rw-r--r--drivers/firmware/google/coreboot_table.c6
-rw-r--r--drivers/fpga/dfl.c4
-rw-r--r--drivers/fsi/fsi-core.c4
-rw-r--r--drivers/fsi/fsi-master-aspeed.c6
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c6
-rw-r--r--drivers/fsi/fsi-master-gpio.c6
-rw-r--r--drivers/fsi/fsi-occ.c6
-rw-r--r--drivers/gpio/gpio-brcmstb.c5
-rw-r--r--drivers/gpio/gpio-virtuser.c2
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_15.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_15.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_offset.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_sh_mask.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c18
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c2
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c55
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c6
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c6
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c7
-rw-r--r--drivers/gpu/drm/stm/lvds.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c5
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h8
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c16
-rw-r--r--drivers/gpu/drm/xe/xe_device.c20
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c1
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c16
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c38
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h1
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c14
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h1
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/dev.c6
-rw-r--r--drivers/gpu/host1x/mipi.c17
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c5
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c6
-rw-r--r--drivers/greybus/core.c4
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c2
-rw-r--r--drivers/hsi/clients/ssi_protocol.c1
-rw-r--r--drivers/hsi/hsi_core.c2
-rw-r--r--drivers/hv/vmbus_drv.c8
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c28
-rw-r--r--drivers/hwspinlock/hwspinlock_internal.h3
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c25
-rw-r--r--drivers/hwtracing/intel_th/core.c4
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h2
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-piix4.c9
-rw-r--r--drivers/i2c/i2c-core-base.c4
-rw-r--r--drivers/i2c/i2c-smbus.c15
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c14
-rw-r--r--drivers/i3c/internals.h2
-rw-r--r--drivers/i3c/master.c5
-rw-r--r--drivers/i3c/master/ast2600-i3c-master.c1
-rw-r--r--drivers/i3c/master/dw-i3c-master.c349
-rw-r--r--drivers/i3c/master/dw-i3c-master.h14
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c8
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c44
-rw-r--r--drivers/i3c/master/svc-i3c-master.c121
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c4
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/rmi4/rmi_bus.c6
-rw-r--r--drivers/input/rmi4/rmi_bus.h2
-rw-r--r--drivers/input/rmi4/rmi_driver.c2
-rw-r--r--drivers/input/rmi4/rmi_driver.h2
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/iommu/amd/io_pgtable.c6
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c2
-rw-r--r--drivers/iommu/sprd-iommu.c2
-rw-r--r--drivers/ipack/ipack.c6
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c4
-rw-r--r--drivers/irqchip/irq-pic32-evic.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c7
-rw-r--r--drivers/macintosh/mac_hid.c2
-rw-r--r--drivers/macintosh/macio_asic.c2
-rw-r--r--drivers/mailbox/Kconfig8
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c4
-rw-r--r--drivers/mailbox/imx-mailbox.c10
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c100
-rw-r--r--drivers/mailbox/omap-mailbox.c3
-rw-r--r--drivers/mailbox/qcom-cpucp-mbox.c187
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c9
-rw-r--r--drivers/mcb/mcb-core.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.h2
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/memstick/core/memstick.c7
-rw-r--r--drivers/mfd/mcp-core.c2
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c4
-rw-r--r--drivers/misc/mei/bus.c4
-rw-r--r--drivers/misc/tifm_core.c6
-rw-r--r--drivers/mmc/core/sdio_bus.c10
-rw-r--r--drivers/most/core.c2
-rw-r--r--drivers/net/bonding/bond_main.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h1
-rw-r--r--drivers/net/ethernet/mediatek/airoha_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c4
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/mdio_device.c4
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/tap.c5
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/vrf.c2
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/nvdimm/bus.c25
-rw-r--r--drivers/nvdimm/e820.c5
-rw-r--r--drivers/nvdimm/of_pmem.c6
-rw-r--r--drivers/nvmem/layouts.c4
-rw-r--r--drivers/parport/procfs.c12
-rw-r--r--drivers/parport/share.c2
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c4
-rw-r--r--drivers/pci/pci-driver.c8
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c1
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/pcmcia/i82365.c1
-rw-r--r--drivers/pcmcia/max1600.c1
-rw-r--r--drivers/pcmcia/rsrc_mgr.c1
-rw-r--r--drivers/pcmcia/yenta_socket.c7
-rw-r--r--drivers/peci/core.c4
-rw-r--r--drivers/peci/internal.h5
-rw-r--r--drivers/perf/arm_pmuv3.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c13
-rw-r--r--drivers/phy/Kconfig10
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb2.c1
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c1
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c207
-rw-r--r--drivers/phy/freescale/Kconfig9
-rw-r--r--drivers/phy/freescale/Makefile1
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-hsio.c611
-rw-r--r--drivers/phy/phy-airoha-pcie-regs.h494
-rw-r--r--drivers/phy/phy-airoha-pcie.c1286
-rw-r--r--drivers/phy/phy-core.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c318
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h14
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c3
-rw-r--r--drivers/phy/rockchip/Kconfig2
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c1053
-rw-r--r--drivers/phy/samsung/phy-exynos5250-usb2.c2
-rw-r--r--drivers/phy/st/phy-miphy28lp.c5
-rw-r--r--drivers/phy/starfive/Kconfig10
-rw-r--r--drivers/phy/starfive/Makefile1
-rw-r--r--drivers/phy/starfive/phy-jh7110-dphy-rx.c5
-rw-r--r--drivers/phy/starfive/phy-jh7110-dphy-tx.c461
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c1
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c133
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c198
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c4
-rw-r--r--drivers/pinctrl/pinctrl-k210.c4
-rw-r--r--drivers/platform/surface/aggregator/bus.c4
-rw-r--r--drivers/platform/x86/wmi.c9
-rw-r--r--drivers/pnp/driver.c6
-rw-r--r--drivers/power/reset/piix4-poweroff.c1
-rw-r--r--drivers/power/supply/Kconfig21
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_chargalg.c2
-rw-r--r--drivers/power/supply/ab8500_charger.c52
-rw-r--r--drivers/power/supply/ab8500_fg.c5
-rw-r--r--drivers/power/supply/adp5061.c2
-rw-r--r--drivers/power/supply/bd99954-charger.c7
-rw-r--r--drivers/power/supply/bq24735-charger.c2
-rw-r--r--drivers/power/supply/bq25890_charger.c10
-rw-r--r--drivers/power/supply/cw2015_battery.c2
-rw-r--r--drivers/power/supply/ingenic-battery.c10
-rw-r--r--drivers/power/supply/lenovo_yoga_c630_battery.c501
-rw-r--r--drivers/power/supply/lp8727_charger.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c4
-rw-r--r--drivers/power/supply/max14656_charger_detector.c2
-rw-r--r--drivers/power/supply/max1720x_battery.c337
-rw-r--r--drivers/power/supply/max77976_charger.c4
-rw-r--r--drivers/power/supply/mm8013.c2
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/power_supply_hwmon.c25
-rw-r--r--drivers/power/supply/power_supply_leds.c174
-rw-r--r--drivers/power/supply/power_supply_sysfs.c3
-rw-r--r--drivers/power/supply/qcom_battmgr.c1
-rw-r--r--drivers/power/supply/rt9455_charger.c4
-rw-r--r--drivers/power/supply/samsung-sdi-battery.c26
-rw-r--r--drivers/power/supply/sbs-charger.c2
-rw-r--r--drivers/power/supply/sbs-manager.c4
-rw-r--r--drivers/pps/clients/pps-gpio.c5
-rw-r--r--drivers/pwm/pwm-samsung.c4
-rw-r--r--drivers/rapidio/rio-driver.c4
-rw-r--r--drivers/regulator/Kconfig4
-rw-r--r--drivers/remoteproc/Kconfig1
-rw-r--r--drivers/remoteproc/imx_rproc.c10
-rw-r--r--drivers/remoteproc/mtk_scp.c23
-rw-r--r--drivers/remoteproc/omap_remoteproc.c46
-rw-r--r--drivers/remoteproc/qcom_common.c87
-rw-r--r--drivers/remoteproc/qcom_common.h10
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c14
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c3
-rw-r--r--drivers/remoteproc/stm32_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c13
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c151
-rw-r--r--drivers/reset/reset-meson-audio-arb.c6
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c6
-rw-r--r--drivers/reset/reset-ti-sci.c6
-rw-r--r--drivers/rpmsg/rpmsg_char.c5
-rw-r--r--drivers/rpmsg/rpmsg_core.c4
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/cio/css.c4
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/sh/maple/maple.c4
-rw-r--r--drivers/siox/siox-core.c2
-rw-r--r--drivers/slimbus/core.c4
-rw-r--r--drivers/soc/qcom/apr.c4
-rw-r--r--drivers/soc/qcom/smem.c26
-rw-r--r--drivers/soundwire/amd_init.c7
-rw-r--r--drivers/soundwire/amd_manager.c13
-rw-r--r--drivers/soundwire/bus.c6
-rw-r--r--drivers/soundwire/bus_type.c6
-rw-r--r--drivers/soundwire/cadence_master.c5
-rw-r--r--drivers/soundwire/debugfs.c157
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c14
-rw-r--r--drivers/soundwire/intel.c25
-rw-r--r--drivers/soundwire/intel.h5
-rw-r--r--drivers/soundwire/intel_ace2x.c39
-rw-r--r--drivers/soundwire/intel_ace2x_debugfs.c2
-rw-r--r--drivers/soundwire/intel_auxdevice.c42
-rw-r--r--drivers/soundwire/intel_bus_common.c2
-rw-r--r--drivers/soundwire/qcom.c22
-rw-r--r--drivers/soundwire/stream.c5
-rw-r--r--drivers/spi/spi-microchip-core.c190
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/spmi/spmi.c2
-rw-r--r--drivers/ssb/main.c4
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h6
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c4
-rw-r--r--drivers/staging/greybus/gbphy.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c2
-rw-r--r--drivers/staging/vme_user/vme.c2
-rw-r--r--drivers/tc/tc-driver.c6
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/thermal/thermal_core.c89
-rw-r--r--drivers/thermal/thermal_core.h10
-rw-r--r--drivers/thunderbolt/domain.c8
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/serial_base_bus.c2
-rw-r--r--drivers/tty/sysrq.c6
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/driver.c2
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/serial/bus.c2
-rw-r--r--drivers/usb/typec/bus.c2
-rw-r--r--drivers/vdpa/vdpa.c2
-rw-r--r--drivers/vfio/mdev/mdev_driver.c2
-rw-r--r--drivers/virtio/virtio.c2
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c5
-rw-r--r--drivers/watchdog/lenovo_se10_wdt.c4
-rw-r--r--drivers/watchdog/rzg2l_wdt.c113
-rw-r--r--drivers/watchdog/rzn1_wdt.c6
-rw-r--r--drivers/watchdog/starfive-wdt.c4
-rw-r--r--drivers/watchdog/watchdog_dev.c2
-rw-r--r--drivers/xen/xenbus/xenbus.h2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c4
-rw-r--r--drivers/zorro/zorro-driver.c4
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/cachefiles/io.c2
-rw-r--r--fs/ceph/caps.c6
-rw-r--r--fs/ceph/dir.c2
-rw-r--r--fs/ceph/mds_client.c4
-rw-r--r--fs/ceph/mds_client.h6
-rw-r--r--fs/ceph/super.c3
-rw-r--r--fs/coredump.c2
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/drop_caches.c2
-rw-r--r--fs/erofs/data.c30
-rw-r--r--fs/erofs/decompressor_lzma.c2
-rw-r--r--fs/erofs/inode.c19
-rw-r--r--fs/erofs/super.c16
-rw-r--r--fs/erofs/zutil.c3
-rw-r--r--fs/exec.c4
-rw-r--r--fs/f2fs/checkpoint.c11
-rw-r--r--fs/f2fs/compress.c2
-rw-r--r--fs/f2fs/data.c27
-rw-r--r--fs/f2fs/extent_cache.c48
-rw-r--r--fs/f2fs/f2fs.h78
-rw-r--r--fs/f2fs/file.c135
-rw-r--r--fs/f2fs/gc.c24
-rw-r--r--fs/f2fs/inline.c28
-rw-r--r--fs/f2fs/inode.c84
-rw-r--r--fs/f2fs/namei.c20
-rw-r--r--fs/f2fs/recovery.c11
-rw-r--r--fs/f2fs/segment.c54
-rw-r--r--fs/f2fs/segment.h3
-rw-r--r--fs/f2fs/super.c11
-rw-r--r--fs/f2fs/sysfs.c12
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/hostfs/hostfs.h7
-rw-r--r--fs/hostfs/hostfs_kern.c78
-rw-r--r--fs/hostfs/hostfs_user.c7
-rw-r--r--fs/inode.c42
-rw-r--r--fs/jffs2/Kconfig3
-rw-r--r--fs/jfs/jfs_dmap.c2
-rw-r--r--fs/jfs/jfs_dtree.c2
-rw-r--r--fs/jfs/jfs_imap.c5
-rw-r--r--fs/jfs/jfs_logmgr.c2
-rw-r--r--fs/jfs/jfs_metapage.c316
-rw-r--r--fs/jfs/jfs_metapage.h16
-rw-r--r--fs/jfs/xattr.c23
-rw-r--r--fs/locks.c9
-rw-r--r--fs/namei.c26
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/netfs/Kconfig18
-rw-r--r--fs/netfs/buffered_read.c14
-rw-r--r--fs/netfs/buffered_write.c12
-rw-r--r--fs/netfs/direct_read.c2
-rw-r--r--fs/netfs/direct_write.c8
-rw-r--r--fs/netfs/fscache_cache.c4
-rw-r--r--fs/netfs/fscache_cookie.c28
-rw-r--r--fs/netfs/fscache_io.c12
-rw-r--r--fs/netfs/fscache_main.c2
-rw-r--r--fs/netfs/fscache_volume.c4
-rw-r--r--fs/netfs/internal.h33
-rw-r--r--fs/netfs/io.c12
-rw-r--r--fs/netfs/main.c4
-rw-r--r--fs/netfs/misc.c4
-rw-r--r--fs/netfs/write_collect.c16
-rw-r--r--fs/netfs/write_issue.c37
-rw-r--r--fs/nilfs2/btnode.c25
-rw-r--r--fs/nilfs2/btree.c4
-rw-r--r--fs/pidfs.c63
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/task_mmu.c1
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/super.c11
-rw-r--r--fs/tests/binfmt_elf_kunit.c (renamed from fs/binfmt_elf_test.c)0
-rw-r--r--fs/tests/exec_kunit.c (renamed from fs/exec_test.c)0
-rw-r--r--fs/xattr.c91
-rw-r--r--fs/xfs/xfs_sysctl.c6
-rw-r--r--include/acpi/acpi_bus.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h19
-rw-r--r--include/dt-bindings/i3c/i3c.h16
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/arm_ffa.h2
-rw-r--r--include/linux/auxiliary_bus.h2
-rw-r--r--include/linux/bitops.h5
-rw-r--r--include/linux/cdx/cdx_bus.h2
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h50
-rw-r--r--include/linux/device.h21
-rw-r--r--include/linux/device/bus.h2
-rw-r--r--include/linux/device/driver.h18
-rw-r--r--include/linux/dfl.h2
-rw-r--r--include/linux/dio.h2
-rw-r--r--include/linux/dmaengine.h3
-rw-r--r--include/linux/eisa.h2
-rw-r--r--include/linux/f2fs_fs.h7
-rw-r--r--include/linux/firewire.h5
-rw-r--r--include/linux/firmware.h12
-rw-r--r--include/linux/fsi.h2
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/gameport.h2
-rw-r--r--include/linux/greybus.h2
-rw-r--r--include/linux/huge_mm.h12
-rw-r--r--include/linux/hwspinlock.h6
-rw-r--r--include/linux/hyperv.h6
-rw-r--r--include/linux/i2c.h23
-rw-r--r--include/linux/i3c/device.h5
-rw-r--r--include/linux/i3c/master.h1
-rw-r--r--include/linux/io_uring_types.h5
-rw-r--r--include/linux/maple.h2
-rw-r--r--include/linux/mcb.h5
-rw-r--r--include/linux/mdio.h19
-rw-r--r--include/linux/memcontrol.h1
-rw-r--r--include/linux/mhi.h2
-rw-r--r--include/linux/mhi_ep.h2
-rw-r--r--include/linux/minmax.h7
-rw-r--r--include/linux/mm.h31
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/moxtet.h9
-rw-r--r--include/linux/nd.h6
-rw-r--r--include/linux/of.h15
-rw-r--r--include/linux/pageblock-flags.h4
-rw-r--r--include/linux/pagemap.h6
-rw-r--r--include/linux/pci-epf.h3
-rw-r--r--include/linux/pci.h6
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/platform_data/i2c-mux-gpio.h2
-rw-r--r--include/linux/platform_device.h15
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/power_supply.h19
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/rio.h2
-rw-r--r--include/linux/scmi_protocol.h2
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/serio.h2
-rw-r--r--include/linux/slimbus.h2
-rw-r--r--include/linux/soc/qcom/apr.h2
-rw-r--r--include/linux/soc/qcom/smem.h2
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h2
-rw-r--r--include/linux/soundwire/sdw.h2
-rw-r--r--include/linux/soundwire/sdw_intel.h5
-rw-r--r--include/linux/soundwire/sdw_type.h2
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/sysctl.h34
-rw-r--r--include/linux/tc.h2
-rw-r--r--include/linux/tee_drv.h2
-rw-r--r--include/linux/uaccess.h46
-rw-r--r--include/linux/userfaultfd_k.h3
-rw-r--r--include/linux/virtio.h5
-rw-r--r--include/linux/vmstat.h4
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/linux/zorro.h2
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/neighbour.h6
-rw-r--r--include/net/netfilter/nf_hooks_lwtunnel.h2
-rw-r--r--include/scsi/scsi_transport_iscsi.h2
-rw-r--r--include/sound/ac97/codec.h5
-rw-r--r--include/sound/tas2781-tlv.h6
-rw-r--r--include/trace/events/mmflags.h7
-rw-r--r--include/trace/events/timer_migration.h16
-rw-r--r--include/uapi/linux/if_xdp.h4
-rw-r--r--include/uapi/linux/mman.h1
-rw-r--r--include/uapi/linux/random.h17
-rw-r--r--include/uapi/linux/um_timetravel.h190
-rw-r--r--include/vdso/datapage.h11
-rw-r--r--include/vdso/getrandom.h46
-rw-r--r--include/xen/xenbus.h5
-rw-r--r--init/Kconfig23
-rw-r--r--init/Makefile2
-rw-r--r--io_uring/io_uring.c13
-rw-r--r--io_uring/io_uring.h2
-rw-r--r--io_uring/msg_ring.c6
-rw-r--r--io_uring/napi.c60
-rw-r--r--io_uring/napi.h10
-rw-r--r--io_uring/timeout.c2
-rw-r--r--io_uring/uring_cmd.c2
-rw-r--r--ipc/ipc_sysctl.c6
-rw-r--r--kernel/bpf/syscall.c4
-rw-r--r--kernel/debug/kdb/kdb_bt.c2
-rw-r--r--kernel/debug/kdb/kdb_io.c6
-rw-r--r--kernel/debug/kdb/kdb_main.c18
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/delayacct.c2
-rw-r--r--kernel/dma/mapping.c2
-rw-r--r--kernel/events/callchain.c2
-rw-r--r--kernel/events/core.c32
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/hung_task.c2
-rw-r--r--kernel/kallsyms.c5
-rw-r--r--kernel/kallsyms_internal.h6
-rw-r--r--kernel/kexec_core.c2
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/latencytop.c2
-rw-r--r--kernel/livepatch/core.c17
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/pid_sysctl.h2
-rw-r--r--kernel/printk/internal.h2
-rw-r--r--kernel/printk/printk.c8
-rw-r--r--kernel/printk/sysctl.c2
-rw-r--r--kernel/sched/core.c6
-rw-r--r--kernel/sched/rt.c8
-rw-r--r--kernel/sched/topology.c2
-rw-r--r--kernel/seccomp.c2
-rw-r--r--kernel/stackleak.c2
-rw-r--r--kernel/sysctl.c64
-rw-r--r--kernel/time/timer.c2
-rw-r--r--kernel/time/timer_migration.c393
-rw-r--r--kernel/time/timer_migration.h27
-rw-r--r--kernel/trace/ftrace.c2
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_events_user.c2
-rw-r--r--kernel/trace/trace_stack.c2
-rw-r--r--kernel/umh.c2
-rw-r--r--kernel/utsname_sysctl.c2
-rw-r--r--kernel/vmcore_info.c4
-rw-r--r--kernel/watchdog.c12
-rw-r--r--lib/cpumask_kunit.c1
-rw-r--r--lib/decompress_bunzip2.c3
-rw-r--r--lib/find_bit_benchmark.c1
-rw-r--r--lib/kobject_uevent.c17
-rw-r--r--lib/test_bitmap.c1
-rw-r--r--lib/test_printf.c1
-rw-r--r--lib/test_scanf.c1
-rw-r--r--lib/usercopy.c30
-rw-r--r--lib/vdso/Kconfig5
-rw-r--r--lib/vdso/getrandom.c251
-rw-r--r--mm/compaction.c6
-rw-r--r--mm/huge_memory.c14
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/ksm.c2
-rw-r--r--mm/madvise.c5
-rw-r--r--mm/memory.c15
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mmap.c30
-rw-r--r--mm/page-writeback.c10
-rw-r--r--mm/page_alloc.c49
-rw-r--r--mm/rmap.c21
-rw-r--r--mm/util.c6
-rw-r--r--mm/vmscan.c9
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/core/filter.c15
-rw-r--r--net/core/neighbour.c18
-rw-r--r--net/core/sysctl_net_core.c20
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/nexthop.c7
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/sysctl_net_ipv4.c30
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv6/addrconf.c16
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sysctl_net_ipv6.c4
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/l2tp/l2tp_core.c32
-rw-r--r--net/mpls/af_mpls.c4
-rw-r--r--net/mptcp/ctrl.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c12
-rw-r--r--net/netfilter/nf_conntrack_standalone.c2
-rw-r--r--net/netfilter/nf_hooks_lwtunnel.c2
-rw-r--r--net/netfilter/nf_log.c2
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c12
-rw-r--r--net/phonet/sysctl.c2
-rw-r--r--net/rds/tcp.c4
-rw-r--r--net/sctp/sysctl.c28
-rw-r--r--net/sunrpc/sysctl.c4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c2
-rw-r--r--net/tipc/udp_media.c5
-rw-r--r--net/unix/af_unix.c41
-rw-r--r--net/unix/unix_bpf.c3
-rw-r--r--net/xdp/xdp_umem.c9
-rw-r--r--rust/Makefile76
-rw-r--r--rust/bindings/bindings_helper.h2
-rw-r--r--rust/bindings/lib.rs1
-rw-r--r--rust/helpers.c35
-rw-r--r--rust/kernel/alloc.rs17
-rw-r--r--rust/kernel/device.rs105
-rw-r--r--rust/kernel/firmware.rs117
-rw-r--r--rust/kernel/init.rs13
-rw-r--r--rust/kernel/lib.rs5
-rw-r--r--rust/kernel/page.rs250
-rw-r--r--rust/kernel/types.rs64
-rw-r--r--rust/kernel/uaccess.rs388
-rw-r--r--rust/kernel/workqueue.rs16
-rw-r--r--rust/macros/lib.rs45
-rw-r--r--rust/macros/module.rs18
-rw-r--r--rust/uapi/lib.rs1
-rw-r--r--samples/kobject/kobject-example.c1
-rw-r--r--samples/kobject/kset-example.c1
-rw-r--r--samples/qmi/qmi_sample_client.c6
-rw-r--r--scripts/Kbuild.include2
-rw-r--r--scripts/Makefile2
-rw-r--r--scripts/Makefile.lib9
-rw-r--r--scripts/Makefile.package14
-rwxr-xr-xscripts/build-version (renamed from init/build-version)0
-rw-r--r--scripts/generate_rust_target.rs17
-rw-r--r--scripts/include/array_size.h (renamed from scripts/kconfig/array_size.h)0
-rw-r--r--scripts/include/hashtable.h (renamed from scripts/kconfig/hashtable.h)0
-rw-r--r--scripts/include/list.h (renamed from scripts/kconfig/list.h)53
-rw-r--r--scripts/include/list_types.h (renamed from scripts/kconfig/list_types.h)0
-rwxr-xr-xscripts/install.sh4
-rw-r--r--scripts/kallsyms.c123
-rw-r--r--scripts/kconfig/conf.c230
-rw-r--r--scripts/kconfig/confdata.c129
-rw-r--r--scripts/kconfig/expr.c128
-rw-r--r--scripts/kconfig/expr.h29
-rwxr-xr-xscripts/kconfig/gconf-cfg.sh2
-rw-r--r--scripts/kconfig/gconf.c26
-rw-r--r--scripts/kconfig/internal.h2
-rw-r--r--scripts/kconfig/lkc.h34
-rw-r--r--scripts/kconfig/lkc_proto.h16
-rwxr-xr-xscripts/kconfig/mconf-cfg.sh2
-rw-r--r--scripts/kconfig/mconf.c38
-rw-r--r--scripts/kconfig/menu.c111
-rw-r--r--scripts/kconfig/mnconf-common.c2
-rw-r--r--scripts/kconfig/mnconf-common.h2
-rwxr-xr-xscripts/kconfig/nconf-cfg.sh2
-rw-r--r--scripts/kconfig/nconf.c38
-rw-r--r--scripts/kconfig/parser.y45
-rw-r--r--scripts/kconfig/preprocess.c4
-rwxr-xr-xscripts/kconfig/qconf-cfg.sh2
-rw-r--r--scripts/kconfig/qconf.cc20
-rw-r--r--scripts/kconfig/qconf.h2
-rw-r--r--scripts/kconfig/symbol.c373
-rw-r--r--scripts/kconfig/tests/choice/Kconfig17
-rw-r--r--scripts/kconfig/tests/choice/__init__.py10
-rw-r--r--scripts/kconfig/tests/choice/alldef_expected_config3
-rw-r--r--scripts/kconfig/tests/choice/allmod_expected_config3
-rw-r--r--scripts/kconfig/tests/choice/allno_expected_config3
-rw-r--r--scripts/kconfig/tests/choice/allyes_expected_config3
-rw-r--r--scripts/kconfig/tests/choice/oldask0_expected_stdout4
-rw-r--r--scripts/kconfig/tests/choice/oldask1_config1
-rw-r--r--scripts/kconfig/tests/choice/oldask1_expected_stdout9
-rw-r--r--scripts/kconfig/tests/choice_value_with_m_dep/Kconfig21
-rw-r--r--scripts/kconfig/tests/choice_value_with_m_dep/__init__.py16
-rw-r--r--scripts/kconfig/tests/choice_value_with_m_dep/config2
-rw-r--r--scripts/kconfig/tests/choice_value_with_m_dep/expected_config3
-rw-r--r--scripts/kconfig/tests/choice_value_with_m_dep/expected_stdout4
-rw-r--r--scripts/kconfig/tests/err_recursive_dep/expected_stderr36
-rw-r--r--scripts/kconfig/tests/inter_choice/Kconfig25
-rw-r--r--scripts/kconfig/tests/inter_choice/__init__.py15
-rw-r--r--scripts/kconfig/tests/inter_choice/defconfig1
-rw-r--r--scripts/kconfig/tests/inter_choice/expected_config4
-rw-r--r--scripts/kconfig/util.c4
-rwxr-xr-xscripts/link-vmlinux.sh105
-rwxr-xr-xscripts/make_fit.py86
-rw-r--r--scripts/mod/list.h213
-rw-r--r--scripts/mod/modpost.c67
-rw-r--r--scripts/mod/modpost.h2
-rw-r--r--scripts/package/PKGBUILD108
-rwxr-xr-xscripts/package/builddeb2
-rwxr-xr-xscripts/package/buildtar2
-rwxr-xr-xscripts/package/gen-diff-patch2
-rwxr-xr-xscripts/package/install-extmod-build5
-rw-r--r--scripts/package/kernel.spec2
-rwxr-xr-xscripts/package/mkdebian44
-rwxr-xr-xscripts/package/mkspec27
-rwxr-xr-xscripts/remove-stale-files18
-rwxr-xr-xscripts/rust_is_available.sh33
-rw-r--r--scripts/rust_is_available_bindgen_0_66.h2
-rwxr-xr-xscripts/rust_is_available_test.py59
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/apparmor/file.c13
-rw-r--r--security/apparmor/include/cred.h20
-rw-r--r--security/apparmor/lsm.c16
-rw-r--r--security/apparmor/mount.c2
-rw-r--r--security/apparmor/policy.c2
-rw-r--r--security/apparmor/policy_unpack.c43
-rw-r--r--security/apparmor/policy_unpack_test.c1
-rw-r--r--security/landlock/cred.c11
-rw-r--r--security/min_addr.c2
-rw-r--r--security/yama/yama_lsm.c2
-rw-r--r--sound/ac97/bus.c4
-rw-r--r--sound/core/seq_device.c4
-rw-r--r--sound/core/ump.c13
-rw-r--r--sound/firewire/amdtp-stream.c3
-rw-r--r--sound/hda/hda_bus_type.c2
-rw-r--r--sound/pci/hda/patch_realtek.c15
-rw-r--r--sound/pci/hda/samsung_helper.c310
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c7
-rw-r--r--sound/soc/codecs/arizona.c12
-rw-r--r--sound/soc/codecs/tas2781-fmwlib.c2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c46
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ssp-common.c9
-rw-r--r--sound/soc/intel/common/soc-intel-quirks.h2
-rw-r--r--sound/soc/sof/amd/pci-vangogh.c1
-rw-r--r--sound/soc/sof/imx/imx8m.c2
-rw-r--r--sound/soc/sof/intel/hda-loader.c20
-rw-r--r--sound/soc/sof/intel/hda.c17
-rw-r--r--sound/soc/sof/ipc4-topology.c18
-rw-r--r--sound/soc/sof/sof-client.c4
-rw-r--r--sound/soc/tegra/Kconfig1
-rw-r--r--sound/usb/mixer.c7
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--tools/bpf/bpftool/prog.c2
-rw-r--r--tools/bpf/resolve_btfids/main.c2
-rw-r--r--tools/build/Makefile.feature18
-rw-r--r--tools/include/asm/rwonce.h0
-rw-r--r--tools/include/linux/bitmap.h17
-rw-r--r--tools/include/uapi/linux/if_xdp.h4
-rw-r--r--tools/include/uapi/linux/mman.h1
-rw-r--r--tools/lib/bitmap.c20
-rw-r--r--tools/lib/bpf/btf_dump.c8
-rw-r--r--tools/perf/Makefile.config13
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c1
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/dso.h5
-rw-r--r--tools/perf/util/unwind-libunwind-local.c2
-rw-r--r--tools/testing/radix-tree/Makefile4
-rw-r--r--tools/testing/radix-tree/bitmap.c23
-rw-r--r--tools/testing/selftests/arm64/abi/ptrace.c2
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch641
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_sleep.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c85
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_metadata.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c4
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c4
-rw-r--r--tools/testing/selftests/landlock/base_test.c74
-rw-r--r--tools/testing/selftests/landlock/config1
-rwxr-xr-xtools/testing/selftests/livepatch/test-livepatch.sh138
-rwxr-xr-xtools/testing/selftests/livepatch/test-syscall.sh5
-rwxr-xr-xtools/testing/selftests/livepatch/test-sysfs.sh48
-rw-r--r--tools/testing/selftests/mm/.gitignore1
-rw-r--r--tools/testing/selftests/mm/Makefile1
-rw-r--r--tools/testing/selftests/mm/droppable.c53
-rw-r--r--tools/testing/selftests/mm/va_high_addr_switch.c16
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh18
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setns_test.c258
-rw-r--r--tools/testing/selftests/vDSO/.gitignore2
-rw-r--r--tools/testing/selftests/vDSO/Makefile18
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_chacha.c43
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getrandom.c288
-rw-r--r--tools/tracing/latency/Makefile.config3
-rw-r--r--tools/tracing/rtla/Makefile.config3
-rw-r--r--tools/verification/rv/Makefile.config3
-rw-r--r--usr/Makefile4
1145 files changed, 21456 insertions, 8646 deletions
diff --git a/.gitignore b/.gitignore
index c59dc60ba62e..7902adf4f7f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -93,6 +93,12 @@ modules.order
/tar-install/
#
+# pacman files (make pacman-pkg)
+#
+/PKGBUILD
+/pacman/
+
+#
# We don't want to ignore the following even if they are dot-files
#
!.clang-format
diff --git a/.mailmap b/.mailmap
index 38f8bed507a2..e51d76df75c2 100644
--- a/.mailmap
+++ b/.mailmap
@@ -260,6 +260,7 @@ Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com>
+James Clark <james.clark@linaro.org> <james.clark@arm.com>
James E Wilson <wilson@specifix.com>
James Hogan <jhogan@kernel.org> <james@albanarts.com>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
@@ -474,6 +475,8 @@ Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
+Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
+Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
Neeraj Upadhyay <neeraj.upadhyay@kernel.org> <quic_neeraju@quicinc.com>
Neeraj Upadhyay <neeraj.upadhyay@kernel.org> <neeraju@codeaurora.org>
Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index a5df9b4910dc..3735d868013d 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -47,6 +47,14 @@ Description:
disabled when the feature is used. See
Documentation/livepatch/livepatch.rst for more information.
+What: /sys/kernel/livepatch/<patch>/replace
+Date: Jun 2024
+KernelVersion: 6.11.0
+Contact: live-patching@vger.kernel.org
+Description:
+ An attribute which indicates whether the patch supports
+ atomic-replace.
+
What: /sys/kernel/livepatch/<patch>/<object>
Date: Nov 2014
KernelVersion: 3.19.0
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index c1134ad5f06d..f1384c7b59c9 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3830,9 +3830,6 @@
noalign [KNL,ARM]
- noaltinstr [S390,EARLY] Disables alternative instructions
- patching (CPU alternatives feature).
-
noapic [SMP,APIC,EARLY] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 7fd43947832f..f8bc1630eba0 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -454,7 +454,7 @@ ignore-unaligned-usertrap
On architectures where unaligned accesses cause traps, and where this
feature is supported (``CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN``;
-currently, ``arc`` and ``loongarch``), controls whether all
+currently, ``arc``, ``parisc`` and ``loongarch``), controls whether all
unaligned traps are logged.
= =============================================================
diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst
index 02eb4d98b7de..3db60a0911df 100644
--- a/Documentation/arch/riscv/hwprobe.rst
+++ b/Documentation/arch/riscv/hwprobe.rst
@@ -264,3 +264,5 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS`: An unsigned long which
represent the highest userspace virtual address usable.
+
+* :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of `time CSR`.
diff --git a/Documentation/arch/riscv/vm-layout.rst b/Documentation/arch/riscv/vm-layout.rst
index e476b4386bd9..077b968dcc81 100644
--- a/Documentation/arch/riscv/vm-layout.rst
+++ b/Documentation/arch/riscv/vm-layout.rst
@@ -47,11 +47,12 @@ RISC-V Linux Kernel SV39
| Kernel-space virtual memory, shared between all processes:
____________________________________________________________|___________________________________________________________
| | | |
- ffffffc6fea00000 | -228 GB | ffffffc6feffffff | 6 MB | fixmap
- ffffffc6ff000000 | -228 GB | ffffffc6ffffffff | 16 MB | PCI io
- ffffffc700000000 | -228 GB | ffffffc7ffffffff | 4 GB | vmemmap
- ffffffc800000000 | -224 GB | ffffffd7ffffffff | 64 GB | vmalloc/ioremap space
- ffffffd800000000 | -160 GB | fffffff6ffffffff | 124 GB | direct mapping of all physical memory
+ ffffffc4fea00000 | -236 GB | ffffffc4feffffff | 6 MB | fixmap
+ ffffffc4ff000000 | -236 GB | ffffffc4ffffffff | 16 MB | PCI io
+ ffffffc500000000 | -236 GB | ffffffc5ffffffff | 4 GB | vmemmap
+ ffffffc600000000 | -232 GB | ffffffd5ffffffff | 64 GB | vmalloc/ioremap space
+ ffffffd600000000 | -168 GB | fffffff5ffffffff | 128 GB | direct mapping of all physical memory
+ | | | |
fffffff700000000 | -36 GB | fffffffeffffffff | 32 GB | kasan
__________________|____________|__________________|_________|____________________________________________________________
|
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
index c960c8e0a9a5..08b89b62c505 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
@@ -30,7 +30,7 @@ description: |
maintainers:
- Mike Leach <mike.leach@linaro.org>
- Suzuki K Poulose <suzuki.poulose@arm.com>
- - James Clark <james.clark@arm.com>
+ - James Clark <james.clark@linaro.org>
- Mao Jinlong <quic_jinlmao@quicinc.com>
- Hao Zhang <quic_hazha@quicinc.com>
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
index 6745b4cc8f1c..d50a60368e27 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
@@ -29,7 +29,7 @@ description: |
maintainers:
- Mike Leach <mike.leach@linaro.org>
- Suzuki K Poulose <suzuki.poulose@arm.com>
- - James Clark <james.clark@arm.com>
+ - James Clark <james.clark@linaro.org>
- Mao Jinlong <quic_jinlmao@quicinc.com>
- Hao Zhang <quic_hazha@quicinc.com>
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
index 765ca155c83a..5192c93fbd67 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
@@ -14,7 +14,13 @@ allOf:
properties:
compatible:
- const: samsung,atna33xc20
+ oneOf:
+ # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
+ - const: samsung,atna33xc20
+ # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
+ - items:
+ - const: samsung,atna45af01
+ - const: samsung,atna33xc20
enable-gpios: true
port: true
diff --git a/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
new file mode 100644
index 000000000000..902a11f65be2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/fsl,imx-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Direct Memory Access (DMA) Controller for i.MX
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx1-dma
+ - fsl,imx21-dma
+ - fsl,imx27-dma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: DMA complete interrupt
+ - description: DMA Error interrupt
+ minItems: 1
+
+ "#dma-cells":
+ const: 1
+
+ dma-channels:
+ const: 16
+
+ dma-requests:
+ description: Number of DMA requests supported.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#dma-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ dma-controller@10001000 {
+ compatible = "fsl,imx27-dma";
+ reg = <0x10001000 0x1000>;
+ interrupts = <32 33>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-dma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-dma.txt
deleted file mode 100644
index 1c9929d53727..000000000000
--- a/Documentation/devicetree/bindings/dma/fsl-imx-dma.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Freescale Direct Memory Access (DMA) Controller for i.MX
-
-This document will only describe differences to the generic DMA Controller and
-DMA request bindings as described in dma/dma.txt .
-
-* DMA controller
-
-Required properties:
-- compatible : Should be "fsl,<chip>-dma". chip can be imx1, imx21 or imx27
-- reg : Should contain DMA registers location and length
-- interrupts : First item should be DMA interrupt, second one is optional and
- should contain DMA Error interrupt
-- #dma-cells : Has to be 1. imx-dma does not support anything else.
-
-Optional properties:
-- dma-channels : Number of DMA channels supported. Should be 16.
-- #dma-channels : deprecated
-- dma-requests : Number of DMA requests supported.
-- #dma-requests : deprecated
-
-Example:
-
- dma: dma@10001000 {
- compatible = "fsl,imx27-dma";
- reg = <0x10001000 0x1000>;
- interrupts = <32 33>;
- #dma-cells = <1>;
- dma-channels = <16>;
- };
-
-
-* DMA client
-
-Clients have to specify the DMA requests with phandles in a list.
-
-Required properties:
-- dmas: List of one or more DMA request specifiers. One DMA request specifier
- consists of a phandle to the DMA controller followed by the integer
- specifying the request line.
-- dma-names: List of string identifiers for the DMA requests. For the correct
- names, have a look at the specific client driver.
-
-Example:
-
- sdhci1: sdhci@10013000 {
- ...
- dmas = <&dma 7>;
- dma-names = "rx-tx";
- ...
- };
diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
deleted file mode 100644
index da371c4d406c..000000000000
--- a/Documentation/devicetree/bindings/dma/fsl-qdma.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-NXP Layerscape SoC qDMA Controller
-==================================
-
-This device follows the generic DMA bindings defined in dma/dma.txt.
-
-Required properties:
-
-- compatible: Must be one of
- "fsl,ls1021a-qdma": for LS1021A Board
- "fsl,ls1028a-qdma": for LS1028A Board
- "fsl,ls1043a-qdma": for ls1043A Board
- "fsl,ls1046a-qdma": for ls1046A Board
-- reg: Should contain the register's base address and length.
-- interrupts: Should contain a reference to the interrupt used by this
- device.
-- interrupt-names: Should contain interrupt names:
- "qdma-queue0": the block0 interrupt
- "qdma-queue1": the block1 interrupt
- "qdma-queue2": the block2 interrupt
- "qdma-queue3": the block3 interrupt
- "qdma-error": the error interrupt
-- fsl,dma-queues: Should contain number of queues supported.
-- dma-channels: Number of DMA channels supported
-- block-number: the virtual block number
-- block-offset: the offset of different virtual block
-- status-sizes: status queue size of per virtual block
-- queue-sizes: command queue size of per virtual block, the size number
- based on queues
-
-Optional properties:
-
-- dma-channels: Number of DMA channels supported by the controller.
-- big-endian: If present registers and hardware scatter/gather descriptors
- of the qDMA are implemented in big endian mode, otherwise in little
- mode.
-
-Examples:
-
- qdma: dma-controller@8390000 {
- compatible = "fsl,ls1021a-qdma";
- reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */
- <0x0 0x8389000 0x0 0x1000>, /* Status regs */
- <0x0 0x838a000 0x0 0x2000>; /* Block regs */
- interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "qdma-error",
- "qdma-queue0", "qdma-queue1";
- dma-channels = <8>;
- block-number = <2>;
- block-offset = <0x1000>;
- fsl,dma-queues = <2>;
- status-sizes = <64>;
- queue-sizes = <64 64>;
- big-endian;
- };
-
-DMA clients must use the format described in dma/dma.txt file.
diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.yaml b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
new file mode 100644
index 000000000000..1b9ebdbe528a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/fsl-qdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP Layerscape SoC qDMA Controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,ls1021a-qdma
+ - fsl,ls1028a-qdma
+ - fsl,ls1043a-qdma
+ - fsl,ls1046a-qdma
+
+ reg:
+ items:
+ - description: Controller regs
+ - description: Status regs
+ - description: Block regs
+
+ interrupts:
+ minItems: 2
+ maxItems: 5
+
+ interrupt-names:
+ minItems: 2
+ items:
+ - const: qdma-error
+ - const: qdma-queue0
+ - const: qdma-queue1
+ - const: qdma-queue2
+ - const: qdma-queue3
+
+ dma-channels:
+ minimum: 1
+ maximum: 64
+
+ fsl,dma-queues:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Should contain number of queues supported.
+ minimum: 1
+ maximum: 4
+
+ block-number:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: the number of virtual blocks
+
+ block-offset:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: the offset between virtual blocks
+
+ status-sizes:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: status queue size per virtual block
+
+ queue-sizes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ command queue size per virtual block; the number of entries
+ matches the number of queues given in fsl,dma-queues
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If present, the registers and hardware scatter/gather descriptors
+ of the qDMA are implemented in big endian mode; otherwise they are
+ in little endian mode.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - fsl,dma-queues
+ - block-number
+ - block-offset
+ - status-sizes
+ - queue-sizes
+
+allOf:
+ - $ref: dma-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,ls1028a-qdma
+ - fsl,ls1043a-qdma
+ - fsl,ls1046a-qdma
+ then:
+ properties:
+ interrupts:
+ minItems: 5
+ interrupt-names:
+ minItems: 5
+ else:
+ properties:
+ interrupts:
+ maxItems: 3
+ interrupt-names:
+ maxItems: 3
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ dma-controller@8390000 {
+ compatible = "fsl,ls1021a-qdma";
+ reg = <0x8388000 0x1000>, /* Controller regs */
+ <0x8389000 0x1000>, /* Status regs */
+ <0x838a000 0x2000>; /* Block regs */
+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "qdma-error", "qdma-queue0", "qdma-queue1";
+ #dma-cells = <1>;
+ dma-channels = <8>;
+ block-number = <2>;
+ block-offset = <0x1000>;
+ status-sizes = <64>;
+ queue-sizes = <64 64>;
+ big-endian;
+ fsl,dma-queues = <2>;
+ };
+
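As with the text binding it replaces, clients of the qDMA use the generic one-cell specifier (#dma-cells = <1>). A minimal sketch, assuming the controller above is labelled `qdma`; the consumer node, channel number and request name are purely illustrative:

    dma-client {
        dmas = <&qdma 30>;
        dma-names = "tx";
    };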
diff --git a/Documentation/devicetree/bindings/dma/sprd,sc9860-dma.yaml b/Documentation/devicetree/bindings/dma/sprd,sc9860-dma.yaml
new file mode 100644
index 000000000000..94647219c021
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sprd,sc9860-dma.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/sprd,sc9860-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC9860 DMA controller
+
+description: |
+ There are three DMA controllers: AP DMA, AON DMA and AGCP DMA. The AGCP
+ DMA controller may optionally omit its IRQ; when the IRQ is not
+ requested, DMA interrupts cannot resume the system, which saves
+ system power.
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ const: sprd,sc9860-dma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: DMA enable clock
+ - description: optional ashb_eb clock, only for the AGCP DMA controller
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: enable
+ - const: ashb_eb
+
+ '#dma-cells':
+ const: 1
+
+ dma-channels:
+ const: 32
+
+ '#dma-channels':
+ const: 32
+ deprecated: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#dma-cells'
+ - dma-channels
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sprd,sc9860-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ /* AP DMA controller */
+ dma-controller@20100000 {
+ compatible = "sprd,sc9860-dma";
+ reg = <0x20100000 0x4000>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apahb_gate CLK_DMA_EB>;
+ clock-names = "enable";
+ #dma-cells = <1>;
+ dma-channels = <32>;
+ };
+
+ /* AGCP DMA controller */
+ dma-controller@41580000 {
+ compatible = "sprd,sc9860-dma";
+ reg = <0x41580000 0x4000>;
+ clocks = <&agcp_gate CLK_AGCP_DMAAP_EB>,
+ <&agcp_gate CLK_AGCP_AP_ASHB_EB>;
+ clock-names = "enable", "ashb_eb";
+ #dma-cells = <1>;
+ dma-channels = <32>;
+ };
+...
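The client format from the removed text binding still applies: each entry in `dmas` is the controller phandle plus one cell carrying the slave id. A sketch based on that removed example, assuming the AP DMA node is labelled `apdma`:

    spi@70a00000 {
        /* one cell after the phandle: the slave id */
        dmas = <&apdma 11>, <&apdma 12>;
        dma-names = "rx_chn", "tx_chn";
    };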
diff --git a/Documentation/devicetree/bindings/dma/sprd-dma.txt b/Documentation/devicetree/bindings/dma/sprd-dma.txt
deleted file mode 100644
index c7e9b5fd50e7..000000000000
--- a/Documentation/devicetree/bindings/dma/sprd-dma.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-* Spreadtrum DMA controller
-
-This binding follows the generic DMA bindings defined in dma.txt.
-
-Required properties:
-- compatible: Should be "sprd,sc9860-dma".
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain one interrupt shared by all channel.
-- #dma-cells: must be <1>. Used to represent the number of integer
- cells in the dmas property of client device.
-- dma-channels : Number of DMA channels supported. Should be 32.
-- clock-names: Should contain the clock of the DMA controller.
-- clocks: Should contain a clock specifier for each entry in clock-names.
-
-Deprecated properties:
-- #dma-channels : Number of DMA channels supported. Should be 32.
-
-Example:
-
-Controller:
-apdma: dma-controller@20100000 {
- compatible = "sprd,sc9860-dma";
- reg = <0x20100000 0x4000>;
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- dma-channels = <32>;
- clock-names = "enable";
- clocks = <&clk_ap_ahb_gates 5>;
-};
-
-
-Client:
-DMA clients connected to the Spreadtrum DMA controller must use the format
-described in the dma.txt file, using a two-cell specifier for each channel.
-The two cells in order are:
-1. A phandle pointing to the DMA controller.
-2. The slave id.
-
-spi0: spi@70a00000{
- ...
- dma-names = "rx_chn", "tx_chn";
- dmas = <&apdma 11>, <&apdma 12>;
- ...
-};
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma.yaml
index ff935a0068ec..11a289f1d505 100644
--- a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/dma/st,stm32-dma.yaml#
+$id: http://devicetree.org/schemas/dma/stm32/st,stm32-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics STM32 DMA Controller
@@ -53,7 +53,7 @@ maintainers:
- Amelie Delaunay <amelie.delaunay@foss.st.com>
allOf:
- - $ref: dma-controller.yaml#
+ - $ref: /schemas/dma/dma-controller.yaml#
properties:
"#dma-cells":
diff --git a/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml
new file mode 100644
index 000000000000..7fdc44b2e646
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/stm32/st,stm32-dma3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 DMA3 Controller
+
+description: |
+ The STM32 DMA3 is a direct memory access controller with different features
+ depending on its hardware configuration.
+ It is either called LPDMA (Low Power), GPDMA (General Purpose) or HPDMA (High
+ Performance).
+ Its hardware configuration registers allow its features to be discovered dynamically.
+
+ GPDMA and HPDMA support 16 independent DMA channels, LPDMA only 4.
+ GPDMA and HPDMA support 256 DMA requests from peripherals, LPDMA only 8.
+
+ Bindings are generic for these 3 STM32 DMA3 configurations.
+
+ DMA clients connected to the STM32 DMA3 controller must use the format
+ described in "#dma-cells" property description below, using a three-cell
+ specifier for each channel.
+
+maintainers:
+ - Amelie Delaunay <amelie.delaunay@foss.st.com>
+
+allOf:
+ - $ref: /schemas/dma/dma-controller.yaml#
+
+properties:
+ compatible:
+ const: st,stm32mp25-dma3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 4
+ maxItems: 16
+ description:
+ Should contain all of the per-channel DMA interrupts in ascending order
+ with respect to the DMA channel index.
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ "#dma-cells":
+ const: 3
+ description: |
+ Specifies the number of cells needed to provide DMA controller specific
+ information.
+ The first cell is the request line number.
+ The second cell is a 32-bit mask specifying the DMA channel requirements:
+ -bit 0-1: The priority level
+ 0x0: low priority, low weight
+ 0x1: low priority, mid weight
+ 0x2: low priority, high weight
+ 0x3: high priority
+ -bit 4-7: The FIFO requirement for queuing source/destination transfers
+ 0x0: no FIFO requirement/any channel can fit
+ 0x2: FIFO of 8 bytes (2^(2+1))
+ 0x4: FIFO of 32 bytes (2^(4+1))
+ 0x6: FIFO of 128 bytes (2^(6+1))
+ 0x7: FIFO of 256 bytes (2^(7+1))
+ The third cell is a 32-bit mask specifying the DMA transfer requirements:
+ -bit 0: The source incrementing burst
+ 0x0: fixed burst
+ 0x1: contiguously incremented burst
+ -bit 1: The source allocated port
+ 0x0: port 0 is allocated to the source transfer
+ 0x1: port 1 is allocated to the source transfer
+ -bit 4: The destination incrementing burst
+ 0x0: fixed burst
+ 0x1: contiguously incremented burst
+ -bit 5: The destination allocated port
+ 0x0: port 0 is allocated to the destination transfer
+ 0x1: port 1 is allocated to the destination transfer
+ -bit 8: The type of hardware request
+ 0x0: burst
+ 0x1: block
+ -bit 9: The control mode
+ 0x0: DMA controller control mode
+ 0x1: peripheral control mode
+ -bit 12-13: The transfer complete event mode
+ 0x0: at block level, transfer complete event is generated at the end
+ of a block
+ 0x2: at LLI level, the transfer complete event is generated at the end
+ of the LLI transfer, including the update of the LLI if any
+ 0x3: at channel level, the transfer complete event is generated at the
+ end of the last LLI
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - "#dma-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/st,stm32mp25-rcc.h>
+ dma-controller@40400000 {
+ compatible = "st,stm32mp25-dma3";
+ reg = <0x40400000 0x1000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_HPDMA1>;
+ #dma-cells = <3>;
+ };
+...
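To illustrate the three-cell client encoding described above, a hedged sketch follows; the request line, unit address and mask values are illustrative only, and the controller is assumed to be labelled `hpdma1`:

    serial@40330000 {
        /* <request-line channel-requirements transfer-requirements>
         * 0x62: bits 4-7 = 0x6 (FIFO of 128 bytes), bits 0-1 = 0x2 (low priority, high weight)
         * 0x10: bit 4 set = destination contiguously incremented burst, all other bits default
         */
        dmas = <&hpdma1 21 0x62 0x10>;
        dma-names = "rx";
    };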
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml
index ddf82bf1e71a..f26c914a3a9a 100644
--- a/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml
+++ b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/dma/st,stm32-dmamux.yaml#
+$id: http://devicetree.org/schemas/dma/stm32/st,stm32-dmamux.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics STM32 DMA MUX (DMA request router)
@@ -10,7 +10,7 @@ maintainers:
- Amelie Delaunay <amelie.delaunay@foss.st.com>
allOf:
- - $ref: dma-router.yaml#
+ - $ref: /schemas/dma/dma-router.yaml#
properties:
"#dma-cells":
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml b/Documentation/devicetree/bindings/dma/stm32/st,stm32-mdma.yaml
index 3874544dfa74..45fe91db11db 100644
--- a/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml
+++ b/Documentation/devicetree/bindings/dma/stm32/st,stm32-mdma.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/dma/st,stm32-mdma.yaml#
+$id: http://devicetree.org/schemas/dma/stm32/st,stm32-mdma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics STM32 MDMA Controller
@@ -53,7 +53,7 @@ maintainers:
- Amelie Delaunay <amelie.delaunay@foss.st.com>
allOf:
- - $ref: dma-controller.yaml#
+ - $ref: /schemas/dma/dma-controller.yaml#
properties:
"#dma-cells":
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
index f34cc7ad5a00..4a93d1f78f93 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
@@ -57,6 +57,9 @@ properties:
last value used.
$ref: /schemas/types.yaml#/definitions/uint32
+ settle-time-us:
+ description: Delay to wait before doing any transfer when a new bus gets selected.
+
allOf:
- $ref: i2c-mux.yaml
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
index daf4e71b8e7f..c33ae7b63b84 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -31,6 +31,8 @@ properties:
- qcom,sm6350-cci
- qcom,sm8250-cci
- qcom,sm8450-cci
+ - qcom,sm8550-cci
+ - qcom,sm8650-cci
- const: qcom,msm8996-cci # CCI v2
"#address-cells":
@@ -195,6 +197,24 @@ allOf:
- const: cpas_ahb
- const: cci
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8550-cci
+ - qcom,sm8650-cci
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: cpas_ahb
+ - const: cci
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/i3c/i3c.yaml b/Documentation/devicetree/bindings/i3c/i3c.yaml
index 113957ebe9f1..e25fa72fd785 100644
--- a/Documentation/devicetree/bindings/i3c/i3c.yaml
+++ b/Documentation/devicetree/bindings/i3c/i3c.yaml
@@ -91,6 +91,7 @@ patternProperties:
- const: 0
- description: |
Shall encode the I3C LVR (Legacy Virtual Register):
+ See include/dt-bindings/i3c/i3c.h
bit[31:8]: unused/ignored
bit[7:5]: I2C device index. Possible values:
* 0: I2C device has a 50 ns spike filter
@@ -153,6 +154,8 @@ additionalProperties: true
examples:
- |
+ #include <dt-bindings/i3c/i3c.h>
+
i3c@d040000 {
compatible = "cdns,i3c-master";
clocks = <&coreclock>, <&i3csysclock>;
@@ -166,7 +169,7 @@ examples:
/* I2C device. */
eeprom@57 {
compatible = "atmel,24c01";
- reg = <0x57 0x0 0x10>;
+ reg = <0x57 0x0 (I2C_FM | I2C_FILTER)>;
pagesize = <0x8>;
};
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
index c0e805e531be..4fc13e3c0f75 100644
--- a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
+++ b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
@@ -20,7 +20,16 @@ properties:
maxItems: 1
clocks:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: Core clock
+ - description: APB clock
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: core
+ - const: apb
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index a03da9489ed9..190889c7b62a 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -120,9 +120,8 @@ patternProperties:
description:
Positive input can be connected to pins AIN1 to AIN16 by choosing the
appropriate value from 1 to 16. Negative input is connected to AINCOM.
- items:
- minimum: 1
- maximum: 16
+ minimum: 1
+ maximum: 16
oneOf:
- required:
diff --git a/Documentation/devicetree/bindings/mailbox/mediatek,gce-props.yaml b/Documentation/devicetree/bindings/mailbox/mediatek,gce-props.yaml
new file mode 100644
index 000000000000..c25eed4606fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/mediatek,gce-props.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/mediatek,gce-props.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Global Command Engine Common Properties
+
+maintainers:
+ - Houlong Wei <houlong.wei@mediatek.com>
+
+description:
+ The Global Command Engine (GCE) is an instruction based, multi-threaded,
+ single-core command dispatcher for MediaTek hardware. The Command Queue
+ (CMDQ) mailbox driver is a driver for GCE, implemented using the Linux
+ mailbox framework. It is used to receive messages from mailbox consumers
+ and configure GCE to execute the specified instruction set in the message.
+ We use mediatek,gce-mailbox.yaml to define the properties for CMDQ mailbox
+ driver. A device driver that uses the CMDQ driver to configure its hardware
+ registers is a mailbox consumer. The mailbox consumer can request a mailbox
+ channel corresponding to a GCE hardware thread and send a message asking
+ that GCE thread to configure its hardware. The mailbox provider can also
+ reserve a mailbox channel so that a specific GCE thread configures GCE
+ hardware registers. This binding defines the common GCE properties for
+ both mailbox providers and consumers.
+
+properties:
+ mediatek,gce-events:
+ description:
+ GCE has an event table in SRAM, consisting of 1024 event IDs (0~1023).
+ Each event ID has a boolean event value with the default value 0.
+ The property mediatek,gce-events is used to obtain the event IDs.
+ Some gce-events are hardware-bound and cannot be changed by software.
+ For instance, in MT8195, when VDO0_MUTEX is stream done, VDO_MUTEX will
+ send an event signal to GCE, setting the value of event ID 597 to 1.
+ Similarly, in MT8188, the value of event ID 574 will be set to 1 when
+ VOD0_MUTEX is stream done.
+ On the other hand, some gce-events are not hardware-bound and can be
+ changed by software. For example, in MT8188, we can set the value of
+ event ID 855, which is not bound to any hardware, to 1 when the driver
+ in the secure world completes a task. However, in MT8195, event ID 855
+ is already bound to VDEC_LAT1, so we need to select another event ID to
+ achieve the same purpose. This event ID can be any ID that is not bound
+ to any hardware and is not yet used in any software driver.
+ To determine if the event ID is bound to the hardware or used by a
+ software driver, refer to the GCE header
+ include/dt-bindings/gce/<chip>-gce.h of each chip.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 32
+
+additionalProperties: true
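A hypothetical consumer node showing how mediatek,gce-events is used; 597 is the MT8195 VDO0_MUTEX stream-done event ID cited in the description above, and the node name is illustrative:

    display-driver {
        /* GCE event table entry set to 1 when VDO0_MUTEX signals stream done */
        mediatek,gce-events = <597>;
    };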
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,cpucp-mbox.yaml b/Documentation/devicetree/bindings/mailbox/qcom,cpucp-mbox.yaml
new file mode 100644
index 000000000000..f7342d04beec
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/qcom,cpucp-mbox.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/qcom,cpucp-mbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. CPUCP Mailbox Controller
+
+maintainers:
+ - Sibi Sankar <quic_sibis@quicinc.com>
+
+description:
+ The CPUSS Control Processor (CPUCP) mailbox controller enables communication
+ between the AP and the CPUCP by acting as a doorbell between them.
+
+properties:
+ compatible:
+ items:
+ - const: qcom,x1e80100-cpucp-mbox
+
+ reg:
+ items:
+ - description: CPUCP rx register region
+ - description: CPUCP tx register region
+
+ interrupts:
+ maxItems: 1
+
+ "#mbox-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ mailbox@17430000 {
+ compatible = "qcom,x1e80100-cpucp-mbox";
+ reg = <0x17430000 0x10000>, <0x18830000 0x10000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
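Since #mbox-cells is 1, a mailbox client selects a doorbell channel with a single cell. A minimal sketch, assuming the controller above is labelled `cpucp_mbox`; the consumer node and channel index are illustrative:

    perf-client {
        mboxes = <&cpucp_mbox 0>;
    };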
diff --git a/Documentation/devicetree/bindings/phy/airoha,en7581-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/airoha,en7581-pcie-phy.yaml
new file mode 100644
index 000000000000..98fcb1b364de
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/airoha,en7581-pcie-phy.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/airoha,en7581-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha EN7581 PCI-Express PHY
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+
+description:
+ The PCIe PHY supports physical layer functionality for PCIe Gen2/Gen3 port.
+
+properties:
+ compatible:
+ const: airoha,en7581-pcie-phy
+
+ reg:
+ items:
+ - description: PCIE analog base address
+ - description: PCIE lane0 base address
+ - description: PCIE lane1 base address
+ - description: PCIE lane0 detection time base address
+ - description: PCIE lane1 detection time base address
+ - description: PCIE Rx AEQ base address
+
+ reg-names:
+ items:
+ - const: csr-2l
+ - const: pma0
+ - const: pma1
+ - const: p0-xr-dtime
+ - const: p1-xr-dtime
+ - const: rx-aeq
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/phy/phy.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ phy@11e80000 {
+ compatible = "airoha,en7581-pcie-phy";
+ #phy-cells = <0>;
+ reg = <0x0 0x1fa5a000 0x0 0xfff>,
+ <0x0 0x1fa5b000 0x0 0xfff>,
+ <0x0 0x1fa5c000 0x0 0xfff>,
+ <0x0 0x1fc10044 0x0 0x4>,
+ <0x0 0x1fc30044 0x0 0x4>,
+ <0x0 0x1fc15030 0x0 0x104>;
+ reg-names = "csr-2l", "pma0", "pma1",
+ "p0-xr-dtime", "p1-xr-dtime",
+ "rx-aeq";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
index 0031fb6a4e76..1a0c436b87a0 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
@@ -41,6 +41,9 @@ properties:
Phandle to a regulator that provides power to the PHY. This
regulator will be managed during the PHY power on/off sequence.
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8qm-hsio.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8qm-hsio.yaml
new file mode 100644
index 000000000000..147bbfd2cd5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8qm-hsio.yaml
@@ -0,0 +1,164 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/fsl,imx8qm-hsio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8QM SoC series High Speed IO (HSIO) SERDES PHY
+
+maintainers:
+ - Richard Zhu <hongxing.zhu@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8qm-hsio
+ - fsl,imx8qxp-hsio
+ reg:
+ items:
+ - description: Base address and length of the PHY block
+ - description: HSIO control and status registers (CSR) of the PHY
+ - description: HSIO CSR of the controller bound to the PHY
+ - description: HSIO CSR for MISC
+
+ reg-names:
+ items:
+ - const: reg
+ - const: phy
+ - const: ctrl
+ - const: misc
+
+ "#phy-cells":
+ const: 3
+ description:
+ The first cell defines the lane index.
+ The second cell defines the PHY type, as defined in include/dt-bindings/phy/phy.h.
+ The third cell defines the controller index, indicating which controller
+ is bound to the lane.
+
+ clocks:
+ minItems: 5
+ maxItems: 14
+
+ clock-names:
+ minItems: 5
+ maxItems: 14
+
+ fsl,hsio-cfg:
+ description: |
+ Specifies the use case of the HSIO module in the hardware design.
+ Regarding the design of the i.MX8QM HSIO subsystem, the HSIO module can
+ be configured for the following three use cases.
+ +---------------------------------------+
+ | | i.MX8QM |
+ |------------------|--------------------|
+ | | Lane0| Lane1| Lane2|
+ |------------------|------|------|------|
+ | pciea-x2-sata | PCIEA| PCIEA| SATA |
+ |------------------|------|------|------|
+ | pciea-x2-pcieb | PCIEA| PCIEA| PCIEB|
+ |------------------|------|------|------|
+ | pciea-pcieb-sata | PCIEA| PCIEB| SATA |
+ +---------------------------------------+
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ pciea-x2-sata, pciea-x2-pcieb, pciea-pcieb-sata]
+ default: pciea-pcieb-sata
+
+ fsl,refclk-pad-mode:
+ description:
+ Specifies the mode of the refclk pad. INPUT (PHY refclock is
+ provided externally via the refclk pad) or OUTPUT (PHY refclock is
+ derived from an SoC internal source and provided on the refclk pad).
+ If this property is absent, the pad is unused (PHY refclock is
+ derived from an SoC internal source).
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ input, output, unused ]
+ default: unused
+
+ power-domains:
+ minItems: 1
+ maxItems: 2
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - "#phy-cells"
+ - clocks
+ - clock-names
+ - fsl,hsio-cfg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qxp-hsio
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: pclk0
+ - const: apb_pclk0
+ - const: phy0_crr
+ - const: ctl0_crr
+ - const: misc_crr
+ power-domains:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-hsio
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: pclk0
+ - const: pclk1
+ - const: apb_pclk0
+ - const: apb_pclk1
+ - const: pclk2
+ - const: epcs_tx
+ - const: epcs_rx
+ - const: apb_pclk2
+ - const: phy0_crr
+ - const: phy1_crr
+ - const: ctl0_crr
+ - const: ctl1_crr
+ - const: ctl2_crr
+ - const: misc_crr
+ power-domains:
+ minItems: 2
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-clock.h>
+ #include <dt-bindings/clock/imx8-lpcg.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+ #include <dt-bindings/phy/phy-imx8-pcie.h>
+
+ phy@5f1a0000 {
+ compatible = "fsl,imx8qxp-hsio";
+ reg = <0x5f1a0000 0x10000>,
+ <0x5f120000 0x10000>,
+ <0x5f140000 0x10000>,
+ <0x5f160000 0x10000>;
+ reg-names = "reg", "phy", "ctrl", "misc";
+ clocks = <&phyx1_lpcg IMX_LPCG_CLK_0>,
+ <&phyx1_lpcg IMX_LPCG_CLK_4>,
+ <&phyx1_crr1_lpcg IMX_LPCG_CLK_4>,
+ <&pcieb_crr3_lpcg IMX_LPCG_CLK_4>,
+ <&misc_crr5_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "pclk0", "apb_pclk0", "phy0_crr", "ctl0_crr", "misc_crr";
+ power-domains = <&pd IMX_SC_R_SERDES_1>;
+ #phy-cells = <3>;
+ fsl,hsio-cfg = "pciea-pcieb-sata";
+ fsl,refclk-pad-mode = "input";
+ };
+...
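A hedged sketch of a consumer of the three-cell PHY specifier described above: lane index, PHY type and controller index. PHY_TYPE_PCIE comes from include/dt-bindings/phy/phy.h; the controller node, unit address and indices are illustrative, assuming the PHY node above is labelled `hsio_phy`:

    pcie@5f010000 {
        /* <lane-index PHY-type controller-index> */
        phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
        phy-names = "pcie-phy";
    };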
diff --git a/Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml
index 9ce7b4c6d208..2ef02aac042a 100644
--- a/Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml
@@ -41,6 +41,12 @@ properties:
Phandle to the system controller node
$ref: /schemas/types.yaml#/definitions/phandle
+ swap-dx-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: |
+ Specifies the ports which will swap the differential-pair (D+/D-),
+ default is not-swapped.
+
# Required child nodes:
patternProperties:
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
index 634cec5d57ea..58ce2d91d28c 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
@@ -19,6 +19,8 @@ properties:
- qcom,ipq6018-qmp-pcie-phy
- qcom,ipq8074-qmp-gen3-pcie-phy
- qcom,ipq8074-qmp-pcie-phy
+ - qcom,ipq9574-qmp-gen3x1-pcie-phy
+ - qcom,ipq9574-qmp-gen3x2-pcie-phy
reg:
items:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index 16634f73bdcf..03dbd02cf9e7 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -91,8 +91,7 @@ properties:
"#clock-cells": true
clock-output-names:
- minItems: 1
- maxItems: 2
+ maxItems: 1
"#phy-cells":
const: 0
@@ -222,14 +221,10 @@ allOf:
- qcom,sm8650-qmp-gen4x2-pcie-phy
then:
properties:
- clock-output-names:
- minItems: 2
"#clock-cells":
const: 1
else:
properties:
- clock-output-names:
- maxItems: 1
"#clock-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
index 325585bc881b..0e0b6cae07bc 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
@@ -20,8 +20,9 @@ properties:
- qcom,ipq8074-qmp-usb3-phy
- qcom,ipq9574-qmp-usb3-phy
- qcom,msm8996-qmp-usb3-phy
- - com,qdu1000-qmp-usb3-uni-phy
+ - qcom,qdu1000-qmp-usb3-uni-phy
- qcom,sa8775p-qmp-usb3-uni-phy
+ - qcom,sc8180x-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
- qcom,sdm845-qmp-usb3-uni-phy
- qcom,sdx55-qmp-usb3-uni-phy
@@ -112,6 +113,7 @@ allOf:
enum:
- qcom,qdu1000-qmp-usb3-uni-phy
- qcom,sa8775p-qmp-usb3-uni-phy
+ - qcom,sc8180x-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
- qcom,sm8150-qmp-usb3-uni-phy
- qcom,sm8250-qmp-usb3-uni-phy
@@ -152,6 +154,7 @@ allOf:
contains:
enum:
- qcom,sa8775p-qmp-usb3-uni-phy
+ - qcom,sc8180x-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
- qcom,x1e80100-qmp-usb3-uni-phy
then:
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
index f042d6af1594..e03b516c698c 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
@@ -15,6 +15,7 @@ if:
contains:
enum:
- qcom,usb-hs-phy-apq8064
+ - qcom,usb-hs-phy-msm8660
- qcom,usb-hs-phy-msm8960
then:
properties:
@@ -41,6 +42,7 @@ properties:
- enum:
- qcom,usb-hs-phy-apq8064
- qcom,usb-hs-phy-msm8226
+ - qcom,usb-hs-phy-msm8660
- qcom,usb-hs-phy-msm8916
- qcom,usb-hs-phy-msm8960
- qcom,usb-hs-phy-msm8974
diff --git a/Documentation/devicetree/bindings/phy/rockchip,rk3399-emmc-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,rk3399-emmc-phy.yaml
new file mode 100644
index 000000000000..3e3729b1c799
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip,rk3399-emmc-phy.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip,rk3399-emmc-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip EMMC PHY
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ const: rockchip,rk3399-emmc-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: emmcclk
+
+ drive-impedance-ohm:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Specifies the drive impedance in Ohm.
+ enum: [33, 40, 50, 66, 100]
+ default: 50
+
+ rockchip,enable-strobe-pulldown:
+ type: boolean
+ description: |
+ Enable internal pull-down for the strobe
+ line. If not set, pull-down is not used.
+
+ rockchip,output-tapdelay-select:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Specifies the phyctrl_otapdlysec register.
+ default: 0x4
+ maximum: 0xf
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@f780 {
+ compatible = "rockchip,rk3399-emmc-phy";
+ reg = <0xf780 0x20>;
+ clocks = <&sdhci>;
+ clock-names = "emmcclk";
+ drive-impedance-ohm = <50>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
deleted file mode 100644
index 57d28c0d5696..000000000000
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Rockchip EMMC PHY
------------------------
-
-Required properties:
- - compatible: rockchip,rk3399-emmc-phy
- - #phy-cells: must be 0
- - reg: PHY register address offset and length in "general
- register files"
-
-Optional properties:
- - clock-names: Should contain "emmcclk". Although this is listed as optional
- (because most boards can get basic functionality without having
- access to it), it is strongly suggested.
- See ../clock/clock-bindings.txt for details.
- - clocks: Should have a phandle to the card clock exported by the SDHCI driver.
- - drive-impedance-ohm: Specifies the drive impedance in Ohm.
- Possible values are 33, 40, 50, 66 and 100.
- If not set, the default value of 50 will be applied.
- - rockchip,enable-strobe-pulldown: Enable internal pull-down for the strobe
- line. If not set, pull-down is not used.
- - rockchip,output-tapdelay-select: Specifies the phyctrl_otapdlysec register.
- If not set, the register defaults to 0x4.
- Maximum value 0xf.
-
-Example:
-
-
-grf: syscon@ff770000 {
- compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
- #address-cells = <1>;
- #size-cells = <1>;
-
-...
-
- emmcphy: phy@f780 {
- compatible = "rockchip,rk3399-emmc-phy";
- reg = <0xf780 0x20>;
- clocks = <&sdhci>;
- clock-names = "emmcclk";
- drive-impedance-ohm = <50>;
- #phy-cells = <0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index 452e584d9812..16321cdd4919 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -25,6 +25,7 @@ description: |
properties:
compatible:
enum:
+ - google,gs101-usb31drd-phy
- samsung,exynos5250-usbdrd-phy
- samsung,exynos5420-usbdrd-phy
- samsung,exynos5433-usbdrd-phy
@@ -57,7 +58,15 @@ properties:
the OF graph bindings specified.
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 3
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: phy
+ - const: pcs
+ - const: pma
samsung,pmu-syscon:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -72,6 +81,19 @@ properties:
description:
VBUS Boost 5V power source.
+ pll-supply:
+ description: Power supply for the USB PLL.
+ dvdd-usb20-supply:
+ description: DVDD power supply for the USB 2.0 phy.
+ vddh-usb20-supply:
+ description: VDDh power supply for the USB 2.0 phy.
+ vdd33-usb20-supply:
+ description: 3.3V power supply for the USB 2.0 phy.
+ vdda-usbdp-supply:
+ description: VDDa power supply for the USB DP phy.
+ vddh-usbdp-supply:
+ description: VDDh power supply for the USB DP phy.
+
required:
- compatible
- clocks
@@ -85,6 +107,40 @@ allOf:
properties:
compatible:
contains:
+ const: google,gs101-usb31drd-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Gate of main PHY clock
+ - description: Gate of PHY reference clock
+ - description: Gate of control interface AXI clock
+ - description: Gate of control interface APB clock
+ - description: Gate of SCL APB clock
+ clock-names:
+ items:
+ - const: phy
+ - const: ref
+ - const: ctrl_aclk
+ - const: ctrl_pclk
+ - const: scl_pclk
+ reg:
+ minItems: 3
+ reg-names:
+ minItems: 3
+ required:
+ - reg-names
+ - pll-supply
+ - dvdd-usb20-supply
+ - vddh-usb20-supply
+ - vdd33-usb20-supply
+ - vdda-usbdp-supply
+ - vddh-usbdp-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
enum:
- samsung,exynos5433-usbdrd-phy
- samsung,exynos7-usbdrd-phy
@@ -100,7 +156,20 @@ allOf:
- const: phy_utmi
- const: phy_pipe
- const: itp
- else:
+ reg:
+ maxItems: 1
+ reg-names:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos5250-usbdrd-phy
+ - samsung,exynos5420-usbdrd-phy
+ - samsung,exynos850-usbdrd-phy
+ then:
properties:
clocks:
minItems: 2
@@ -109,6 +178,10 @@ allOf:
items:
- const: phy
- const: ref
+ reg:
+ maxItems: 1
+ reg-names:
+ maxItems: 1
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml b/Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml
new file mode 100644
index 000000000000..4a06a2642b4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/starfive,jh7110-dphy-tx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Starfive SoC MIPI D-PHY Tx Controller
+
+maintainers:
+ - Keith Zhao <keith.zhao@starfivetech.com>
+ - Shengyang Chen <shengyang.chen@starfivetech.com>
+
+description:
+ The Starfive SoC uses the MIPI DSI D-PHY based on M31 IP to transfer
+ DSI data.
+
+properties:
+ compatible:
+ const: starfive,jh7110-dphy-tx
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: txesc
+
+ resets:
+ items:
+ - description: MIPITX_DPHY_SYS reset
+
+ reset-names:
+ items:
+ - const: sys
+
+ power-domains:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - power-domains
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@295e0000 {
+ compatible = "starfive,jh7110-dphy-tx";
+ reg = <0x295e0000 0x10000>;
+ clocks = <&voutcrg 14>;
+ clock-names = "txesc";
+ resets = <&syscrg 10>;
+ reset-names = "sys";
+ power-domains = <&aon_syscon 0>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17201.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17201.yaml
new file mode 100644
index 000000000000..fe3dd9bd5585
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max17201.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/maxim,max17201.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX17201 fuel gauge
+
+maintainers:
+ - Dimitri Fedrau <dima.fedrau@gmail.com>
+
+allOf:
+ - $ref: power-supply.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: maxim,max17201
+ - items:
+ - enum:
+ - maxim,max17205
+ - const: maxim,max17201
+
+ reg:
+ items:
+ - description: ModelGauge m5 registers
+ - description: Nonvolatile registers
+
+ reg-names:
+ items:
+ - const: m5
+ - const: nvmem
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fuel-gauge@36 {
+ compatible = "maxim,max17201";
+ reg = <0x36>, <0xb>;
+ reg-names = "m5", "nvmem";
+ interrupt-parent = <&gpio0>;
+ interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
index df36e29d974c..57d75acb0b5e 100644
--- a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
@@ -59,6 +59,7 @@ properties:
maxItems: 32
power-domains:
+ minItems: 2
maxItems: 8
fsl,auto-boot:
@@ -99,6 +100,20 @@ allOf:
properties:
fsl,iomuxc-gpr: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qxp-cm4
+ - fsl,imx8qm-cm4
+ then:
+ required:
+ - power-domains
+ else:
+ properties:
+ power-domains: false
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
new file mode 100644
index 000000000000..7fe401a06805
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,sa8775p-pas.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SA8775p Peripheral Authentication Service
+
+maintainers:
+ - Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+description:
+ Qualcomm SA8775p SoC Peripheral Authentication Service loads and boots firmware
+ on the Qualcomm DSP Hexagon cores.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sa8775p-adsp-pas
+ - qcom,sa8775p-cdsp0-pas
+ - qcom,sa8775p-cdsp1-pas
+ - qcom,sa8775p-gpdsp0-pas
+ - qcom,sa8775p-gpdsp1-pas
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: XO clock
+
+ clock-names:
+ items:
+ - const: xo
+
+ qcom,qmp:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Reference to the AOSS side-channel message RAM.
+
+ firmware-name:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ items:
+ - description: Firmware name of the Hexagon core
+
+ memory-region:
+ items:
+ - description: Memory region for main Firmware authentication
+
+ interrupts:
+ maxItems: 5
+
+ interrupt-names:
+ maxItems: 5
+
+required:
+ - compatible
+ - reg
+ - memory-region
+
+allOf:
+ - $ref: /schemas/remoteproc/qcom,pas-common.yaml#
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sa8775p-adsp-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: LCX power domain
+ - description: LMX power domain
+ power-domain-names:
+ items:
+ - const: lcx
+ - const: lmx
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sa8775p-cdsp0-pas
+ - qcom,sa8775p-cdsp1-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: CX power domain
+ - description: MXC power domain
+ - description: NSP0 power domain
+ power-domain-names:
+ items:
+ - const: cx
+ - const: mxc
+ - const: nsp
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sa8775p-gpdsp0-pas
+ - qcom,sa8775p-gpdsp1-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: CX power domain
+ - description: MXC power domain
+ power-domain-names:
+ items:
+ - const: cx
+ - const: mxc
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interconnect/qcom,sa8775p-rpmh.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/mailbox/qcom-ipcc.h>
+ #include <dt-bindings/power/qcom,rpmhpd.h>
+
+ remoteproc@30000000 {
+ compatible = "qcom,sa8775p-adsp-pas";
+ reg = <0x30000000 0x100>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>, <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx", "lmx";
+
+ interconnects = <&lpass_ag_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
+
+ memory-region = <&pil_adsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml
index 9768db8663eb..b51bb863d759 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml
@@ -25,9 +25,6 @@ description: |
host processor (Arm CorePac) to perform the device management of the remote
processor and to communicate with the remote processor.
-allOf:
- - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
-
properties:
compatible:
enum:
@@ -89,41 +86,57 @@ properties:
should be defined as per the generic bindings in,
Documentation/devicetree/bindings/sram/sram.yaml
-if:
- properties:
- compatible:
- enum:
- - ti,j721e-c66-dsp
-then:
- properties:
- reg:
- items:
- - description: Address and Size of the L2 SRAM internal memory region
- - description: Address and Size of the L1 PRAM internal memory region
- - description: Address and Size of the L1 DRAM internal memory region
- reg-names:
- items:
- - const: l2sram
- - const: l1pram
- - const: l1dram
-else:
- if:
- properties:
- compatible:
- enum:
- - ti,am62a-c7xv-dsp
- - ti,j721e-c71-dsp
- - ti,j721s2-c71-dsp
- then:
- properties:
- reg:
- items:
- - description: Address and Size of the L2 SRAM internal memory region
- - description: Address and Size of the L1 DRAM internal memory region
- reg-names:
- items:
- - const: l2sram
- - const: l1dram
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,j721e-c66-dsp
+ then:
+ properties:
+ reg:
+ items:
+ - description: Address and Size of the L2 SRAM internal memory region
+ - description: Address and Size of the L1 PRAM internal memory region
+ - description: Address and Size of the L1 DRAM internal memory region
+ reg-names:
+ items:
+ - const: l2sram
+ - const: l1pram
+ - const: l1dram
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,j721e-c71-dsp
+ - ti,j721s2-c71-dsp
+ then:
+ properties:
+ reg:
+ items:
+ - description: Address and Size of the L2 SRAM internal memory region
+ - description: Address and Size of the L1 DRAM internal memory region
+ reg-names:
+ items:
+ - const: l2sram
+ - const: l1dram
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,am62a-c7xv-dsp
+ then:
+ properties:
+ reg:
+ items:
+ - description: Address and Size of the L2 SRAM internal memory region
+ reg-names:
+ items:
+ - const: l2sram
+
+ - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
required:
- compatible
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 79798c747476..78c6d5b64138 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -176,9 +176,10 @@ allOf:
Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
patternProperties:
- "phy@[0-9a-f]+$":
- description:
- Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+ "^phy@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/phy/rockchip,rk3399-emmc-phy.yaml#
+ unevaluatedProperties: false
- if:
properties:
@@ -292,6 +293,15 @@ examples:
#phy-cells = <0>;
};
+ phy@f780 {
+ compatible = "rockchip,rk3399-emmc-phy";
+ reg = <0xf780 0x20>;
+ clocks = <&sdhci>;
+ clock-names = "emmcclk";
+ drive-impedance-ohm = <50>;
+ #phy-cells = <0>;
+ };
+
u2phy0: usb2phy@e450 {
compatible = "rockchip,rk3399-usb2phy";
reg = <0xe450 0x10>;
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
index a55c8633c32c..76e43c0ce36c 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
@@ -42,7 +42,7 @@ properties:
dmas:
description: |
DMA specifiers for tx and rx dma. DMA fifo mode must be used. See
- the STM32 DMA bindings Documentation/devicetree/bindings/dma/st,stm32-dma.yaml.
+ the STM32 DMA controllers bindings Documentation/devicetree/bindings/dma/stm32/*.yaml.
items:
- description: rx DMA channel
- description: tx DMA channel
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 5d3dc952770d..7913ca9b6b54 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -328,7 +328,9 @@ properties:
- renesas,hs3001
# Renesas ISL29501 time-of-flight sensor
- renesas,isl29501
- # Rohm DH2228FV
+ # Rohm BH2228FV 8 channel DAC
+ - rohm,bh2228fv
+ # Rohm DH2228FV - This device does not exist, use rohm,bh2228fv instead.
- rohm,dh2228fv
# S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
- samsung,24ad0xd1
diff --git a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
index 69845ec32e81..d0eff1ea52b4 100644
--- a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
@@ -21,6 +21,7 @@ properties:
- amlogic,t7-wdt
- items:
- enum:
+ - amlogic,a4-wdt
- amlogic,c3-wdt
- amlogic,s4-wdt
- const: amlogic,t7-wdt
diff --git a/Documentation/devicetree/bindings/watchdog/dlg,da9062-watchdog.yaml b/Documentation/devicetree/bindings/watchdog/dlg,da9062-watchdog.yaml
index c8f698120597..64619ba08d40 100644
--- a/Documentation/devicetree/bindings/watchdog/dlg,da9062-watchdog.yaml
+++ b/Documentation/devicetree/bindings/watchdog/dlg,da9062-watchdog.yaml
@@ -28,7 +28,7 @@ properties:
Add this property to disable the watchdog during suspend.
Only use this option if you can't use the watchdog automatic suspend
function during a suspend (see register CONTROL_B).
-
+
dlg,wdt-sd:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1]
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index ffb17add491a..eba454d1680f 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -29,6 +29,7 @@ properties:
- renesas,r9a07g043-wdt # RZ/G2UL and RZ/Five
- renesas,r9a07g044-wdt # RZ/G2{L,LC}
- renesas,r9a07g054-wdt # RZ/V2L
+ - renesas,r9a08g045-wdt # RZ/G3S
- const: renesas,rzg2l-wdt
- items:
diff --git a/Documentation/gpu/amdgpu/display/dcn-blocks.rst b/Documentation/gpu/amdgpu/display/dcn-blocks.rst
index a3fbd3ea028b..5e34366f6dbe 100644
--- a/Documentation/gpu/amdgpu/display/dcn-blocks.rst
+++ b/Documentation/gpu/amdgpu/display/dcn-blocks.rst
@@ -8,37 +8,22 @@ and the code documentation when it is automatically generated.
DCHUBBUB
--------
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
- :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
- :internal:
-
HUBP
----
.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
- :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
- :internal:
-
DPP
---
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
- :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
:internal:
MPC
@@ -48,10 +33,8 @@ MPC
:doc: overview
.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
- :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
:internal:
+ :no-identifiers: mpcc_blnd_cfg mpcc_alpha_blend_mode
OPP
---
@@ -60,19 +43,13 @@ OPP
:doc: overview
.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
- :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
:internal:
DIO
---
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
- :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
:internal:
diff --git a/Documentation/gpu/amdgpu/display/display-manager.rst b/Documentation/gpu/amdgpu/display/display-manager.rst
index 67a811e6891f..b269ff3f7a54 100644
--- a/Documentation/gpu/amdgpu/display/display-manager.rst
+++ b/Documentation/gpu/amdgpu/display/display-manager.rst
@@ -132,7 +132,7 @@ The DRM blend mode and its elements are then mapped by AMDGPU display manager
(MPC), as follows:
.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
- :functions: mpcc_blnd_cfg
+ :identifiers: mpcc_blnd_cfg
Therefore, the blending configuration for a single MPCC instance on the MPC
tree is defined by :c:type:`mpcc_blnd_cfg`, where
@@ -144,7 +144,7 @@ alpha and plane alpha values. It sets one of the three modes for
:c:type:`MPCC_ALPHA_BLND_MODE`, as described below.
.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
- :functions: mpcc_alpha_blend_mode
+ :identifiers: mpcc_alpha_blend_mode
DM then maps the elements of `enum mpcc_alpha_blend_mode` to those in the DRM
blend formula, as follows:
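
For readers mapping the text above onto code, a minimal C sketch of filling a
blending configuration follows. It is only an illustration: the struct and
enumerator names mirror drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h, but the
particular fields set here and their values are assumptions for the example,
not what amdgpu's display manager actually programs::

  /* Hedged sketch only: configure one MPCC instance for per-pixel alpha
   * blending, roughly matching the DRM "Pre-multiplied" pixel blend mode. */
  #include <linux/string.h>
  #include "mpc.h"	/* struct mpcc_blnd_cfg, enum mpcc_alpha_blend_mode */

  static void example_fill_blnd_cfg(struct mpcc_blnd_cfg *cfg)
  {
  	memset(cfg, 0, sizeof(*cfg));
  	/* DRM "Pre-multiplied" maps to the per-pixel alpha blend mode. */
  	cfg->alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
  	cfg->pre_multiplied_alpha = true;
  	cfg->global_alpha = 0xff;	/* plane alpha: fully opaque */
  	cfg->global_gain = 0xff;	/* only consulted in the combined-gain mode */
  }
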
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 1fb3f5e6193c..71b38a7670f3 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -409,16 +409,9 @@ choices::
"endchoice"
This defines a choice group and accepts any of the above attributes as
-options. A choice can only be of type bool or tristate. If no type is
-specified for a choice, its type will be determined by the type of
-the first choice element in the group or remain unknown if none of the
-choice elements have a type specified, as well.
-
-While a boolean choice only allows a single config entry to be
-selected, a tristate choice also allows any number of config entries
-to be set to 'm'. This can be used if multiple drivers for a single
-hardware exists and only a single driver can be compiled/loaded into
-the kernel, but all drivers can be compiled as modules.
+options.
+
+A choice only allows a single config entry to be selected.
comment::
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 991ce6081e35..be43990f1e7f 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -578,7 +578,7 @@ cc-option
Note: cc-option uses KBUILD_CFLAGS for $(CC) options
cc-option-yn
- cc-option-yn is used to check if gcc supports a given option
+ cc-option-yn is used to check if $(CC) supports a given option
and return "y" if supported, otherwise "n".
Example::
@@ -596,7 +596,7 @@ cc-option-yn
Note: cc-option-yn uses KBUILD_CFLAGS for $(CC) options
cc-disable-warning
- cc-disable-warning checks if gcc supports a given warning and returns
+ cc-disable-warning checks if $(CC) supports a given warning and returns
the commandline switch to disable it. This special function is needed,
because gcc 4.4 and later accept any unknown -Wno-* option and only
warn about it if there is another warning in the source file.
@@ -606,7 +606,7 @@ cc-disable-warning
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
In the above example, -Wno-unused-but-set-variable will be added to
- KBUILD_CFLAGS only if gcc really accepts it.
+ KBUILD_CFLAGS only if $(CC) really accepts it.
gcc-min-version
gcc-min-version tests if the value of $(CONFIG_GCC_VERSION) is greater than
diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst
index 6f03713b7003..2ffaa3cbd63f 100644
--- a/Documentation/locking/hwspinlock.rst
+++ b/Documentation/locking/hwspinlock.rst
@@ -87,6 +87,17 @@ Should be called from a process context (might sleep).
::
+ int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
+
+After verifying the owner of the hwspinlock, release a previously acquired
+hwspinlock; returns 0 on success, or an appropriate error code on failure
+(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific
+hwspinlock).
+
+Should be called from a process context (might sleep).
+
+::
+
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
Lock a previously-assigned hwspinlock with a timeout limit (specified in
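
A short, hedged C sketch of the hwspin_lock_bust() call documented in the hunk
above; only the prototype comes from the documentation, while the recovery
helper, the owner id and the error reporting are illustrative assumptions::

  #include <linux/hwspinlock.h>
  #include <linux/errno.h>
  #include <linux/printk.h>

  /*
   * Hypothetical recovery path: after a remote processor crashes, bust the
   * hwspinlock it may still be holding so the host side can make progress
   * again.  REMOTE_OWNER_ID is an assumed, driver-specific owner id.
   */
  #define REMOTE_OWNER_ID	2

  static int example_bust_stale_lock(struct hwspinlock *hwlock)
  {
  	int ret;

  	ret = hwspin_lock_bust(hwlock, REMOTE_OWNER_ID);
  	if (ret == -EOPNOTSUPP)
  		pr_warn("hwspinlock: bust not supported by this implementation\n");
  	else if (ret)
  		pr_err("hwspinlock: bust failed: %d\n", ret);

  	return ret;
  }

A driver would typically invoke something like this from its crash-recovery
path, after the remote processor that originally took the lock has been
stopped.
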
diff --git a/Documentation/networking/xsk-tx-metadata.rst b/Documentation/networking/xsk-tx-metadata.rst
index bd033fe95cca..e76b0cfc32f7 100644
--- a/Documentation/networking/xsk-tx-metadata.rst
+++ b/Documentation/networking/xsk-tx-metadata.rst
@@ -11,12 +11,16 @@ metadata on the receive side.
General Design
==============
-The headroom for the metadata is reserved via ``tx_metadata_len`` in
-``struct xdp_umem_reg``. The metadata length is therefore the same for
-every socket that shares the same umem. The metadata layout is a fixed UAPI,
-refer to ``union xsk_tx_metadata`` in ``include/uapi/linux/if_xdp.h``.
-Thus, generally, the ``tx_metadata_len`` field above should contain
-``sizeof(union xsk_tx_metadata)``.
+The headroom for the metadata is reserved via ``tx_metadata_len`` and
+``XDP_UMEM_TX_METADATA_LEN`` flag in ``struct xdp_umem_reg``. The metadata
+length is therefore the same for every socket that shares the same umem.
+The metadata layout is a fixed UAPI, refer to ``union xsk_tx_metadata`` in
+``include/uapi/linux/if_xdp.h``. Thus, generally, the ``tx_metadata_len``
+field above should contain ``sizeof(union xsk_tx_metadata)``.
+
+Note that in the original implementation the ``XDP_UMEM_TX_METADATA_LEN``
+flag was not required. Applications might attempt to create a umem with
+the flag first and, if that fails, retry without it.
The headroom and the metadata itself should be located right before
``xdp_desc->addr`` in the umem frame. Within a frame, the metadata
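
To make the umem registration flow described in the hunk above concrete, here
is a hedged userspace sketch of the "try with the flag, then retry without it"
approach. The struct, flag and union names come from
``include/uapi/linux/if_xdp.h``; ``SOL_XDP`` is assumed to be provided by the
libc socket headers, and the chunk size is an arbitrary example value::

  #include <stdint.h>
  #include <string.h>
  #include <sys/socket.h>
  #include <linux/if_xdp.h>

  /* Register "area" (size bytes) as a umem on the AF_XDP socket xsk_fd,
   * reserving headroom for union xsk_tx_metadata in every frame. */
  static int register_umem_with_tx_metadata(int xsk_fd, void *area, uint64_t size)
  {
  	struct xdp_umem_reg mr;

  	memset(&mr, 0, sizeof(mr));
  	mr.addr = (uintptr_t)area;
  	mr.len = size;
  	mr.chunk_size = 4096;		/* illustrative frame size */
  	mr.tx_metadata_len = sizeof(union xsk_tx_metadata);
  	mr.flags = XDP_UMEM_TX_METADATA_LEN;

  	if (!setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
  		return 0;

  	/* Older kernels reject the flag but may still honour tx_metadata_len. */
  	mr.flags = 0;
  	return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
  }
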
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 8d225a9f65a2..3fc63f27c226 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -33,7 +33,7 @@ GNU C 5.1 gcc --version
Clang/LLVM (optional) 13.0.1 clang --version
Rust (optional) 1.78.0 rustc --version
bindgen (optional) 0.65.1 bindgen --version
-GNU make 3.82 make --version
+GNU make 4.0 make --version
bash 4.2 bash --version
binutils 2.25 ld -v
flex 2.5.35 flex --version
@@ -89,14 +89,7 @@ docs on :ref:`Building Linux with Clang/LLVM <kbuild_llvm>`.
Rust (optional)
---------------
-A particular version of the Rust toolchain is required. Newer versions may or
-may not work because the kernel depends on some unstable Rust features, for
-the moment.
-
-Each Rust toolchain comes with several "components", some of which are required
-(like ``rustc``) and some that are optional. The ``rust-src`` component (which
-is optional) needs to be installed to build the kernel. Other components are
-useful for developing.
+A recent version of the Rust compiler is required.
Please see Documentation/rust/quick-start.rst for instructions on how to
satisfy the build requirements of Rust support. In particular, the ``Makefile``
@@ -112,7 +105,7 @@ It depends on ``libclang``.
Make
----
-You will need GNU make 3.82 or later to build the kernel.
+You will need GNU make 4.0 or later to build the kernel.
Bash
----
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index b13e19d84744..750ff371570a 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -18,7 +18,7 @@ Architecture Level of support Constraints
``arm64`` Maintained Little Endian only.
``loongarch`` Maintained \-
``riscv`` Maintained ``riscv64`` only.
-``um`` Maintained ``x86_64`` only.
+``um`` Maintained \-
``x86`` Maintained ``x86_64`` only.
============= ================ ==============================================
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index 4bb6ac12d482..e3f388ef4ee4 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -7,6 +7,14 @@ This document contains useful information to know when working with
the Rust support in the kernel.
+``no_std``
+----------
+
+The Rust support in the kernel can link only `core <https://doc.rust-lang.org/core/>`_,
+but not `std <https://doc.rust-lang.org/std/>`_. Crates for use in the
+kernel must opt into this behavior using the ``#![no_std]`` attribute.
+
+
Code documentation
------------------
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
index cc3f11e0d441..d06a36106cd4 100644
--- a/Documentation/rust/quick-start.rst
+++ b/Documentation/rust/quick-start.rst
@@ -5,17 +5,93 @@ Quick Start
This document describes how to get started with kernel development in Rust.
+There are a few ways to install a Rust toolchain needed for kernel development.
+A simple way is to use the packages from your Linux distribution if they are
+suitable -- the first section below explains this approach. An advantage of
+this approach is that, typically, the distribution's Rust and Clang are built
+against the same LLVM.
+
+Another way is using the prebuilt stable versions of LLVM+Rust provided on
+`kernel.org <https://kernel.org/pub/tools/llvm/rust/>`_. These are the same slim
+and fast LLVM toolchains from :ref:`Getting LLVM <getting_llvm>` with versions
+of Rust added to them that Rust for Linux supports. Two sets are provided: the
+"latest LLVM" and "matching LLVM" (please see the link for more information).
+
+Alternatively, the next two "Requirements" sections explain each component and
+how to install it -- through ``rustup``, via the standalone installers from
+Rust, or by building it from source.
+
+The rest of the document explains other aspects on how to get started.
+
+
+Distributions
+-------------
+
+Arch Linux
+**********
+
+Arch Linux provides recent Rust releases and thus it should generally work out
+of the box, e.g.::
+
+ pacman -S rust rust-src rust-bindgen
+
+
+Debian
+******
+
+Debian Unstable (Sid), outside of the freeze period, provides recent Rust
+releases and thus it should generally work out of the box, e.g.::
+
+ apt install rustc rust-src bindgen rustfmt rust-clippy
+
+
+Fedora Linux
+************
+
+Fedora Linux provides recent Rust releases and thus it should generally work out
+of the box, e.g.::
+
+ dnf install rust rust-src bindgen-cli rustfmt clippy
+
+
+Gentoo Linux
+************
+
+Gentoo Linux (and especially the testing branch) provides recent Rust releases
+and thus it should generally work out of the box, e.g.::
+
+ USE='rust-src rustfmt clippy' emerge dev-lang/rust dev-util/bindgen
+
+``LIBCLANG_PATH`` may need to be set.
+
+
+Nix
+***
+
+Nix (unstable channel) provides recent Rust releases and thus it should
+generally work out of the box, e.g.::
+
+ { pkgs ? import <nixpkgs> {} }:
+ pkgs.mkShell {
+ nativeBuildInputs = with pkgs; [ rustc rust-bindgen rustfmt clippy ];
+ RUST_LIB_SRC = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
+ }
+
+
+openSUSE
+********
+
+openSUSE Slowroll and openSUSE Tumbleweed provide recent Rust releases and thus
+they should generally work out of the box, e.g.::
+
+ zypper install rust rust1.79-src rust-bindgen clang
+
Requirements: Building
----------------------
This section explains how to fetch the tools needed for building.
-Some of these requirements might be available from Linux distributions
-under names like ``rustc``, ``rust-src``, ``rust-bindgen``, etc. However,
-at the time of writing, they are likely not to be recent enough unless
-the distribution tracks the latest releases.
-
To easily check whether the requirements are met, the following target
can be used::
@@ -29,16 +105,15 @@ if that is the case.
rustc
*****
-A particular version of the Rust compiler is required. Newer versions may or
-may not work because, for the moment, the kernel depends on some unstable
-Rust features.
+A recent version of the Rust compiler is required.
If ``rustup`` is being used, enter the kernel build directory (or use
-``--path=<build-dir>`` argument to the ``set`` sub-command) and run::
+``--path=<build-dir>`` argument to the ``set`` sub-command) and run,
+for instance::
- rustup override set $(scripts/min-tool-version.sh rustc)
+ rustup override set stable
-This will configure your working directory to use the correct version of
+This will configure your working directory to use the given version of
``rustc`` without affecting your default toolchain.
Note that the override applies to the current working directory (and its
@@ -65,9 +140,9 @@ version later on requires re-adding the component.
Otherwise, if a standalone installer is used, the Rust source tree may be
downloaded into the toolchain's installation folder::
- curl -L "https://static.rust-lang.org/dist/rust-src-$(scripts/min-tool-version.sh rustc).tar.gz" |
+ curl -L "https://static.rust-lang.org/dist/rust-src-$(rustc --version | cut -d' ' -f2).tar.gz" |
tar -xzf - -C "$(rustc --print sysroot)/lib" \
- "rust-src-$(scripts/min-tool-version.sh rustc)/rust-src/lib/" \
+ "rust-src-$(rustc --version | cut -d' ' -f2)/rust-src/lib/" \
--strip-components=3
In this case, upgrading the Rust compiler version later on requires manually
@@ -101,26 +176,22 @@ bindgen
*******
The bindings to the C side of the kernel are generated at build time using
-the ``bindgen`` tool. A particular version is required.
+the ``bindgen`` tool.
-Install it via (note that this will download and build the tool from source)::
+Install it, for instance, via (note that this will download and build the tool
+from source)::
- cargo install --locked --version $(scripts/min-tool-version.sh bindgen) bindgen-cli
+ cargo install --locked bindgen-cli
-``bindgen`` needs to find a suitable ``libclang`` in order to work. If it is
-not found (or a different ``libclang`` than the one found should be used),
-the process can be tweaked using the environment variables understood by
-``clang-sys`` (the Rust bindings crate that ``bindgen`` uses to access
-``libclang``):
+``bindgen`` uses the ``clang-sys`` crate to find a suitable ``libclang`` (which
+may be linked statically, dynamically or loaded at runtime). By default, the
+``cargo`` command above will produce a ``bindgen`` binary that will load
+``libclang`` at runtime. If it is not found (or a different ``libclang`` than
+the one found should be used), the process can be tweaked, e.g. by using the
+``LIBCLANG_PATH`` environment variable. For details, please see ``clang-sys``'s
+documentation at:
-* ``LLVM_CONFIG_PATH`` can be pointed to an ``llvm-config`` executable.
-
-* Or ``LIBCLANG_PATH`` can be pointed to a ``libclang`` shared library
- or to the directory containing it.
-
-* Or ``CLANG_PATH`` can be pointed to a ``clang`` executable.
-
-For details, please see ``clang-sys``'s documentation at:
+ https://github.com/KyleMayes/clang-sys#linking
https://github.com/KyleMayes/clang-sys#environment-variables
@@ -164,20 +235,6 @@ can be installed manually::
The standalone installers also come with ``clippy``.
-cargo
-*****
-
-``cargo`` is the Rust native build system. It is currently required to run
-the tests since it is used to build a custom standard library that contains
-the facilities provided by the custom ``alloc`` in the kernel. The tests can
-be run using the ``rusttest`` Make target.
-
-If ``rustup`` is being used, all the profiles already install the tool,
-thus nothing needs to be done.
-
-The standalone installers also come with ``cargo``.
-
-
rustdoc
*******
diff --git a/Documentation/rust/testing.rst b/Documentation/rust/testing.rst
index acfd0c2be48d..568b71b415a4 100644
--- a/Documentation/rust/testing.rst
+++ b/Documentation/rust/testing.rst
@@ -131,9 +131,8 @@ Additionally, there are the ``#[test]`` tests. These can be run using the
make LLVM=1 rusttest
-This requires the kernel ``.config`` and downloads external repositories. It
-runs the ``#[test]`` tests on the host (currently) and thus is fairly limited in
-what these tests can test.
+This requires the kernel ``.config``. It runs the ``#[test]`` tests on the host
+(currently) and thus is fairly limited in what these tests can test.
The Kselftests
--------------
diff --git a/Documentation/virt/uml/user_mode_linux_howto_v2.rst b/Documentation/virt/uml/user_mode_linux_howto_v2.rst
index d1cfe415e4c4..27942446f406 100644
--- a/Documentation/virt/uml/user_mode_linux_howto_v2.rst
+++ b/Documentation/virt/uml/user_mode_linux_howto_v2.rst
@@ -223,8 +223,6 @@ remote UML and other VM instances.
+-----------+--------+------------------------------------+------------+
| socket | legacy | none | ~ 450Mbit |
+-----------+--------+------------------------------------+------------+
-| pcap | legacy | rx only | ~ 450Mbit |
-+-----------+--------+------------------------------------+------------+
| ethertap | legacy | obsolete | ~ 500Mbit |
+-----------+--------+------------------------------------+------------+
| vde | legacy | obsolete | ~ 500Mbit |
diff --git a/MAINTAINERS b/MAINTAINERS
index 5579dac9aa64..a031a5159b7a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -702,6 +702,14 @@ S: Maintained
F: Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
F: drivers/net/ethernet/mediatek/airoha_eth.c
+AIROHA PCIE PHY DRIVER
+M: Lorenzo Bianconi <lorenzo@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/phy/airoha,en7581-pcie-phy.yaml
+F: drivers/phy/phy-airoha-pcie-regs.h
+F: drivers/phy/phy-airoha-pcie.c
+
AIROHA SPI SNFI DRIVER
M: Lorenzo Bianconi <lorenzo@kernel.org>
M: Ray Liu <ray.liu@airoha.com>
@@ -2188,7 +2196,7 @@ N: digicolor
ARM/CORESIGHT FRAMEWORK AND DRIVERS
M: Suzuki K Poulose <suzuki.poulose@arm.com>
R: Mike Leach <mike.leach@linaro.org>
-R: James Clark <james.clark@arm.com>
+R: James Clark <james.clark@linaro.org>
L: coresight@lists.linaro.org (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -3898,11 +3906,10 @@ F: include/net/bluetooth/
F: net/bluetooth/
BONDING DRIVER
-M: Jay Vosburgh <j.vosburgh@gmail.com>
+M: Jay Vosburgh <jv@jvosburgh.net>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
-S: Supported
-W: http://sourceforge.net/projects/bonding/
+S: Maintained
F: Documentation/networking/bonding.rst
F: drivers/net/bonding/
F: include/net/bond*
@@ -3966,8 +3973,10 @@ S: Odd Fixes
F: drivers/net/ethernet/netronome/nfp/bpf/
BPF JIT for POWERPC (32-BIT AND 64-BIT)
-M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
M: Michael Ellerman <mpe@ellerman.id.au>
+M: Hari Bathini <hbathini@linux.ibm.com>
+M: Christophe Leroy <christophe.leroy@csgroup.eu>
+R: Naveen N Rao <naveen@kernel.org>
L: bpf@vger.kernel.org
S: Supported
F: arch/powerpc/net/
@@ -6842,6 +6851,7 @@ F: include/linux/fwnode.h
F: include/linux/kobj*
F: include/linux/property.h
F: lib/kobj*
+F: rust/kernel/device.rs
DRIVERS FOR OMAP ADAPTIVE VOLTAGE SCALING (AVS)
M: Nishanth Menon <nm@ti.com>
@@ -8354,7 +8364,8 @@ F: Documentation/userspace-api/ELF.rst
F: fs/*binfmt_*.c
F: fs/Kconfig.binfmt
F: fs/exec.c
-F: fs/exec_test.c
+F: fs/tests/binfmt_*_kunit.c
+F: fs/tests/exec_kunit.c
F: include/linux/binfmts.h
F: include/linux/elf.h
F: include/uapi/linux/binfmts.h
@@ -8707,10 +8718,12 @@ F: include/linux/arm_ffa.h
FIRMWARE LOADER (request_firmware)
M: Luis Chamberlain <mcgrof@kernel.org>
M: Russ Weight <russ.weight@linux.dev>
+M: Danilo Krummrich <dakr@redhat.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/firmware_class/
F: drivers/base/firmware_loader/
+F: rust/kernel/firmware.rs
F: include/linux/firmware.h
FLEXTIMER FTM-QUADDEC DRIVER
@@ -10642,6 +10655,7 @@ F: Documentation/ABI/testing/sysfs-bus-i3c
F: Documentation/devicetree/bindings/i3c/
F: Documentation/driver-api/i3c
F: drivers/i3c/
+F: include/dt-bindings/i3c/
F: include/linux/i3c/
IBM Operation Panel Input Driver
@@ -12117,6 +12131,7 @@ F: scripts/Makefile*
F: scripts/basic/
F: scripts/clang-tools/
F: scripts/dummy-tools/
+F: scripts/include/
F: scripts/mk*
F: scripts/mod/
F: scripts/package/
@@ -12172,6 +12187,13 @@ F: include/uapi/linux/nfsd/
F: include/uapi/linux/sunrpc/
F: net/sunrpc/
+KERNEL PACMAN PACKAGING (in addition to generic KERNEL BUILD)
+M: Thomas Weißschuh <linux@weissschuh.net>
+R: Christian Heusel <christian@heusel.eu>
+R: Nathan Chancellor <nathan@kernel.org>
+S: Maintained
+F: scripts/package/PKGBUILD
+
KERNEL REGRESSIONS
M: Thorsten Leemhuis <linux@leemhuis.info>
L: regressions@lists.linux.dev
@@ -12509,7 +12531,7 @@ F: mm/kmsan/
F: scripts/Makefile.kmsan
KPROBES
-M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+M: Naveen N Rao <naveen@kernel.org>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -12886,7 +12908,7 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Michael Ellerman <mpe@ellerman.id.au>
R: Nicholas Piggin <npiggin@gmail.com>
R: Christophe Leroy <christophe.leroy@csgroup.eu>
-R: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+R: Naveen N Rao <naveen@kernel.org>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
W: https://github.com/linuxppc/wiki/wiki
@@ -15742,6 +15764,12 @@ S: Maintained
F: Documentation/devicetree/bindings/hwmon/nuvoton,nct6775.yaml
F: drivers/hwmon/nct6775-i2c.c
+NETCONSOLE
+M: Breno Leitao <leitao@debian.org>
+S: Maintained
+F: Documentation/networking/netconsole.rst
+F: drivers/net/netconsole.c
+
NETDEVSIM
M: Jakub Kicinski <kuba@kernel.org>
S: Maintained
@@ -17867,7 +17895,7 @@ F: tools/perf/
PERFORMANCE EVENTS TOOLING ARM64
R: John Garry <john.g.garry@oracle.com>
R: Will Deacon <will@kernel.org>
-R: James Clark <james.clark@arm.com>
+R: James Clark <james.clark@linaro.org>
R: Mike Leach <mike.leach@linaro.org>
R: Leo Yan <leo.yan@linux.dev>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -18749,6 +18777,13 @@ S: Maintained
F: Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
F: drivers/pmdomain/qcom/cpr.c
+QUALCOMM CPUCP MAILBOX DRIVER
+M: Sibi Sankar <quic_sibis@quicinc.com>
+L: linux-arm-msm@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/mailbox/qcom,cpucp-mbox.yaml
+F: drivers/mailbox/qcom-cpucp-mbox.c
+
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M: Ilia Lin <ilia.lin@kernel.org>
L: linux-pm@vger.kernel.org
@@ -19041,7 +19076,13 @@ S: Maintained
T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
F: Documentation/devicetree/bindings/rng/microsoft,vmgenid.yaml
F: drivers/char/random.c
+F: include/linux/random.h
+F: include/uapi/linux/random.h
F: drivers/virt/vmgenid.c
+F: include/vdso/getrandom.h
+F: lib/vdso/getrandom.c
+F: arch/x86/entry/vdso/vgetrandom*
+F: arch/x86/include/asm/vdso/getrandom*
RAPIDIO SUBSYSTEM
M: Matt Porter <mporter@kernel.crashing.org>
@@ -21660,6 +21701,13 @@ S: Supported
F: Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-rx.yaml
F: drivers/phy/starfive/phy-jh7110-dphy-rx.c
+STARFIVE JH7110 DPHY TX DRIVER
+M: Keith Zhao <keith.zhao@starfivetech.com>
+M: Shengyang Chen <shengyang.chen@starfivetech.com>
+S: Supported
+F: Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml
+F: drivers/phy/starfive/phy-jh7110-dphy-tx.c
+
STARFIVE JH7110 MMC/SD/SDIO DRIVER
M: William Qiu <william.qiu@starfivetech.com>
S: Supported
@@ -21817,6 +21865,15 @@ F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml
F: sound/soc/stm/
+STM32 DMA DRIVERS
+M: Amélie Delaunay <amelie.delaunay@foss.st.com>
+L: dmaengine@vger.kernel.org
+L: linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers)
+S: Maintained
+F: Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst
+F: Documentation/devicetree/bindings/dma/stm32/
+F: drivers/dma/stm32/
+
STM32 TIMER/LPTIMER DRIVERS
M: Fabrice Gasnier <fabrice.gasnier@foss.st.com>
S: Maintained
diff --git a/Makefile b/Makefile
index 67ce3b7d558e..2c02dafb48ce 100644
--- a/Makefile
+++ b/Makefile
@@ -11,8 +11,8 @@ NAME = Baby Opossum Posse
# Comments in this file are targeted only to the developer, do not
# expect to learn how to build the kernel reading this file.
-ifeq ($(filter undefine,$(.FEATURES)),)
-$(error GNU Make >= 3.82 is required. Your Make version is $(MAKE_VERSION))
+ifeq ($(filter output-sync,$(.FEATURES)),)
+$(error GNU Make >= 4.0 is required. Your Make version is $(MAKE_VERSION))
endif
$(if $(filter __%, $(MAKECMDGOALS)), \
@@ -93,15 +93,7 @@ endif
# If the user is running make -s (silent mode), suppress echoing of
# commands
-# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
-
-ifeq ($(filter 3.%,$(MAKE_VERSION)),)
-short-opts := $(firstword -$(MAKEFLAGS))
-else
-short-opts := $(filter-out --%,$(MAKEFLAGS))
-endif
-
-ifneq ($(findstring s,$(short-opts)),)
+ifneq ($(findstring s,$(firstword -$(MAKEFLAGS))),)
quiet=silent_
override KBUILD_VERBOSE :=
endif
@@ -201,14 +193,6 @@ ifneq ($(words $(subst :, ,$(abs_srctree))), 1)
$(error source directory cannot contain spaces or colons)
endif
-ifneq ($(filter 3.%,$(MAKE_VERSION)),)
-# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
-# We need to invoke sub-make to avoid implicit rules in the top Makefile.
-need-sub-make := 1
-# Cancel implicit rules for this Makefile.
-$(this-makefile): ;
-endif
-
export sub_make_done := 1
endif # sub_make_done
@@ -461,21 +445,23 @@ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
# host programs.
export rust_common_flags := --edition=2021 \
-Zbinary_dep_depinfo=y \
- -Dunsafe_op_in_unsafe_fn -Drust_2018_idioms \
- -Dunreachable_pub -Dnon_ascii_idents \
+ -Dunsafe_op_in_unsafe_fn \
+ -Dnon_ascii_idents \
+ -Wrust_2018_idioms \
+ -Wunreachable_pub \
-Wmissing_docs \
- -Drustdoc::missing_crate_level_docs \
- -Dclippy::correctness -Dclippy::style \
- -Dclippy::suspicious -Dclippy::complexity \
- -Dclippy::perf \
- -Dclippy::let_unit_value -Dclippy::mut_mut \
- -Dclippy::needless_bitwise_bool \
- -Dclippy::needless_continue \
- -Dclippy::no_mangle_with_rust_abi \
+ -Wrustdoc::missing_crate_level_docs \
+ -Wclippy::all \
+ -Wclippy::mut_mut \
+ -Wclippy::needless_bitwise_bool \
+ -Wclippy::needless_continue \
+ -Wclippy::no_mangle_with_rust_abi \
-Wclippy::dbg_macro
-KBUILD_HOSTCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
-KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
+KBUILD_HOSTCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) \
+ $(HOSTCFLAGS) -I $(srctree)/scripts/include
+KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS) \
+ -I $(srctree)/scripts/include
KBUILD_HOSTRUSTFLAGS := $(rust_common_flags) -O -Cstrip=debuginfo \
-Zallow-features= $(HOSTRUSTFLAGS)
KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
@@ -507,7 +493,6 @@ RUSTDOC = rustdoc
RUSTFMT = rustfmt
CLIPPY_DRIVER = clippy-driver
BINDGEN = bindgen
-CARGO = cargo
PAHOLE = pahole
RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids
LEX = flex
@@ -573,7 +558,7 @@ KBUILD_RUSTFLAGS := $(rust_common_flags) \
-Csymbol-mangling-version=v0 \
-Crelocation-model=static \
-Zfunction-sections=n \
- -Dclippy::float_arithmetic
+ -Wclippy::float_arithmetic
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
@@ -601,7 +586,7 @@ endif
export RUSTC_BOOTSTRAP := 1
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG
-export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN CARGO
+export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN
export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
@@ -1344,6 +1329,12 @@ prepare: tools/bpf/resolve_btfids
endif
endif
+# The tools build system is not a part of Kbuild and tends to introduce
+# its own unique issues. If you need to integrate a new tool into Kbuild,
+# please consider locating that tool outside the tools/ tree and using the
+# standard Kbuild "hostprogs" syntax instead of adding a new tools/* entry
+# here. See Documentation/kbuild/makefiles.rst for details.
+
PHONY += resolve_btfids_clean
resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids
@@ -1497,7 +1488,7 @@ CLEAN_FILES += vmlinux.symvers modules-only.symvers \
# Directories & files removed with 'make mrproper'
MRPROPER_FILES += include/config include/generated \
arch/$(SRCARCH)/include/generated .objdiff \
- debian snap tar-install \
+ debian snap tar-install PKGBUILD pacman \
.config .config.old .version \
Module.symvers \
certs/signing_key.pem \
@@ -1967,9 +1958,12 @@ quiet_cmd_tags = GEN $@
tags TAGS cscope gtags: FORCE
$(call cmd,tags)
-# IDE support targets
+# Generate rust-project.json (a file that describes the structure of non-Cargo
+# Rust projects) for rust-analyzer (an implementation of the Language Server
+# Protocol).
PHONY += rust-analyzer
rust-analyzer:
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh
$(Q)$(MAKE) $(build)=rust $@
# Script to generate missing namespace dependencies
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a867a7d967aa..954a1916a500 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1483,7 +1483,8 @@ config ARM_ATAG_DTB_COMPAT
from the ATAG list and store it at run time into the appended DTB.
choice
- prompt "Kernel command line type" if ARM_ATAG_DTB_COMPAT
+ prompt "Kernel command line type"
+ depends on ARM_ATAG_DTB_COMPAT
default ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER
config ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER
@@ -1512,7 +1513,8 @@ config CMDLINE
memory size and the root device (e.g., mem=64M root=/dev/nfs).
choice
- prompt "Kernel command line type" if CMDLINE != ""
+ prompt "Kernel command line type"
+ depends on CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
config CMDLINE_FROM_BOOTLOADER
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 9ec11fac7d8d..34e2c6e31fd1 100755
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -17,6 +17,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ "$(basename $2)" = "zImage" ]; then
# Compressed install
echo "Installing compressed kernel"
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 6d0c9f7268ba..06b0e5fd54a6 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -816,10 +816,10 @@ EXPORT_SYMBOL(locomo_frontlight_set);
* We model this as a regular bus type, and hang devices directly
* off this.
*/
-static int locomo_match(struct device *_dev, struct device_driver *_drv)
+static int locomo_match(struct device *_dev, const struct device_driver *_drv)
{
struct locomo_dev *dev = LOCOMO_DEV(_dev);
- struct locomo_driver *drv = LOCOMO_DRV(_drv);
+ const struct locomo_driver *drv = LOCOMO_DRV(_drv);
return dev->devid == drv->devid;
}
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 1fbd7363cf11..550978dc3c50 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1339,10 +1339,10 @@ EXPORT_SYMBOL_GPL(sa1111_get_irq);
* We model this as a regular bus type, and hang devices directly
* off this.
*/
-static int sa1111_match(struct device *_dev, struct device_driver *_drv)
+static int sa1111_match(struct device *_dev, const struct device_driver *_drv)
{
struct sa1111_dev *dev = to_sa1111_device(_dev);
- struct sa1111_driver *drv = SA1111_DRV(_drv);
+ const struct sa1111_driver *drv = SA1111_DRV(_drv);
return !!(dev->devid & drv->devid);
}
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 9fd9ad5d9202..3190e1e5067a 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -189,7 +189,7 @@ struct locomo_driver {
void (*remove)(struct locomo_dev *);
};
-#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
+#define LOCOMO_DRV(_d) container_of_const((_d), struct locomo_driver, drv)
#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index d8c6f8a99dfa..a815f39b4243 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -404,7 +404,7 @@ struct sa1111_driver {
void (*remove)(struct sa1111_dev *);
};
-#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
+#define SA1111_DRV(_d) container_of_const((_d), struct sa1111_driver, drv)
#define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 79a656a62cbc..b3fc891f1544 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -168,9 +168,9 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
- select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
- select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
- select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
+ select HAVE_ARCH_KASAN_VMALLOC
+ select HAVE_ARCH_KASAN_SW_TAGS
+ select HAVE_ARCH_KASAN_HW_TAGS if ARM64_MTE
# Some instrumentation may be unsound, hence EXPERT
select HAVE_ARCH_KCSAN if EXPERT
select HAVE_ARCH_KFENCE
@@ -211,8 +211,8 @@ config ARM64
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_ERROR_INJECTION
- select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_RETVAL
select HAVE_GCC_PLUGINS
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && \
HW_PERF_EVENTS && HAVE_PERF_EVENTS_NMI
@@ -1471,7 +1471,6 @@ config HOTPLUG_CPU
config NUMA
bool "NUMA Memory Allocation and Scheduler Support"
select GENERIC_ARCH_NUMA
- select ACPI_NUMA if ACPI
select OF_NUMA
select HAVE_SETUP_PER_CPU_AREA
select NEED_PER_CPU_EMBED_FIRST_CHUNK
@@ -2290,7 +2289,8 @@ config CMDLINE
root device (e.g. root=/dev/nfs).
choice
- prompt "Kernel command line type" if CMDLINE != ""
+ prompt "Kernel command line type"
+ depends on CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
help
Choose how the kernel will handle the provided default kernel
@@ -2336,6 +2336,17 @@ config EFI
allow the kernel to be booted as an EFI application. This
is only useful on systems that have UEFI firmware.
+config COMPRESSED_INSTALL
+ bool "Install compressed image by default"
+ help
+ This makes the regular "make install" install the compressed
+ image we built, not the legacy uncompressed one.
+
+ You can check that a compressed image works for you by doing
+ "make zinstall" first, and verifying that everything is fine
+ in your environment before making "make install" do this for
+ you.
+
config DMI
bool "Enable support for SMBIOS (DMI) tables"
depends on EFI
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3f0f35fd5bb7..f6bc3da1ef11 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -182,7 +182,13 @@ $(BOOT_TARGETS): vmlinux
Image.%: Image
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-install: KBUILD_IMAGE := $(boot)/Image
+ifeq ($(CONFIG_COMPRESSED_INSTALL),y)
+ DEFAULT_KBUILD_IMAGE = $(KBUILD_IMAGE)
+else
+ DEFAULT_KBUILD_IMAGE = $(boot)/Image
+endif
+
+install: KBUILD_IMAGE := $(DEFAULT_KBUILD_IMAGE)
install zinstall:
$(call cmd,install)
@@ -229,7 +235,7 @@ define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
echo ' image.fit - Flat Image Tree (arch/$(ARCH)/boot/image.fit)'
- echo ' install - Install uncompressed kernel'
+ echo ' install - Install kernel (compressed if COMPRESSED_INSTALL set)'
echo ' zinstall - Install compressed kernel'
echo ' Install using (your) ~/bin/installkernel or'
echo ' (distribution) /sbin/installkernel or'
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
index 9b7a09808a3d..cc2f4ccca6c0 100755
--- a/arch/arm64/boot/install.sh
+++ b/arch/arm64/boot/install.sh
@@ -17,6 +17,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ "$(basename $2)" = "Image.gz" ] || [ "$(basename $2)" = "vmlinuz.efi" ]
then
# Compressed install
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f8efbc128446..7a4f5604be3f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1065,6 +1065,28 @@ static inline bool pgtable_l5_enabled(void) { return false; }
#define p4d_offset_kimg(dir,addr) ((p4d_t *)dir)
+static inline
+p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
+{
+ /*
+ * With runtime folding of the pud, pud_offset_lockless() passes
+ * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
+ * will offset the pointer assuming that it points into
+ * a page-table page. However, the fast GUP path passes us a
+ * pgd_t allocated on the stack and so we must use the original
+ * pointer in 'pgdp' to construct the p4d pointer instead of
+ * using the generic p4d_offset_lockless() implementation.
+ *
+ * Note: reusing the original pointer means that we may
+ * dereference the same (live) page-table entry multiple times.
+ * This is safe because it is still only loaded once in the
+ * context of each level and the CPU guarantees same-address
+ * read-after-read ordering.
+ */
+ return p4d_offset(pgdp, addr);
+}
+#define p4d_offset_lockless p4d_offset_lockless_folded
+
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#define pgd_ERROR(e) \
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index b776e7424fe9..e737c6295ec7 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -507,7 +507,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
return ret;
}
-static int emulation_proc_handler(struct ctl_table *table, int write,
+static int emulation_proc_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 82e8a6017382..77006df20a75 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -535,7 +535,7 @@ static unsigned int find_supported_vector_length(enum vec_type type,
#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
-static int vec_proc_do_default_vl(struct ctl_table *table, int write,
+static int vec_proc_do_default_vl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct vl_info *info = table->extra1;
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d63930c82839..d11da6461278 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -21,7 +21,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
# potential future proofing if we end up with internal calls to the exported
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
# preparation in build-time C")).
-ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \
+ldflags-y := -shared -soname=linux-vdso.so.1 \
-Bsymbolic --build-id=sha1 -n $(btildflags-y)
ifdef CONFIG_LD_ORPHAN_WARN
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index cc4508c604b2..25a2cb6317f3 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -98,7 +98,7 @@ VDSO_AFLAGS += -D__ASSEMBLY__
# From arm vDSO Makefile
VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
-VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1
+VDSO_LDFLAGS += -shared --build-id=sha1
VDSO_LDFLAGS += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index a4c1dd4741a4..7ceaa1e0b4bc 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -149,7 +149,7 @@ Res0 63:32
UnsignedEnum 31:28 GIC
0b0000 NI
0b0001 GICv3
- 0b0010 GICv4p1
+ 0b0011 GICv4p1
EndEnum
UnsignedEnum 27:24 Virt_frac
0b0000 NI
@@ -903,7 +903,7 @@ EndEnum
UnsignedEnum 27:24 GIC
0b0000 NI
0b0001 IMP
- 0b0010 V4P1
+ 0b0011 V4P1
EndEnum
SignedEnum 23:20 AdvSIMD
0b0000 IMP
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ebdb7156560c..70f169210b52 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -476,7 +476,6 @@ config NR_CPUS
config NUMA
bool "NUMA Support"
select SMP
- select ACPI_NUMA if ACPI
help
Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
support. This option improves performance on systems with more
diff --git a/arch/m68k/install.sh b/arch/m68k/install.sh
index af65e16e5147..b6829b3942b3 100755
--- a/arch/m68k/install.sh
+++ b/arch/m68k/install.sh
@@ -16,6 +16,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ -f $4/vmlinuz ]; then
mv $4/vmlinuz $4/vmlinuz.old
fi
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 28af3d9e6bc0..60077e576935 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -478,6 +478,7 @@ config MACH_LOONGSON64
select BOARD_SCACHE
select CSRC_R4K
select CEVT_R4K
+ select SYNC_R4K
select FORCE_PCI
select ISA
select I8259
@@ -2927,7 +2928,8 @@ config BUILTIN_DTB
bool
choice
- prompt "Kernel appended dtb support" if USE_OF
+ prompt "Kernel appended dtb support"
+ depends on USE_OF
default MIPS_NO_APPENDED_DTB
config MIPS_NO_APPENDED_DTB
@@ -2968,7 +2970,8 @@ choice
endchoice
choice
- prompt "Kernel command line type" if !CMDLINE_OVERRIDE
+ prompt "Kernel command line type"
+ depends on !CMDLINE_OVERRIDE
default MIPS_CMDLINE_FROM_DTB if USE_OF && !ATH79 && !MACH_INGENIC && \
!MACH_LOONGSON64 && !MIPS_MALTA && \
!CAVIUM_OCTEON_SOC
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index c2930a75b7e4..1e782275850a 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -240,6 +240,10 @@ GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
#define CM_GCR_CPC_STATUS_EX BIT(0)
+/* GCR_ACCESS - Controls core/IOCU access to GCRs */
+GCR_ACCESSOR_RW(32, 0x120, access_cm3)
+#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0)
+
/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
GCR_ACCESSOR_RW(32, 0x130, l2_config)
#define CM_GCR_L2_CONFIG_BYPASS BIT(20)
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index bc2c240f414b..2427d76f953f 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -50,7 +50,6 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
-#define SMP_ASK_C0COUNT 0x8
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 9cc087dd1c19..395622c37325 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -317,7 +317,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
/* Ensure the core can access the GCRs */
- set_gcr_access(1 << core);
+ if (mips_cm_revision() < CM_REV_CM3)
+ set_gcr_access(1 << core);
+ else
+ set_gcr_access_cm3(1 << core);
if (mips_cpc_present()) {
/* Reset the core */
diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
index 66d049cdcf14..147acd972a07 100644
--- a/arch/mips/loongson64/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -33,7 +33,6 @@ static void __iomem *ipi_clear0_regs[16];
static void __iomem *ipi_status0_regs[16];
static void __iomem *ipi_en0_regs[16];
static void __iomem *ipi_mailbox_buf[16];
-static uint32_t core0_c0count[NR_CPUS];
static u32 (*ipi_read_clear)(int cpu);
static void (*ipi_write_action)(int cpu, u32 action);
@@ -382,11 +381,10 @@ loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
ipi_write_action(cpu_logical_map(i), (u32)action);
}
-
static irqreturn_t loongson3_ipi_interrupt(int irq, void *dev_id)
{
- int i, cpu = smp_processor_id();
- unsigned int action, c0count;
+ int cpu = smp_processor_id();
+ unsigned int action;
action = ipi_read_clear(cpu);
@@ -399,26 +397,14 @@ static irqreturn_t loongson3_ipi_interrupt(int irq, void *dev_id)
irq_exit();
}
- if (action & SMP_ASK_C0COUNT) {
- BUG_ON(cpu != 0);
- c0count = read_c0_count();
- c0count = c0count ? c0count : 1;
- for (i = 1; i < nr_cpu_ids; i++)
- core0_c0count[i] = c0count;
- nudge_writes(); /* Let others see the result ASAP */
- }
-
return IRQ_HANDLED;
}
-#define MAX_LOOPS 800
/*
* SMP init and finish on secondary CPUs
*/
static void loongson3_init_secondary(void)
{
- int i;
- uint32_t initcount;
unsigned int cpu = smp_processor_id();
unsigned int imask = STATUSF_IP7 | STATUSF_IP6 |
STATUSF_IP3 | STATUSF_IP2;
@@ -432,23 +418,6 @@ static void loongson3_init_secondary(void)
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
cpu_data[cpu].package =
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
-
- i = 0;
- core0_c0count[cpu] = 0;
- loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
- while (!core0_c0count[cpu]) {
- i++;
- cpu_relax();
- }
-
- if (i > MAX_LOOPS)
- i = MAX_LOOPS;
- if (cpu_data[cpu].package)
- initcount = core0_c0count[cpu] + i;
- else /* Local access is faster for loops */
- initcount = core0_c0count[cpu] + i/2;
-
- write_c0_count(initcount);
}
static void loongson3_smp_finish(void)
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index af5333986900..149a9151bc0b 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -589,4 +589,5 @@ module_exit(sbprof_tb_cleanup);
MODULE_ALIAS_CHARDEV_MAJOR(SBPROF_TB_MAJOR);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_DESCRIPTION("Support for ZBbus profiling");
MODULE_LICENSE("GPL");
diff --git a/arch/nios2/boot/install.sh b/arch/nios2/boot/install.sh
index 34a2feec42c8..1161f2bf59ec 100755
--- a/arch/nios2/boot/install.sh
+++ b/arch/nios2/boot/install.sh
@@ -16,6 +16,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ -f $4/vmlinuz ]; then
mv $4/vmlinuz $4/vmlinuz.old
fi
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index dc9b902de8ea..5d650e02cbf4 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -46,6 +46,7 @@ config PARISC
select GENERIC_CPU_DEVICES if !SMP
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
@@ -86,6 +87,7 @@ config PARISC
select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select TRACE_IRQFLAGS_SUPPORT
select HAVE_FUNCTION_DESCRIPTORS if 64BIT
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 7ddd7f433367..9e74cef4d774 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -41,7 +41,7 @@ struct parisc_driver {
#define to_parisc_device(d) container_of(d, struct parisc_device, dev)
-#define to_parisc_driver(d) container_of(d, struct parisc_driver, drv)
+#define to_parisc_driver(d) container_of_const(d, struct parisc_driver, drv)
#define parisc_parent(d) to_parisc_device(d->dev.parent)
static inline const char *parisc_pathname(struct parisc_device *d)
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 98851ff7699a..a97c0fd55f91 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -20,7 +20,7 @@
* sysdeps/unix/sysv/linux/hppa/sysdep.h
*/
-#ifdef PIC
+#ifndef DONT_USE_PIC
/* WARNING: CANNOT BE USED IN A NOP! */
# define K_STW_ASM_PIC " copy %%r19, %%r4\n"
# define K_LDW_ASM_PIC " copy %%r4, %%r19\n"
@@ -43,7 +43,7 @@
across the syscall. */
#define K_CALL_CLOB_REGS "%r1", "%r2", K_USING_GR4 \
- "%r20", "%r29", "%r31"
+ "%r20", "%r29", "%r31"
#undef K_INLINE_SYSCALL
#define K_INLINE_SYSCALL(name, nr, args...) ({ \
@@ -58,7 +58,7 @@
" ldi %1, %%r20\n" \
K_LDW_ASM_PIC \
: "=r" (__res) \
- : "i" (SYS_ify(name)) K_ASM_ARGS_##nr \
+ : "i" (name) K_ASM_ARGS_##nr \
: "memory", K_CALL_CLOB_REGS K_CLOB_ARGS_##nr \
); \
__sys_res = (long)__res; \
@@ -104,42 +104,18 @@
#define K_CLOB_ARGS_1 K_CLOB_ARGS_2, "%r25"
#define K_CLOB_ARGS_0 K_CLOB_ARGS_1, "%r26"
-#define _syscall0(type,name) \
-type name(void) \
-{ \
- return K_INLINE_SYSCALL(name, 0); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
- return K_INLINE_SYSCALL(name, 1, arg1); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1, type2 arg2) \
-{ \
- return K_INLINE_SYSCALL(name, 2, arg1, arg2); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1, type2 arg2, type3 arg3) \
-{ \
- return K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
- return K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4); \
-}
-
-/* select takes 5 arguments */
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
- return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \
-}
+#define syscall0(name) \
+ K_INLINE_SYSCALL(name, 0)
+#define syscall1(name, arg1) \
+ K_INLINE_SYSCALL(name, 1, arg1)
+#define syscall2(name, arg1, arg2) \
+ K_INLINE_SYSCALL(name, 2, arg1, arg2)
+#define syscall3(name, arg1, arg2, arg3) \
+ K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3)
+#define syscall4(name, arg1, arg2, arg3, arg4) \
+ K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4)
+#define syscall5(name, arg1, arg2, arg3, arg4, arg5) \
+ K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5)
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
diff --git a/arch/parisc/include/asm/vdso.h b/arch/parisc/include/asm/vdso.h
index ef8206193f82..2a2dc11b5545 100644
--- a/arch/parisc/include/asm/vdso.h
+++ b/arch/parisc/include/asm/vdso.h
@@ -19,6 +19,6 @@ extern struct vdso_data *vdso_data;
/* Default link addresses for the vDSOs */
#define VDSO_LBASE 0
-#define VDSO_VERSION_STRING LINUX_5.18
+#define VDSO_VERSION_STRING LINUX_6.11
#endif /* __PARISC_VDSO_H__ */
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
index 933d031c249a..664c2d77f776 100755
--- a/arch/parisc/install.sh
+++ b/arch/parisc/install.sh
@@ -16,6 +16,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ "$(basename $2)" = "vmlinuz" ]; then
# Compressed install
echo "Installing compressed kernel"
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 483bfafd930c..db531e58d70e 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -611,11 +611,7 @@ void __init parisc_setup_cache_timing(void)
threshold/1024);
set_tlb_threshold:
- if (threshold > FLUSH_TLB_THRESHOLD)
- parisc_tlb_flush_threshold = threshold;
- else
- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
-
+ parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD);
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
}
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index ac19d685e4a5..1e793f770f71 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -97,7 +97,7 @@ static int for_each_padev(int (*fn)(struct device *, void *), void * data)
* @driver: the PA-RISC driver to try
* @dev: the PA-RISC device to try
*/
-static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
+static int match_device(const struct parisc_driver *driver, struct parisc_device *dev)
{
const struct parisc_device_id *ids;
@@ -548,7 +548,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
return dev;
}
-static int parisc_generic_match(struct device *dev, struct device_driver *drv)
+static int parisc_generic_match(struct device *dev, const struct device_driver *drv)
{
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 71e596ca5a86..3e79e40e361d 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -104,6 +104,7 @@
#define ERR_NOTHANDLED -1
int unaligned_enabled __read_mostly = 1;
+int no_unaligned_warning __read_mostly;
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
@@ -399,6 +400,7 @@ void handle_unaligned(struct pt_regs *regs)
} else {
static DEFINE_RATELIMIT_STATE(kernel_ratelimit, 5 * HZ, 5);
if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
+ !no_unaligned_warning &&
__ratelimit(&kernel_ratelimit))
pr_warn("Kernel: unaligned access to " RFMT " in %pS "
"(iir " RFMT ")\n",
diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile
index 1350d50c6306..2b36d25ada6e 100644
--- a/arch/parisc/kernel/vdso32/Makefile
+++ b/arch/parisc/kernel/vdso32/Makefile
@@ -1,11 +1,25 @@
-# List of files in the vdso, has to be asm only for now
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile
+
+KCOV_INSTRUMENT := n
+
+# Disable gcov profiling, ubsan and kasan for VDSO code
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
obj-vdso32 = note.o sigtramp.o restart_syscall.o
+obj-cvdso32 = vdso32_generic.o
# Build rules
-targets := $(obj-vdso32) vdso32.so
+targets := $(obj-vdso32) $(obj-cvdso32) vdso32.so
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+obj-cvdso32 := $(addprefix $(obj)/, $(obj-cvdso32))
+
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vdso32_generic.o = $(VDSO_CFLAGS_REMOVE)
ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls
# -march=1.1 -mschedule=7100LC
@@ -26,18 +40,22 @@ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso32ld)
# assembly rules for the .S files
$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
+$(obj-cvdso32): %.o: %.c FORCE
+ $(call if_changed_dep,vdso32cc)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdso32cc = VDSO32C $@
+ cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
gen-vdsosym := $(src)/gen_vdso_offsets.sh
diff --git a/arch/parisc/kernel/vdso32/vdso32.lds.S b/arch/parisc/kernel/vdso32/vdso32.lds.S
index d4aff3af5262..4273baa26b65 100644
--- a/arch/parisc/kernel/vdso32/vdso32.lds.S
+++ b/arch/parisc/kernel/vdso32/vdso32.lds.S
@@ -106,6 +106,9 @@ VERSION
global:
__kernel_sigtramp_rt32;
__kernel_restart_syscall32;
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
+ __vdso_clock_gettime64;
local: *;
};
}
diff --git a/arch/parisc/kernel/vdso32/vdso32_generic.c b/arch/parisc/kernel/vdso32/vdso32_generic.c
new file mode 100644
index 000000000000..8d5bd59e8646
--- /dev/null
+++ b/arch/parisc/kernel/vdso32/vdso32_generic.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/unistd.h"
+#include <linux/types.h>
+#include <uapi/asm/unistd_32.h>
+
+struct timezone;
+struct old_timespec32;
+struct __kernel_timespec;
+struct __kernel_old_timeval;
+
+/* forward declarations */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
+}
+
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts)
+{
+ return syscall2(__NR_clock_gettime, (long)clock, (long)ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return syscall2(__NR_clock_gettime64, (long)clock, (long)ts);
+}
diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile
index 0b1c1cc4c2c7..bd87bd6a6659 100644
--- a/arch/parisc/kernel/vdso64/Makefile
+++ b/arch/parisc/kernel/vdso64/Makefile
@@ -1,12 +1,25 @@
-# List of files in the vdso, has to be asm only for now
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile
+
+KCOV_INSTRUMENT := n
+
+# Disable gcov profiling, ubsan and kasan for VDSO code
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
obj-vdso64 = note.o sigtramp.o restart_syscall.o
+obj-cvdso64 = vdso64_generic.o
# Build rules
-targets := $(obj-vdso64) vdso64.so
-obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so
+obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64))
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
@@ -26,18 +39,22 @@ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso64ld)
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
+$(obj-cvdso64): %.o: %.c FORCE
+ $(call if_changed_dep,vdso64cc)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdso64cc = VDSO64C $@
+ cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
gen-vdsosym := $(src)/gen_vdso_offsets.sh
diff --git a/arch/parisc/kernel/vdso64/vdso64.lds.S b/arch/parisc/kernel/vdso64/vdso64.lds.S
index de1fb4b19286..10f25e4e1554 100644
--- a/arch/parisc/kernel/vdso64/vdso64.lds.S
+++ b/arch/parisc/kernel/vdso64/vdso64.lds.S
@@ -104,6 +104,8 @@ VERSION
global:
__kernel_sigtramp_rt64;
__kernel_restart_syscall64;
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
local: *;
};
}
diff --git a/arch/parisc/kernel/vdso64/vdso64_generic.c b/arch/parisc/kernel/vdso64/vdso64_generic.c
new file mode 100644
index 000000000000..fc6836a0075b
--- /dev/null
+++ b/arch/parisc/kernel/vdso64/vdso64_generic.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/unistd.h"
+#include <linux/types.h>
+
+struct timezone;
+struct __kernel_timespec;
+struct __kernel_old_timeval;
+
+/* forward declarations */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
+}
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return syscall2(__NR_clock_gettime, (long)clock, (long)ts);
+}
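
The new vdso32_generic.c / vdso64_generic.c entry points above do not read the clock themselves yet; they only forward to the matching syscalls through syscall2(), so userspace gets stable vDSO symbols now and can pick up a real fast path later. A minimal userspace sketch of that same fallback shape (hypothetical helper name, not kernel code):

#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Userspace model of the fallback pattern used above: the exported entry
 * point simply forwards to the syscall. */
static int vdso_clock_gettime_fallback(clockid_t clock, struct timespec *ts)
{
        return syscall(SYS_clock_gettime, clock, ts);
}

int main(void)
{
        struct timespec ts;

        if (vdso_clock_gettime_fallback(CLOCK_MONOTONIC, &ts) == 0)
                printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
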
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bc5a1612be72..d7b09b064a8a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -964,7 +964,8 @@ config CMDLINE
most cases you will need to specify the root device here.
choice
- prompt "Kernel command line type" if CMDLINE != ""
+ prompt "Kernel command line type"
+ depends on CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
config CMDLINE_FROM_BOOTLOADER
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index d13d8fdc3411..987e23a2bd28 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -390,11 +390,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev);
int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv);
void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv);
-static inline struct ps3_system_bus_driver *ps3_drv_to_system_bus_drv(
- struct device_driver *_drv)
-{
- return container_of(_drv, struct ps3_system_bus_driver, core);
-}
+#define ps3_drv_to_system_bus_drv(_drv) container_of_const(_drv, struct ps3_system_bus_driver, core)
static inline struct ps3_system_bus_device *ps3_dev_to_system_bus_dev(
const struct device *_dev)
{
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 6faf2a931755..7c444150c5ad 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -156,11 +156,7 @@ static inline int vio_enable_interrupts(struct vio_dev *dev)
}
#endif
-static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
-{
- return container_of(drv, struct vio_driver, driver);
-}
-
+#define to_vio_driver(__drv) container_of_const(__drv, struct vio_driver, driver)
#define to_vio_dev(__dev) container_of_const(__dev, struct vio_dev, dev)
#endif /* __KERNEL__ */
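
Both conversions above replace an inline helper with container_of_const(), so a const inner pointer yields a const outer pointer and the bus match callbacks can take a const struct device_driver *. A standalone sketch of how such a const-preserving container_of can be built with _Generic; this is a simplified re-creation for illustration, not the kernel's own macro:

#include <stddef.h>
#include <stdio.h>

struct device_driver { const char *name; };

struct vio_driver {
        int id;
        struct device_driver driver;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Const-preserving variant: a const inner pointer yields a const outer one. */
#define container_of_const(ptr, type, member)                           \
        _Generic(ptr,                                                   \
                const typeof(*(ptr)) *:                                 \
                        ((const type *)container_of(ptr, type, member)), \
                default: ((type *)container_of(ptr, type, member)))

int main(void)
{
        struct vio_driver drv = { .id = 7, .driver = { .name = "demo" } };
        const struct device_driver *d = &drv.driver;

        /* The result is const because the argument is const. */
        const struct vio_driver *vdrv = container_of_const(d, struct vio_driver, driver);

        printf("%d %s\n", vdrv->id, vdrv->driver.name);
        return 0;
}
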
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 961aadc71de2..5e6c7b527677 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1984,8 +1984,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
break;
r = -ENXIO;
- if (!xive_enabled())
+ if (!xive_enabled()) {
+ fdput(f);
break;
+ }
r = -EPERM;
dev = kvm_device_from_filp(f.file);
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 56dc6b29a3e7..b9a7d9bae687 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -333,10 +333,10 @@ int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
EXPORT_SYMBOL_GPL(ps3_mmio_region_init);
static int ps3_system_bus_match(struct device *_dev,
- struct device_driver *_drv)
+ const struct device_driver *_drv)
{
int result;
- struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv);
+ const struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv);
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
if (!dev->match_sub_id)
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index b401282727a4..3436b0af795e 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -339,7 +339,7 @@ static struct attribute *ibmbus_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(ibmbus_bus);
-static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
+static int ibmebus_bus_bus_match(struct device *dev, const struct device_driver *drv)
{
const struct of_device_id *matches = drv->of_match_table;
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 36d1c7d4156b..ac1d2d2c9a88 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1576,10 +1576,10 @@ void vio_unregister_device(struct vio_dev *viodev)
}
EXPORT_SYMBOL(vio_unregister_device);
-static int vio_bus_match(struct device *dev, struct device_driver *drv)
+static int vio_bus_match(struct device *dev, const struct device_driver *drv)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
- struct vio_driver *vio_drv = to_vio_driver(drv);
+ const struct vio_driver *vio_drv = to_vio_driver(drv);
const struct vio_device_id *ids = vio_drv->id_table;
return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
@@ -1689,7 +1689,7 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
/* construct the kobject name from the device node */
if (of_node_is_type(vnode_parent, "vdevice")) {
const __be32 *prop;
-
+
prop = of_get_property(vnode, "reg", NULL);
if (!prop)
goto out;
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 517b963e3e6a..a0934b516933 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -559,9 +559,7 @@ bool __init xive_native_init(void)
struct device_node *np;
struct resource r;
void __iomem *tima;
- struct property *prop;
u8 max_prio = 7;
- const __be32 *p;
u32 val, cpu;
s64 rc;
@@ -592,7 +590,7 @@ bool __init xive_native_init(void)
max_prio = val - 1;
/* Iterate the EQ sizes and pick one */
- of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
+ of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) {
xive_queue_shift = val;
if (val == PAGE_SHIFT)
break;
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index e45419264391..f2fa985a2c77 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -814,7 +814,6 @@ bool __init xive_spapr_init(void)
struct device_node *np;
struct resource r;
void __iomem *tima;
- struct property *prop;
u8 max_prio;
u32 val;
u32 len;
@@ -866,7 +865,7 @@ bool __init xive_spapr_init(void)
}
/* Iterate the EQ sizes and pick one */
- of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
+ of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) {
xive_queue_shift = val;
if (val == PAGE_SHIFT)
break;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 736457a5898a..0f3cd7c3a436 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -13,7 +13,9 @@ config 32BIT
config RISCV
def_bool y
select ACPI_GENERIC_GSI if ACPI
+ select ACPI_PPTT if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
+ select ACPI_SPCR_TABLE if ACPI
select ARCH_DMA_DEFAULT_COHERENT
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM_VMEMMAP
@@ -123,6 +125,7 @@ config RISCV
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
@@ -154,7 +157,6 @@ config RISCV
select HAVE_KERNEL_UNCOMPRESSED if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_ZSTD if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KPROBES if !XIP_KERNEL
- select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL
# https://github.com/ClangBuiltLinux/linux/issues/1881
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if !LD_IS_LLD
@@ -820,6 +822,8 @@ config RISCV_EFFICIENT_UNALIGNED_ACCESS
endchoice
+source "arch/riscv/Kconfig.vendor"
+
endmenu # "Platform type"
menu "Kernel features"
@@ -960,7 +964,8 @@ config CMDLINE
line here and choose how the kernel should use it later on.
choice
- prompt "Built-in command line usage" if CMDLINE != ""
+ prompt "Built-in command line usage"
+ depends on CMDLINE != ""
default CMDLINE_FALLBACK
help
Choose how the kernel will handle the provided built-in command
diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor
new file mode 100644
index 000000000000..6f1cdd32ed29
--- /dev/null
+++ b/arch/riscv/Kconfig.vendor
@@ -0,0 +1,19 @@
+menu "Vendor extensions"
+
+config RISCV_ISA_VENDOR_EXT
+ bool
+
+menu "Andes"
+config RISCV_ISA_VENDOR_EXT_ANDES
+ bool "Andes vendor extension support"
+ select RISCV_ISA_VENDOR_EXT
+ default y
+ help
+ Say N here if you want to disable all Andes vendor extension
+ support. This will cause any Andes vendor extensions that are
+ requested by hardware probing to be ignored.
+
+ If you don't know what to do here, say Y.
+endmenu
+
+endmenu
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 869c0345b908..4e9e7a28bf9b 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -18,7 +18,6 @@ OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
OBJCOPYFLAGS_loader.bin :=-O binary
OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-targets := Image Image.* loader loader.o loader.lds loader.bin
targets := Image Image.* loader loader.o loader.lds loader.bin xipImage
ifeq ($(CONFIG_XIP_KERNEL),y)
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh
index a8df7591513a..4b3d8bf91cc6 100755
--- a/arch/riscv/boot/install.sh
+++ b/arch/riscv/boot/install.sh
@@ -17,6 +17,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
case "${2##*/}" in
# Compressed install
Image.*|vmlinuz.efi)
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 3f1f055866af..0d678325444f 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -7,6 +7,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
@@ -35,9 +36,6 @@ CONFIG_ARCH_THEAD=y
CONFIG_ARCH_VIRT=y
CONFIG_ARCH_CANAAN=y
CONFIG_SMP=y
-CONFIG_HOTPLUG_CPU=y
-CONFIG_PM=y
-CONFIG_CPU_IDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
@@ -52,13 +50,11 @@ CONFIG_ACPI=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_SPARSEMEM_MANUAL=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_SPARSEMEM_MANUAL=y
CONFIG_NET=y
CONFIG_PACKET=y
-CONFIG_UNIX=y
CONFIG_XFRM_USER=m
-CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_PNP=y
@@ -102,9 +98,9 @@ CONFIG_NET_SCHED=y
CONFIG_NET_CLS_CGROUP=m
CONFIG_NETLINK_DIAG=y
CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CAN=m
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
-CONFIG_CAN=m
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
@@ -153,8 +149,8 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
@@ -179,7 +175,6 @@ CONFIG_DEVFREQ_THERMAL=y
CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
-CONFIG_RENESAS_RZG2LWDT=y
CONFIG_MFD_AXP20X_I2C=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -193,11 +188,9 @@ CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_SUN4I=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_RZ=m
CONFIG_SND_DESIGNWARE_I2S=m
CONFIG_SND_SOC_STARFIVE=m
CONFIG_SND_SOC_JH7110_PWMDAC=m
@@ -239,34 +232,31 @@ CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SDHCI_OF_DWCMSHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_STARFIVE=y
-CONFIG_MMC_SDHI=y
CONFIG_MMC_SUNXI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SUN6I=y
CONFIG_DMADEVICES=y
CONFIG_DMA_SUN6I=m
CONFIG_DW_AXI_DMAC=y
-CONFIG_RZ_DMAC=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
-CONFIG_RENESAS_OSTM=y
CONFIG_CLK_SOPHGO_CV1800=y
CONFIG_SUN8I_DE2_CCU=m
+CONFIG_RENESAS_OSTM=y
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
-CONFIG_ARCH_R9A07G043=y
+CONFIG_PM_DEVFREQ=y
CONFIG_IIO=y
-CONFIG_RZG2L_ADC=m
-CONFIG_RESET_RZG2L_USBPHY_CTRL=y
CONFIG_PHY_SUN4I_USB=m
CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c
index f2708a9494a1..fc1a34faa5f3 100644
--- a/arch/riscv/errata/andes/errata.c
+++ b/arch/riscv/errata/andes/errata.c
@@ -17,6 +17,7 @@
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/vendorid_list.h>
+#include <asm/vendor_extensions.h>
#define ANDES_AX45MP_MARCHID 0x8000000000008a45UL
#define ANDES_AX45MP_MIMPID 0x500UL
@@ -65,6 +66,8 @@ void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct al
unsigned long archid, unsigned long impid,
unsigned int stage)
{
+ BUILD_BUG_ON(ERRATA_ANDES_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
+
if (stage == RISCV_ALTERNATIVES_BOOT)
errata_probe_iocp(stage, archid, impid);
diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
index 716cfedad3a2..cea3b96ade11 100644
--- a/arch/riscv/errata/sifive/errata.c
+++ b/arch/riscv/errata/sifive/errata.c
@@ -12,6 +12,7 @@
#include <asm/alternative.h>
#include <asm/vendorid_list.h>
#include <asm/errata_list.h>
+#include <asm/vendor_extensions.h>
struct errata_info_t {
char name[32];
@@ -96,6 +97,8 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
u32 cpu_apply_errata = 0;
u32 tmp;
+ BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
+
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return;
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index bf6a0a6318ee..f5120e07c318 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -18,6 +18,7 @@
#include <asm/io.h>
#include <asm/patch.h>
#include <asm/vendorid_list.h>
+#include <asm/vendor_extensions.h>
#define CSR_TH_SXSTATUS 0x5c0
#define SXSTATUS_MAEE _AC(0x200000, UL)
@@ -166,6 +167,8 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
u32 tmp;
void *oldptr, *altptr;
+ BUILD_BUG_ON(ERRATA_THEAD_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
+
for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != THEAD_VENDOR_ID)
continue;
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
index 7dad0cf9d701..e0a1f84404f3 100644
--- a/arch/riscv/include/asm/acpi.h
+++ b/arch/riscv/include/asm/acpi.h
@@ -61,11 +61,14 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void acpi_init_rintc_map(void);
struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu);
-u32 get_acpi_id_for_cpu(int cpu);
+static inline u32 get_acpi_id_for_cpu(int cpu)
+{
+ return acpi_cpu_get_madt_rintc(cpu)->uid;
+}
+
int acpi_get_riscv_isa(struct acpi_table_header *table,
unsigned int cpu, const char **isa);
-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
u32 *cboz_size, u32 *cbop_size);
#else
@@ -87,4 +90,12 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
#endif /* CONFIG_ACPI */
+#ifdef CONFIG_ACPI_NUMA
+int acpi_numa_get_nid(unsigned int cpu);
+void acpi_map_cpus_to_nodes(void);
+#else
+static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+static inline void acpi_map_cpus_to_nodes(void) { }
+#endif /* CONFIG_ACPI_NUMA */
+
#endif /*_ASM_ACPI_H*/
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 880606b0469a..71af9ecfcfcb 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -170,7 +170,7 @@ legacy:
({ \
typeof(x) x_ = (x); \
__builtin_constant_p(x_) ? \
- (int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \
+ ((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \
: \
variable_fls(x_); \
})
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 000796c2d0b1..45f9c1171a48 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -33,6 +33,31 @@ extern struct riscv_isainfo hart_isa[NR_CPUS];
void riscv_user_isa_enable(void);
+#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \
+ .name = #_name, \
+ .property = #_name, \
+ .id = _id, \
+ .subset_ext_ids = _subset_exts, \
+ .subset_ext_size = _subset_exts_size, \
+ .validate = _validate \
+}
+
+#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, NULL)
+
+#define __RISCV_ISA_EXT_DATA_VALIDATE(_name, _id, _validate) \
+ _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, _validate)
+
+/* Used to declare pure "lasso" extension (Zk for instance) */
+#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
+ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \
+ ARRAY_SIZE(_bundled_exts), NULL)
+
+/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
+#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
+ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), NULL)
+#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
+ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
+
#if defined(CONFIG_RISCV_MISALIGNED)
bool check_unaligned_access_emulated_all_cpus(void);
void unaligned_emulation_finish(void);
@@ -79,59 +104,66 @@ extern bool riscv_isa_fallback;
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+#define STANDARD_EXT 0
+
bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit);
#define riscv_isa_extension_available(isa_bitmap, ext) \
__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
-static __always_inline bool
-riscv_has_extension_likely(const unsigned long ext)
+static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor,
+ const unsigned long ext)
{
- compiletime_assert(ext < RISCV_ISA_EXT_MAX,
- "ext must be < RISCV_ISA_EXT_MAX");
-
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm goto(
- ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
- :
- : [ext] "i" (ext)
- :
- : l_no);
- } else {
- if (!__riscv_isa_extension_available(NULL, ext))
- goto l_no;
- }
+ asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1)
+ :
+ : [vendor] "i" (vendor), [ext] "i" (ext)
+ :
+ : l_no);
return true;
l_no:
return false;
}
-static __always_inline bool
-riscv_has_extension_unlikely(const unsigned long ext)
+static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor,
+ const unsigned long ext)
{
- compiletime_assert(ext < RISCV_ISA_EXT_MAX,
- "ext must be < RISCV_ISA_EXT_MAX");
-
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm goto(
- ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
- :
- : [ext] "i" (ext)
- :
- : l_yes);
- } else {
- if (__riscv_isa_extension_available(NULL, ext))
- goto l_yes;
- }
+ asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1)
+ :
+ : [vendor] "i" (vendor), [ext] "i" (ext)
+ :
+ : l_yes);
return false;
l_yes:
return true;
}
+static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+ return __riscv_has_extension_unlikely(STANDARD_EXT, ext);
+
+ return __riscv_isa_extension_available(NULL, ext);
+}
+
+static __always_inline bool riscv_has_extension_likely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+ return __riscv_has_extension_likely(STANDARD_EXT, ext);
+
+ return __riscv_isa_extension_available(NULL, ext);
+}
+
static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
{
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) &&
+ __riscv_has_extension_likely(STANDARD_EXT, ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
@@ -139,7 +171,10 @@ static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsign
static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
{
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) &&
+ __riscv_has_extension_unlikely(STANDARD_EXT, ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index b18b202ca141..5a0bd27fd11a 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -80,19 +80,18 @@
#define RISCV_ISA_EXT_ZFA 71
#define RISCV_ISA_EXT_ZTSO 72
#define RISCV_ISA_EXT_ZACAS 73
-#define RISCV_ISA_EXT_XANDESPMU 74
-#define RISCV_ISA_EXT_ZVE32X 75
-#define RISCV_ISA_EXT_ZVE32F 76
-#define RISCV_ISA_EXT_ZVE64X 77
-#define RISCV_ISA_EXT_ZVE64F 78
-#define RISCV_ISA_EXT_ZVE64D 79
-#define RISCV_ISA_EXT_ZIMOP 80
-#define RISCV_ISA_EXT_ZCA 81
-#define RISCV_ISA_EXT_ZCB 82
-#define RISCV_ISA_EXT_ZCD 83
-#define RISCV_ISA_EXT_ZCF 84
-#define RISCV_ISA_EXT_ZCMOP 85
-#define RISCV_ISA_EXT_ZAWRS 86
+#define RISCV_ISA_EXT_ZVE32X 74
+#define RISCV_ISA_EXT_ZVE32F 75
+#define RISCV_ISA_EXT_ZVE64X 76
+#define RISCV_ISA_EXT_ZVE64F 77
+#define RISCV_ISA_EXT_ZVE64D 78
+#define RISCV_ISA_EXT_ZIMOP 79
+#define RISCV_ISA_EXT_ZCA 80
+#define RISCV_ISA_EXT_ZCB 81
+#define RISCV_ISA_EXT_ZCD 82
+#define RISCV_ISA_EXT_ZCF 83
+#define RISCV_ISA_EXT_ZCMOP 84
+#define RISCV_ISA_EXT_ZAWRS 85
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 150a9877b0af..ef01c182af2b 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 7
+#define RISCV_HWPROBE_MAX_KEY 8
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 235fd45d998d..7ede2111c591 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -37,7 +37,7 @@
* define the PAGE_OFFSET value for SV48 and SV39.
*/
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
-#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+#define PAGE_OFFSET_L3 _AC(0xffffffd600000000, UL)
#else
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif /* CONFIG_64BIT */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 5d473343634b..fca5c6be2b81 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -10,6 +10,7 @@
#include <asm/page.h>
#include <linux/const.h>
+#include <linux/sizes.h>
/* thread information allocation */
#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER
diff --git a/arch/riscv/include/asm/vendor_extensions.h b/arch/riscv/include/asm/vendor_extensions.h
new file mode 100644
index 000000000000..7437304a71b9
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2024 Rivos, Inc
+ */
+
+#ifndef _ASM_VENDOR_EXTENSIONS_H
+#define _ASM_VENDOR_EXTENSIONS_H
+
+#include <asm/cpufeature.h>
+
+#include <linux/array_size.h>
+#include <linux/types.h>
+
+/*
+ * The extension keys of each vendor must be strictly less than this value.
+ */
+#define RISCV_ISA_VENDOR_EXT_MAX 32
+
+struct riscv_isavendorinfo {
+ DECLARE_BITMAP(isa, RISCV_ISA_VENDOR_EXT_MAX);
+};
+
+struct riscv_isa_vendor_ext_data_list {
+ bool is_initialized;
+ const size_t ext_data_count;
+ const struct riscv_isa_ext_data *ext_data;
+ struct riscv_isavendorinfo per_hart_isa_bitmap[NR_CPUS];
+ struct riscv_isavendorinfo all_harts_isa_bitmap;
+};
+
+extern struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[];
+
+extern const size_t riscv_isa_vendor_ext_list_size;
+
+/*
+ * The alternatives need some way of distinguishing between vendor extensions
+ * and errata. Incrementing all of the vendor extension keys so they are at
+ * least 0x8000 accomplishes that.
+ */
+#define RISCV_VENDOR_EXT_ALTERNATIVES_BASE 0x8000
+
+#define VENDOR_EXT_ALL_CPUS -1
+
+bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit);
+#define riscv_cpu_isa_vendor_extension_available(cpu, vendor, ext) \
+ __riscv_isa_vendor_extension_available(cpu, vendor, RISCV_ISA_VENDOR_EXT_##ext)
+#define riscv_isa_vendor_extension_available(vendor, ext) \
+ __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, \
+ RISCV_ISA_VENDOR_EXT_##ext)
+
+static __always_inline bool riscv_has_vendor_extension_likely(const unsigned long vendor,
+ const unsigned long ext)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+ return false;
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+ return __riscv_has_extension_likely(vendor,
+ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
+
+ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext);
+}
+
+static __always_inline bool riscv_has_vendor_extension_unlikely(const unsigned long vendor,
+ const unsigned long ext)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+ return false;
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+ return __riscv_has_extension_unlikely(vendor,
+ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
+
+ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext);
+}
+
+static __always_inline bool riscv_cpu_has_vendor_extension_likely(const unsigned long vendor,
+ int cpu, const unsigned long ext)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+ return false;
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) &&
+ __riscv_has_extension_likely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE))
+ return true;
+
+ return __riscv_isa_vendor_extension_available(cpu, vendor, ext);
+}
+
+static __always_inline bool riscv_cpu_has_vendor_extension_unlikely(const unsigned long vendor,
+ int cpu,
+ const unsigned long ext)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+ return false;
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) &&
+ __riscv_has_extension_unlikely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE))
+ return true;
+
+ return __riscv_isa_vendor_extension_available(cpu, vendor, ext);
+}
+
+#endif /* _ASM_VENDOR_EXTENSIONS_H */
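
The header above keeps one bitmap per hart plus an all_harts_isa_bitmap, and VENDOR_EXT_ALL_CPUS (-1) selects the latter; the all-harts bitmap is the intersection of the per-hart ones (built by riscv_fill_vendor_ext_list() further down). A toy userspace model of that bookkeeping, with made-up bitmap values, for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define NR_HARTS                3
#define VENDOR_EXT_ALL_CPUS     -1

/* Per-hart vendor-extension bitmaps (bit n == vendor extension n). */
static unsigned long per_hart[NR_HARTS] = { 0xb, 0x9, 0xd };
static unsigned long all_harts;

static void fill_all_harts(void)
{
        all_harts = per_hart[0];
        for (int cpu = 1; cpu < NR_HARTS; cpu++)
                all_harts &= per_hart[cpu];     /* keep only what every hart has */
}

static bool vendor_ext_available(int cpu, unsigned int bit)
{
        unsigned long bmap = (cpu == VENDOR_EXT_ALL_CPUS) ? all_harts : per_hart[cpu];

        return bmap & (1UL << bit);
}

int main(void)
{
        fill_all_harts();       /* 0xb & 0x9 & 0xd == 0x9 */

        printf("bit 1 on cpu 0:    %d\n", vendor_ext_available(0, 1));  /* 1 */
        printf("bit 1 on all cpus: %d\n",
               vendor_ext_available(VENDOR_EXT_ALL_CPUS, 1));           /* 0 */
        printf("bit 3 on all cpus: %d\n",
               vendor_ext_available(VENDOR_EXT_ALL_CPUS, 3));           /* 1 */
        return 0;
}
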
diff --git a/arch/riscv/include/asm/vendor_extensions/andes.h b/arch/riscv/include/asm/vendor_extensions/andes.h
new file mode 100644
index 000000000000..7bb2fc43438f
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/andes.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H
+
+#include <asm/vendor_extensions.h>
+
+#include <linux/types.h>
+
+#define RISCV_ISA_VENDOR_EXT_XANDESPMU 0
+
+/*
+ * Extension keys should be strictly less than max.
+ * It is safe to increment this when necessary.
+ */
+#define RISCV_ISA_VENDOR_EXT_MAX_ANDES 32
+
+extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes;
+
+#endif
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 8b8f6ac0eae2..b706c8e47b02 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -81,6 +81,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7
+#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
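
The new RISCV_HWPROBE_KEY_TIME_CSR_FREQ key reports the time CSR tick rate (riscv_timebase, see the sys_hwprobe.c hunk further down). A minimal userspace sketch of querying it through the riscv_hwprobe syscall; this assumes installed headers that already carry the new key and __NR_riscv_hwprobe:

#include <asm/hwprobe.h>        /* struct riscv_hwprobe and the key constants */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_TIME_CSR_FREQ };

        /* One key/value pair, queried across all CPUs (empty cpu set), no flags. */
        if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
                perror("riscv_hwprobe");
                return 1;
        }

        if (pair.key < 0)
                printf("TIME_CSR_FREQ is not known to this kernel\n");
        else
                printf("time CSR frequency: %llu Hz\n",
                       (unsigned long long)pair.value);
        return 0;
}
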
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 5b243d46f4b1..06d407f1b30b 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -58,6 +58,8 @@ obj-y += riscv_ksyms.o
obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
+obj-y += vendor_extensions.o
+obj-y += vendor_extensions/
obj-y += probes/
obj-y += tests/
obj-$(CONFIG_MMU) += vdso.o vdso/
@@ -110,3 +112,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/
obj-$(CONFIG_64BIT) += pi/
obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c
index e619edc8b0cc..ba957aaca5cb 100644
--- a/arch/riscv/kernel/acpi.c
+++ b/arch/riscv/kernel/acpi.c
@@ -17,7 +17,9 @@
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
#include <linux/pci.h>
+#include <linux/serial_core.h>
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
@@ -131,7 +133,7 @@ void __init acpi_boot_table_init(void)
if (param_acpi_off ||
(!param_acpi_on && !param_acpi_force &&
efi.acpi20 == EFI_INVALID_TABLE_ADDR))
- return;
+ goto done;
/*
* ACPI is disabled at this point. Enable it in order to parse
@@ -151,6 +153,14 @@ void __init acpi_boot_table_init(void)
if (!param_acpi_force)
disable_acpi();
}
+
+done:
+ if (acpi_disabled) {
+ if (earlycon_acpi_spcr_enable)
+ early_init_dt_scan_chosen_stdout();
+ } else {
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
+ }
}
static int acpi_parse_madt_rintc(union acpi_subtable_headers *header, const unsigned long end)
@@ -191,11 +201,6 @@ struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
return &cpu_madt_rintc[cpu];
}
-u32 get_acpi_id_for_cpu(int cpu)
-{
- return acpi_cpu_get_madt_rintc(cpu)->uid;
-}
-
/*
* __acpi_map_table() will be called before paging_init(), so early_ioremap()
* or early_memremap() should be called here to for ACPI table mapping.
diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c
new file mode 100644
index 000000000000..0231482d6946
--- /dev/null
+++ b/arch/riscv/kernel/acpi_numa.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI 6.6 based NUMA setup for RISCV
+ * Lots of code was borrowed from arch/arm64/kernel/acpi_numa.c
+ *
+ * Copyright 2004 Andi Kleen, SuSE Labs.
+ * Copyright (C) 2013-2016, Linaro Ltd.
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ * Copyright (C) 2024 Intel Corporation.
+ *
+ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
+ *
+ * Called from acpi_numa_init while reading the SRAT and SLIT tables.
+ * Assumes all memory regions belonging to a single proximity domain
+ * are in one chunk. Holes between them will be included in the node.
+ */
+
+#define pr_fmt(fmt) "ACPI: NUMA: " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/topology.h>
+
+#include <asm/numa.h>
+
+static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
+
+int __init acpi_numa_get_nid(unsigned int cpu)
+{
+ return acpi_early_node_map[cpu];
+}
+
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
+
+ return -EINVAL;
+}
+
+static int __init acpi_parse_rintc_pxm(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_rintc_affinity *pa;
+ int cpu, pxm, node;
+
+ if (srat_disabled())
+ return -EINVAL;
+
+ pa = (struct acpi_srat_rintc_affinity *)header;
+ if (!pa)
+ return -EINVAL;
+
+ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
+ return 0;
+
+ pxm = pa->proximity_domain;
+ node = pxm_to_node(pxm);
+
+ /*
+ * If we can't map the UID to a logical cpu this
+ * means that the UID is not part of possible cpus
+ * so we do not need a NUMA mapping for it, skip
+ * the SRAT entry and keep parsing.
+ */
+ cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
+ if (cpu < 0)
+ return 0;
+
+ acpi_early_node_map[cpu] = node;
+ pr_info("SRAT: PXM %d -> HARTID 0x%lx -> Node %d\n", pxm,
+ cpuid_to_hartid_map(cpu), node);
+
+ return 0;
+}
+
+void __init acpi_map_cpus_to_nodes(void)
+{
+ int i;
+
+ /*
+ * In ACPI, SMP and CPU NUMA information is provided in separate
+ * static tables, namely the MADT and the SRAT.
+ *
+ * Thus, it is simpler to first create the cpu logical map through
+ * an MADT walk and then map the logical cpus to their node ids
+ * as separate steps.
+ */
+ acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_RINTC_AFFINITY, acpi_parse_rintc_pxm, 0);
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ early_map_cpu_to_node(i, acpi_numa_get_nid(i));
+}
+
+/* Callback for Proximity Domain -> logical node ID mapping */
+void __init acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa)
+{
+ int pxm, node;
+
+ if (srat_disabled())
+ return;
+
+ if (pa->header.length < sizeof(struct acpi_srat_rintc_affinity)) {
+ pr_err("SRAT: Invalid SRAT header length: %d\n", pa->header.length);
+ bad_srat();
+ return;
+ }
+
+ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
+ return;
+
+ pxm = pa->proximity_domain;
+ node = acpi_map_pxm_to_node(pxm);
+
+ if (node == NUMA_NO_NODE) {
+ pr_err("SRAT: Too many proximity domains %d\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ node_set(node, numa_nodes_parsed);
+}
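
acpi_numa.c above walks the SRAT RINTC affinity entries, maps each ACPI processor UID back to a logical CPU, and records the node of that CPU's proximity domain; acpi_map_cpus_to_nodes() then hands the result to early_map_cpu_to_node(). A toy model of the UID -> CPU -> node bookkeeping, with invented table contents, for illustration only:

#include <stdio.h>

#define NR_CPUS         4
#define NUMA_NO_NODE    (-1)

/* Pretend MADT-derived mapping: logical cpu -> ACPI processor UID. */
static const unsigned int acpi_uid_for_cpu[NR_CPUS] = { 10, 11, 20, 21 };
static int early_node_map[NR_CPUS];

static int cpu_for_acpi_uid(unsigned int uid)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (acpi_uid_for_cpu[cpu] == uid)
                        return cpu;
        return -1;      /* UID not among possible cpus: skip, as the parser does */
}

int main(void)
{
        /* Pretend SRAT RINTC affinity entries: (ACPI UID, proximity domain). */
        const struct { unsigned int uid; int node; } srat[] = {
                { 10, 0 }, { 11, 0 }, { 20, 1 }, { 21, 1 }, { 99, 1 },
        };

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                early_node_map[cpu] = NUMA_NO_NODE;

        for (unsigned int i = 0; i < sizeof(srat) / sizeof(srat[0]); i++) {
                int cpu = cpu_for_acpi_uid(srat[i].uid);

                if (cpu >= 0)
                        early_node_map[cpu] = srat[i].node;
        }

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%d -> node %d\n", cpu, early_node_map[cpu]);
        return 0;
}
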
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 09e9b88110d1..d6c108c50cba 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017 SiFive
*/
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <asm/cacheinfo.h>
@@ -64,7 +65,6 @@ uintptr_t get_cache_geometry(u32 level, enum cache_type type)
}
static void ci_leaf_init(struct cacheinfo *this_leaf,
- struct device_node *node,
enum cache_type type, unsigned int level)
{
this_leaf->level = level;
@@ -79,12 +79,33 @@ int populate_cache_leaves(unsigned int cpu)
struct device_node *prev = NULL;
int levels = 1, level = 1;
+ if (!acpi_disabled) {
+ int ret, fw_levels, split_levels;
+
+ ret = acpi_get_cache_info(cpu, &fw_levels, &split_levels);
+ if (ret)
+ return ret;
+
+ BUG_ON((split_levels > fw_levels) ||
+ (split_levels + fw_levels > this_cpu_ci->num_leaves));
+
+ for (; level <= this_cpu_ci->num_levels; level++) {
+ if (level <= split_levels) {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
+ }
+ }
+ return 0;
+ }
+
if (of_property_read_bool(np, "cache-size"))
- ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
if (of_property_read_bool(np, "i-cache-size"))
- ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
if (of_property_read_bool(np, "d-cache-size"))
- ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
prev = np;
while ((np = of_find_next_cache_node(np))) {
@@ -97,11 +118,11 @@ int populate_cache_leaves(unsigned int cpu)
if (level <= levels)
break;
if (of_property_read_bool(np, "cache-size"))
- ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
if (of_property_read_bool(np, "i-cache-size"))
- ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
if (of_property_read_bool(np, "d-cache-size"))
- ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
levels = level;
}
of_node_put(np);
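
With ACPI enabled, populate_cache_leaves() above sizes the topology from the PPTT via acpi_get_cache_info(): split_levels says how many of the reported levels have separate instruction and data caches, and every level above that gets a single unified leaf. A tiny model of that loop, with made-up numbers standing in for the firmware-reported values:

#include <stdio.h>

int main(void)
{
        /* Values acpi_get_cache_info() might report; assumed for the demo. */
        int fw_levels = 3, split_levels = 1;

        for (int level = 1; level <= fw_levels; level++) {
                if (level <= split_levels)
                        printf("L%d: DATA leaf + INST leaf\n", level);
                else
                        printf("L%d: UNIFIED leaf\n", level);
        }
        return 0;
}
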
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index c1f3655238fd..f6b13e9f5e6c 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -16,6 +16,7 @@
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
+#include <asm/vendor_extensions.h>
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
@@ -235,7 +236,33 @@ arch_initcall(riscv_cpuinfo_init);
#ifdef CONFIG_PROC_FS
-static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap)
+#define ALL_CPUS -1
+
+static void print_vendor_isa(struct seq_file *f, int cpu)
+{
+ struct riscv_isavendorinfo *vendor_bitmap;
+ struct riscv_isa_vendor_ext_data_list *ext_list;
+ const struct riscv_isa_ext_data *ext_data;
+
+ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) {
+ ext_list = riscv_isa_vendor_ext_list[i];
+ ext_data = riscv_isa_vendor_ext_list[i]->ext_data;
+
+ if (cpu == ALL_CPUS)
+ vendor_bitmap = &ext_list->all_harts_isa_bitmap;
+ else
+ vendor_bitmap = &ext_list->per_hart_isa_bitmap[cpu];
+
+ for (int j = 0; j < ext_list->ext_data_count; j++) {
+ if (!__riscv_isa_extension_available(vendor_bitmap->isa, ext_data[j].id))
+ continue;
+
+ seq_printf(f, "_%s", ext_data[j].name);
+ }
+ }
+}
+
+static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap, int cpu)
{
if (IS_ENABLED(CONFIG_32BIT))
@@ -254,6 +281,8 @@ static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap)
seq_printf(f, "%s", riscv_isa_ext[i].name);
}
+ print_vendor_isa(f, cpu);
+
seq_puts(f, "\n");
}
@@ -316,7 +345,7 @@ static int c_show(struct seq_file *m, void *v)
* line.
*/
seq_puts(m, "isa\t\t: ");
- print_isa(m, NULL);
+ print_isa(m, NULL, ALL_CPUS);
print_mmu(m);
if (acpi_disabled) {
@@ -338,7 +367,7 @@ static int c_show(struct seq_file *m, void *v)
* additional extensions not present across all harts.
*/
seq_puts(m, "hart isa\t: ");
- print_isa(m, hart_isa[cpu_id].isa);
+ print_isa(m, hart_isa[cpu_id].isa, cpu_id);
seq_puts(m, "\n");
return 0;
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 0366dc3baf33..8f20607adb40 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -24,6 +24,7 @@
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/vector.h>
+#include <asm/vendor_extensions.h>
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
@@ -100,31 +101,6 @@ static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data,
return 0;
}
-#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \
- .name = #_name, \
- .property = #_name, \
- .id = _id, \
- .subset_ext_ids = _subset_exts, \
- .subset_ext_size = _subset_exts_size, \
- .validate = _validate \
-}
-
-#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, NULL)
-
-#define __RISCV_ISA_EXT_DATA_VALIDATE(_name, _id, _validate) \
- _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, _validate)
-
-/* Used to declare pure "lasso" extension (Zk for instance) */
-#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
- _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \
- ARRAY_SIZE(_bundled_exts), NULL)
-
-/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
-#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
- _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), NULL)
-#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
- _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
-
static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data,
const unsigned long *isa_bitmap)
{
@@ -405,7 +381,6 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
- __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_EXT_XANDESPMU),
};
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
@@ -512,6 +487,21 @@ static void __init riscv_parse_isa_string(const char *isa, unsigned long *bitmap
bool ext_err = false;
switch (*ext) {
+ case 'x':
+ case 'X':
+ if (acpi_disabled)
+ pr_warn_once("Vendor extensions are ignored in riscv,isa. Use riscv,isa-extensions instead.");
+ /*
+ * To skip an extension, we find its end.
+ * As multi-letter extensions must be split from other multi-letter
+ * extensions with an "_", the end of a multi-letter extension will
+ * either be the null character or the "_" at the start of the next
+ * multi-letter extension.
+ */
+ for (; *isa && *isa != '_'; ++isa)
+ ;
+ ext_err = true;
+ break;
case 's':
/*
* Workaround for invalid single-letter 's' & 'u' (QEMU).
@@ -527,8 +517,6 @@ static void __init riscv_parse_isa_string(const char *isa, unsigned long *bitmap
}
fallthrough;
case 'S':
- case 'x':
- case 'X':
case 'z':
case 'Z':
/*
@@ -728,6 +716,61 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
acpi_put_table((struct acpi_table_header *)rhct);
}
+static void __init riscv_fill_cpu_vendor_ext(struct device_node *cpu_node, int cpu)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+ return;
+
+ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) {
+ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i];
+
+ for (int j = 0; j < ext_list->ext_data_count; j++) {
+ const struct riscv_isa_ext_data ext = ext_list->ext_data[j];
+ struct riscv_isavendorinfo *isavendorinfo = &ext_list->per_hart_isa_bitmap[cpu];
+
+ if (of_property_match_string(cpu_node, "riscv,isa-extensions",
+ ext.property) < 0)
+ continue;
+
+ /*
+ * Assume that subset extensions are all members of the
+ * same vendor.
+ */
+ if (ext.subset_ext_size)
+ for (int k = 0; k < ext.subset_ext_size; k++)
+ set_bit(ext.subset_ext_ids[k], isavendorinfo->isa);
+
+ set_bit(ext.id, isavendorinfo->isa);
+ }
+ }
+}
+
+/*
+ * Populate all_harts_isa_bitmap for each vendor with all of the extensions that
+ * are shared across CPUs for that vendor.
+ */
+static void __init riscv_fill_vendor_ext_list(int cpu)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT))
+ return;
+
+ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) {
+ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i];
+
+ if (!ext_list->is_initialized) {
+ bitmap_copy(ext_list->all_harts_isa_bitmap.isa,
+ ext_list->per_hart_isa_bitmap[cpu].isa,
+ RISCV_ISA_VENDOR_EXT_MAX);
+ ext_list->is_initialized = true;
+ } else {
+ bitmap_and(ext_list->all_harts_isa_bitmap.isa,
+ ext_list->all_harts_isa_bitmap.isa,
+ ext_list->per_hart_isa_bitmap[cpu].isa,
+ RISCV_ISA_VENDOR_EXT_MAX);
+ }
+ }
+}
+
static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
{
unsigned int cpu;
@@ -760,6 +803,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
}
riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap);
+ riscv_fill_cpu_vendor_ext(cpu_node, cpu);
of_node_put(cpu_node);
@@ -776,6 +820,8 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
else
bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
+
+ riscv_fill_vendor_ext_list(cpu);
}
if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
@@ -918,28 +964,45 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
{
struct alt_entry *alt;
void *oldptr, *altptr;
- u16 id, value;
+ u16 id, value, vendor;
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return;
for (alt = begin; alt < end; alt++) {
- if (alt->vendor_id != 0)
- continue;
-
id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);
+ vendor = PATCH_ID_CPUFEATURE_ID(alt->vendor_id);
- if (id >= RISCV_ISA_EXT_MAX) {
- WARN(1, "This extension id:%d is not in ISA extension list", id);
- continue;
- }
+ /*
+ * Any alternative with a patch_id that is less than
+ * RISCV_ISA_EXT_MAX is interpreted as a standard extension.
+ *
+ * Any alternative with patch_id that is greater than or equal
+ * to RISCV_VENDOR_EXT_ALTERNATIVES_BASE is interpreted as a
+ * vendor extension.
+ */
+ if (id < RISCV_ISA_EXT_MAX) {
+ /*
+ * This patch should be treated as errata so skip
+ * processing here.
+ */
+ if (alt->vendor_id != 0)
+ continue;
- if (!__riscv_isa_extension_available(NULL, id))
- continue;
+ if (!__riscv_isa_extension_available(NULL, id))
+ continue;
- value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
- if (!riscv_cpufeature_patch_check(id, value))
+ value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
+ if (!riscv_cpufeature_patch_check(id, value))
+ continue;
+ } else if (id >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE) {
+ if (!__riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor,
+ id - RISCV_VENDOR_EXT_ALTERNATIVES_BASE))
+ continue;
+ } else {
+ WARN(1, "This extension id:%d is not in ISA extension list", id);
continue;
+ }
oldptr = ALT_OLD_PTR(alt);
altptr = ALT_ALT_PTR(alt);
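
The reworked riscv_cpufeature_patch_func() above splits the alternative patch_id space: ids below RISCV_ISA_EXT_MAX are standard extensions, ids at or above RISCV_VENDOR_EXT_ALTERNATIVES_BASE (0x8000) are vendor extensions after subtracting the base, and anything in between is warned about and skipped. A small standalone model of that classification:

#include <stdio.h>

#define RISCV_ISA_EXT_MAX                       128
#define RISCV_VENDOR_EXT_ALTERNATIVES_BASE      0x8000

static void classify(unsigned int id)
{
        if (id < RISCV_ISA_EXT_MAX)
                printf("%#06x: standard extension %u\n", id, id);
        else if (id >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE)
                printf("%#06x: vendor extension %u\n",
                       id, id - RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
        else
                printf("%#06x: not in any extension list (warned and skipped)\n", id);
}

int main(void)
{
        classify(73);           /* RISCV_ISA_EXT_ZACAS in the table above */
        classify(0x8000);       /* vendor extension key 0, e.g. XANDESPMU */
        classify(0x1000);       /* neither range: rejected */
        return 0;
}
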
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 68a24cf9481a..ac2e908d4418 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -88,7 +88,6 @@ SYM_CODE_START(handle_exception)
call riscv_v_context_nesting_start
#endif
move a0, sp /* pt_regs */
- la ra, ret_from_exception
/*
* MSB of cause differentiates between
@@ -97,7 +96,8 @@ SYM_CODE_START(handle_exception)
bge s4, zero, 1f
/* Handle interrupts */
- tail do_irq
+ call do_irq
+ j ret_from_exception
1:
/* Handle other exceptions */
slli t0, s4, RISCV_LGPTR
@@ -105,11 +105,14 @@ SYM_CODE_START(handle_exception)
la t2, excp_vect_table_end
add t0, t1, t0
/* Check if exception code lies within bounds */
- bgeu t0, t2, 1f
- REG_L t0, 0(t0)
- jr t0
-1:
- tail do_trap_unknown
+ bgeu t0, t2, 3f
+ REG_L t1, 0(t0)
+2: jalr t1
+ j ret_from_exception
+3:
+
+ la t1, do_trap_unknown
+ j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
@@ -130,6 +133,10 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
#endif
bnez s0, 1f
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ call stackleak_erase_on_task_stack
+#endif
+
/* Save unwound kernel stack pointer in thread_info */
addi s0, sp, PT_SIZE_ON_STACK
REG_S s0, TASK_TI_KERNEL_SP(tp)
diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile
index 8265ff497977..d2129f2c61b8 100644
--- a/arch/riscv/kernel/probes/Makefile
+++ b/arch/riscv/kernel/probes/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o
-obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
deleted file mode 100644
index a69dfa610aa8..000000000000
--- a/arch/riscv/kernel/probes/ftrace.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/kprobes.h>
-
-/* Ftrace callback handler for kprobes -- called under preempt disabled */
-void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct ftrace_regs *fregs)
-{
- struct kprobe *p;
- struct pt_regs *regs;
- struct kprobe_ctlblk *kcb;
- int bit;
-
- if (unlikely(kprobe_ftrace_disabled))
- return;
-
- bit = ftrace_test_recursion_trylock(ip, parent_ip);
- if (bit < 0)
- return;
-
- p = get_kprobe((kprobe_opcode_t *)ip);
- if (unlikely(!p) || kprobe_disabled(p))
- goto out;
-
- regs = ftrace_get_regs(fregs);
- kcb = get_kprobe_ctlblk();
- if (kprobe_running()) {
- kprobes_inc_nmissed_count(p);
- } else {
- unsigned long orig_ip = instruction_pointer(regs);
-
- instruction_pointer_set(regs, ip);
-
- __this_cpu_write(current_kprobe, p);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs)) {
- /*
- * Emulate singlestep (and also recover regs->pc)
- * as if there is a nop
- */
- instruction_pointer_set(regs,
- (unsigned long)p->addr + MCOUNT_INSN_SIZE);
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- instruction_pointer_set(regs, orig_ip);
- }
-
- /*
- * If pre_handler returns !0, it changes regs->pc. We have to
- * skip emulating post_handler.
- */
- __this_cpu_write(current_kprobe, NULL);
- }
-out:
- ftrace_test_recursion_unlock(bit);
-}
-NOKPROBE_SYMBOL(kprobe_ftrace_handler);
-
-int arch_prepare_kprobe_ftrace(struct kprobe *p)
-{
- p->ainsn.api.insn = NULL;
- return 0;
-}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 4f73c0ae44b2..a2cde65b69e9 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -281,8 +281,10 @@ void __init setup_arch(char **cmdline_p)
setup_smp();
#endif
- if (!acpi_disabled)
+ if (!acpi_disabled) {
acpi_init_rintc_map();
+ acpi_map_cpus_to_nodes();
+ }
riscv_init_cbo_blocksizes();
riscv_fill_hwcap();
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 5a2edd7f027e..dcd282419456 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -84,7 +84,7 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
datap = state + 1;
/* datap is designed to be 16 byte aligned for better performance */
- WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16)));
+ WARN_ON(!IS_ALIGNED((unsigned long)datap, 16));
get_cpu_vector_context();
riscv_v_vstate_save(&current->thread.vstate, regs);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 19baf0d574d3..0f8f1c95ac38 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -96,7 +96,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = true;
- early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count));
return 0;
}
@@ -106,7 +105,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
}
cpuid_to_hartid_map(cpu_count) = hart;
- early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count));
cpu_count++;
return 0;
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 10e311b2759d..c6d5de22463f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -16,7 +16,7 @@
#ifdef CONFIG_FRAME_POINTER
-extern asmlinkage void ret_from_exception(void);
+extern asmlinkage void handle_exception(void);
static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{
@@ -71,7 +71,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame->fp;
pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
&frame->ra);
- if (pc == (unsigned long)ret_from_exception) {
+ if (pc == (unsigned long)handle_exception) {
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
break;
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 685594769535..8d1b5c35d2a7 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -9,6 +9,7 @@
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
+#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
@@ -93,44 +94,45 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
* regardless of the kernel's configuration, as no other checks, besides
* presence in the hart_isa bitmap, are made.
*/
+ EXT_KEY(ZACAS);
+ EXT_KEY(ZAWRS);
EXT_KEY(ZBA);
EXT_KEY(ZBB);
- EXT_KEY(ZBS);
- EXT_KEY(ZICBOZ);
EXT_KEY(ZBC);
-
EXT_KEY(ZBKB);
EXT_KEY(ZBKC);
EXT_KEY(ZBKX);
+ EXT_KEY(ZBS);
+ EXT_KEY(ZCA);
+ EXT_KEY(ZCB);
+ EXT_KEY(ZCMOP);
+ EXT_KEY(ZICBOZ);
+ EXT_KEY(ZICOND);
+ EXT_KEY(ZIHINTNTL);
+ EXT_KEY(ZIHINTPAUSE);
+ EXT_KEY(ZIMOP);
EXT_KEY(ZKND);
EXT_KEY(ZKNE);
EXT_KEY(ZKNH);
EXT_KEY(ZKSED);
EXT_KEY(ZKSH);
EXT_KEY(ZKT);
- EXT_KEY(ZIHINTNTL);
EXT_KEY(ZTSO);
- EXT_KEY(ZACAS);
- EXT_KEY(ZICOND);
- EXT_KEY(ZIHINTPAUSE);
- EXT_KEY(ZIMOP);
- EXT_KEY(ZCA);
- EXT_KEY(ZCB);
- EXT_KEY(ZCMOP);
- EXT_KEY(ZAWRS);
/*
* All the following extensions must depend on the kernel
* support of V.
*/
if (has_vector()) {
- EXT_KEY(ZVE32X);
- EXT_KEY(ZVE32F);
- EXT_KEY(ZVE64X);
- EXT_KEY(ZVE64F);
- EXT_KEY(ZVE64D);
EXT_KEY(ZVBB);
EXT_KEY(ZVBC);
+ EXT_KEY(ZVE32F);
+ EXT_KEY(ZVE32X);
+ EXT_KEY(ZVE64D);
+ EXT_KEY(ZVE64F);
+ EXT_KEY(ZVE64X);
+ EXT_KEY(ZVFH);
+ EXT_KEY(ZVFHMIN);
EXT_KEY(ZVKB);
EXT_KEY(ZVKG);
EXT_KEY(ZVKNED);
@@ -139,16 +141,14 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZVKSED);
EXT_KEY(ZVKSH);
EXT_KEY(ZVKT);
- EXT_KEY(ZVFH);
- EXT_KEY(ZVFHMIN);
}
if (has_fpu()) {
- EXT_KEY(ZFH);
- EXT_KEY(ZFHMIN);
- EXT_KEY(ZFA);
EXT_KEY(ZCD);
EXT_KEY(ZCF);
+ EXT_KEY(ZFA);
+ EXT_KEY(ZFH);
+ EXT_KEY(ZFHMIN);
}
#undef EXT_KEY
}
@@ -237,6 +237,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
pair->value = user_max_virt_addr();
break;
+ case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
+ pair->value = riscv_timebase;
+ break;
+
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0
diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c
new file mode 100644
index 000000000000..b6c1e7b5d34b
--- /dev/null
+++ b/arch/riscv/kernel/vendor_extensions.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2024 Rivos, Inc
+ */
+
+#include <asm/vendorid_list.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/andes.h>
+
+#include <linux/array_size.h>
+#include <linux/types.h>
+
+struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[] = {
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES
+ &riscv_isa_vendor_ext_list_andes,
+#endif
+};
+
+const size_t riscv_isa_vendor_ext_list_size = ARRAY_SIZE(riscv_isa_vendor_ext_list);
+
+/**
+ * __riscv_isa_vendor_extension_available() - Check whether given vendor
+ * extension is available or not.
+ *
+ * @cpu: check if extension is available on this cpu
+ * @vendor: vendor that the extension is a member of
+ * @bit: bit position of the desired extension
+ * Return: true or false
+ *
+ * NOTE: When cpu is -1, this checks whether the extension is available on all cpus
+ */
+bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit)
+{
+ struct riscv_isavendorinfo *bmap;
+ struct riscv_isavendorinfo *cpu_bmap;
+
+ switch (vendor) {
+ #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES
+ case ANDES_VENDOR_ID:
+ bmap = &riscv_isa_vendor_ext_list_andes.all_harts_isa_bitmap;
+ cpu_bmap = riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap;
+ break;
+ #endif
+ default:
+ return false;
+ }
+
+ if (cpu != -1)
+ bmap = &cpu_bmap[cpu];
+
+ if (bit >= RISCV_ISA_VENDOR_EXT_MAX)
+ return false;
+
+ return test_bit(bit, bmap->isa);
+}
+EXPORT_SYMBOL_GPL(__riscv_isa_vendor_extension_available);
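
A hedged in-kernel sketch of how a caller might use the exported helper above; every identifier comes from the new files in this series except the example_ wrapper, which is hypothetical.

/*
 * Sketch only: check that the Andes PMU vendor extension is present on
 * every hart before relying on it.
 */
#include <asm/vendor_extensions.h>
#include <asm/vendor_extensions/andes.h>
#include <asm/vendorid_list.h>

static bool example_have_xandespmu(void)
{
	/* cpu == -1: require the extension on all harts */
	return __riscv_isa_vendor_extension_available(-1, ANDES_VENDOR_ID,
						      RISCV_ISA_VENDOR_EXT_XANDESPMU);
}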
diff --git a/arch/riscv/kernel/vendor_extensions/Makefile b/arch/riscv/kernel/vendor_extensions/Makefile
new file mode 100644
index 000000000000..6a61aed944f1
--- /dev/null
+++ b/arch/riscv/kernel/vendor_extensions/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_ANDES) += andes.o
diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c
new file mode 100644
index 000000000000..ec688c88456a
--- /dev/null
+++ b/arch/riscv/kernel/vendor_extensions/andes.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/cpufeature.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/andes.h>
+
+#include <linux/array_size.h>
+#include <linux/types.h>
+
+/* All Andes vendor extensions supported in Linux */
+const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
+ __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU),
+};
+
+struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes = {
+ .ext_data_count = ARRAY_SIZE(riscv_isa_vendor_ext_andes),
+ .ext_data = riscv_isa_vendor_ext_andes,
+};
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 59e0d861e26f..a822f952f64a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -799,17 +799,6 @@ config HAVE_PNETID
menu "Virtualization"
-config PROTECTED_VIRTUALIZATION_GUEST
- def_bool n
- prompt "Protected virtualization guest support"
- help
- Select this option, if you want to be able to run this
- kernel as a protected virtualization KVM guest.
- Protected virtualization capable machines have a mini hypervisor
- located at machine level (an ultravisor). With help of the
- Ultravisor, KVM will be able to run "protected" VMs, special
- VMs whose memory and management data are unavailable to KVM.
-
config PFAULT
def_bool y
prompt "Pseudo page fault support"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index c2978cb03b36..91a30e017d65 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -46,9 +46,9 @@
* /proc entries (sysctl)
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
-static int appldata_timer_handler(struct ctl_table *ctl, int write,
+static int appldata_timer_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int appldata_interval_handler(struct ctl_table *ctl, int write,
+static int appldata_interval_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static struct ctl_table_header *appldata_sysctl_header;
@@ -199,7 +199,7 @@ static void __appldata_vtimer_setup(int cmd)
* Start/Stop timer, show status of timer (0 = not active, 1 = active)
*/
static int
-appldata_timer_handler(struct ctl_table *ctl, int write,
+appldata_timer_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int timer_active = appldata_timer_active;
@@ -232,7 +232,7 @@ appldata_timer_handler(struct ctl_table *ctl, int write,
* current timer interval.
*/
static int
-appldata_interval_handler(struct ctl_table *ctl, int write,
+appldata_interval_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int interval = appldata_interval;
@@ -262,7 +262,7 @@ appldata_interval_handler(struct ctl_table *ctl, int write,
* monitoring (0 = not in process, 1 = in process)
*/
static int
-appldata_generic_handler(struct ctl_table *ctl, int write,
+appldata_generic_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
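
The four conversions above switch the appldata handlers to the constified sysctl prototype. The sketch below shows a complete handler written against that prototype, assuming the proc_dointvec() family accepts the const table pointer as elsewhere in this series; example_handler, example_value and example_table are made-up names.

#include <linux/sysctl.h>

static int example_value;

static int example_handler(const struct ctl_table *ctl, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;
	/* example_value was just updated through ctl->data; react here */
	return 0;
}

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= example_handler,
	},
};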
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index e7658997452b..4f476884d340 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -39,8 +39,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o
-obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
+obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o uv.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
diff --git a/arch/s390/boot/alternative.c b/arch/s390/boot/alternative.c
new file mode 100644
index 000000000000..abc08d2c873d
--- /dev/null
+++ b/arch/s390/boot/alternative.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "../kernel/alternative.c"
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 18027fdc92b0..83e2ce050b6c 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -30,6 +30,8 @@ struct vmlinux_info {
unsigned long init_mm_off;
unsigned long swapper_pg_dir_off;
unsigned long invalid_pg_dir_off;
+ unsigned long alt_instructions;
+ unsigned long alt_instructions_end;
#ifdef CONFIG_KASAN
unsigned long kasan_early_shadow_page_off;
unsigned long kasan_early_shadow_pte_off;
@@ -89,8 +91,10 @@ extern char _end[], _decompressor_end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
extern struct vmlinux_info _vmlinux_info;
+
#define vmlinux _vmlinux_info
+#define __lowcore_pa(x) ((unsigned long)(x) % sizeof(struct lowcore))
#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
#define __kernel_va(x) ((void *)((unsigned long)(x) - __kaslr_offset_phys + __kaslr_offset))
#define __kernel_pa(x) ((unsigned long)(x) - __kaslr_offset + __kaslr_offset_phys)
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
index a13dd2f2aa1c..fa41486258ee 100755
--- a/arch/s390/boot/install.sh
+++ b/arch/s390/boot/install.sh
@@ -15,6 +15,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
echo "Warning: '${INSTALLKERNEL}' command not available - additional " \
"bootloader config required" >&2
if [ -f "$4/vmlinuz-$1" ]; then mv -- "$4/vmlinuz-$1" "$4/vmlinuz-$1.old"; fi
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index a21f301acd29..1773b72a6a7b 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/pgtable.h>
+#include <asm/abs_lowcore.h>
#include <asm/page-states.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
@@ -310,5 +311,7 @@ void parse_boot_command_line(void)
prot_virt_host = 1;
}
#endif
+ if (!strcmp(param, "relocate_lowcore") && test_facility(193))
+ relocate_lowcore = 1;
}
}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index c59014945af0..ce232552bc1c 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -30,6 +30,7 @@ unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
+int __bootdata_preserved(relocate_lowcore);
u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
@@ -376,6 +377,8 @@ static void kaslr_adjust_vmlinux_info(long offset)
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
+ vmlinux.alt_instructions += offset;
+ vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN
vmlinux.kasan_early_shadow_page_off += offset;
vmlinux.kasan_early_shadow_pte_off += offset;
@@ -478,8 +481,12 @@ void startup_kernel(void)
* before the kernel started. Therefore, in case the two sections
* overlap there is no risk of corrupting any data.
*/
- if (kaslr_enabled())
- amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
+ if (kaslr_enabled()) {
+ unsigned long amode31_min;
+
+ amode31_min = (unsigned long)_decompressor_end;
+ amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
+ }
if (!amode31_lma)
amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
@@ -503,6 +510,9 @@ void startup_kernel(void)
kaslr_adjust_got(__kaslr_offset);
setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
copy_bootdata();
+ __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
+ (struct alt_instr *)_vmlinux_info.alt_instructions_end,
+ ALT_CTX_EARLY);
/*
* Save KASLR offset for early dumps, before vmcore_info is set.
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index 1e66d2cbb096..318e6ba95bfd 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -8,12 +8,8 @@
#include "uv.h"
/* will be used in arch/s390/kernel/uv.c */
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
-#endif
-#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
-#endif
struct uv_info __bootdata_preserved(uv_info);
void uv_query_info(void)
@@ -53,14 +49,11 @@ void uv_query_info(void)
uv_info.max_secrets = uvcb.max_secrets;
}
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
prot_virt_guest = 1;
-#endif
}
-#if IS_ENABLED(CONFIG_KVM)
unsigned long adjust_to_uv_max(unsigned long limit)
{
if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
@@ -92,4 +85,3 @@ void sanitize_prot_virt_host(void)
{
prot_virt_host = is_prot_virt_host_capable();
}
-#endif
diff --git a/arch/s390/boot/uv.h b/arch/s390/boot/uv.h
index 0f3070856f8d..da4a4a8d48e0 100644
--- a/arch/s390/boot/uv.h
+++ b/arch/s390/boot/uv.h
@@ -2,21 +2,8 @@
#ifndef BOOT_UV_H
#define BOOT_UV_H
-#if IS_ENABLED(CONFIG_KVM)
unsigned long adjust_to_uv_max(unsigned long limit);
void sanitize_prot_virt_host(void);
-#else
-static inline unsigned long adjust_to_uv_max(unsigned long limit)
-{
- return limit;
-}
-static inline void sanitize_prot_virt_host(void) {}
-#endif
-
-#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void);
-#else
-static inline void uv_query_info(void) {}
-#endif
#endif /* BOOT_UV_H */
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index a255ca189aaa..2847cc059ab7 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -26,6 +26,7 @@ atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
enum populate_mode {
POPULATE_NONE,
POPULATE_DIRECT,
+ POPULATE_LOWCORE,
POPULATE_ABS_LOWCORE,
POPULATE_IDENTITY,
POPULATE_KERNEL,
@@ -242,6 +243,8 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
return -1;
case POPULATE_DIRECT:
return addr;
+ case POPULATE_LOWCORE:
+ return __lowcore_pa(addr);
case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr);
case POPULATE_KERNEL:
@@ -418,6 +421,7 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
{
+ unsigned long lowcore_address = 0;
unsigned long start, end;
unsigned long asce_type;
unsigned long asce_bits;
@@ -455,12 +459,17 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
__arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
__arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);
+ if (relocate_lowcore)
+ lowcore_address = LOWCORE_ALT_ADDRESS;
+
/*
* To allow prefixing the lowcore must be mapped with 4KB pages.
* To prevent creation of a large page at address 0 first map
* the lowcore and create the identity mapping only afterwards.
*/
- pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
+ pgtable_populate(lowcore_address,
+ lowcore_address + sizeof(struct lowcore),
+ POPULATE_LOWCORE);
for_each_physmem_usable_range(i, &start, &end) {
pgtable_populate((unsigned long)__identity_va(start),
(unsigned long)__identity_va(end),
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index f3602414a961..ea63a7342f5f 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -55,7 +55,6 @@ CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
-CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index d0d8925fdf09..d8b28ff8ff45 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -53,7 +53,6 @@ CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
-CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y
diff --git a/arch/s390/include/asm/abs_lowcore.h b/arch/s390/include/asm/abs_lowcore.h
index 6f264b79e377..d20df8c923fc 100644
--- a/arch/s390/include/asm/abs_lowcore.h
+++ b/arch/s390/include/asm/abs_lowcore.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_ABS_LOWCORE_H
#define _ASM_S390_ABS_LOWCORE_H
+#include <asm/sections.h>
#include <asm/lowcore.h>
#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore))
@@ -24,4 +25,11 @@ static inline void put_abs_lowcore(struct lowcore *lc)
put_cpu();
}
+extern int __bootdata_preserved(relocate_lowcore);
+
+static inline int have_relocated_lowcore(void)
+{
+ return relocate_lowcore;
+}
+
#endif /* _ASM_S390_ABS_LOWCORE_H */
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
deleted file mode 100644
index 608f6287ca9c..000000000000
--- a/arch/s390/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_ALTERNATIVE_ASM_H
-#define _ASM_S390_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-/*
- * Issue one struct alt_instr descriptor entry (need to put it into
- * the section .altinstructions, see below). This entry contains
- * enough information for the alternatives patching code to patch an
- * instruction. See apply_alternatives().
- */
-.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
- .long \orig_start - .
- .long \alt_start - .
- .word \feature
- .byte \orig_end - \orig_start
- .org . - ( \orig_end - \orig_start ) & 1
- .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
- .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr.
- */
-.macro ALTERNATIVE oldinstr, newinstr, feature
- .pushsection .altinstr_replacement,"ax"
-770: \newinstr
-771: .popsection
-772: \oldinstr
-773: .pushsection .altinstructions,"a"
- alt_entry 772b, 773b, 770b, 771b, \feature
- .popsection
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr.
- */
-.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
- .pushsection .altinstr_replacement,"ax"
-770: \newinstr1
-771: \newinstr2
-772: .popsection
-773: \oldinstr
-774: .pushsection .altinstructions,"a"
- alt_entry 773b, 774b, 770b, 771b,\feature1
- alt_entry 773b, 774b, 771b, 772b,\feature2
- .popsection
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index dd93b92c3ab6..de980c938a3e 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -2,6 +2,58 @@
#ifndef _ASM_S390_ALTERNATIVE_H
#define _ASM_S390_ALTERNATIVE_H
+/*
+ * Each alternative comes with a 32 bit feature field:
+ * union {
+ * u32 feature;
+ * struct {
+ * u32 ctx : 4;
+ * u32 type : 8;
+ * u32 data : 20;
+ * };
+ * }
+ *
+ * @ctx is a bitfield, where only one bit must be set. Each bit defines
+ * in which context an alternative is supposed to be applied to the
+ * kernel image:
+ *
+ * - from the decompressor before the kernel itself is executed
+ * - from early kernel code from within the kernel
+ *
+ * @type is a number which defines the type and with that the type
+ * specific alternative patching.
+ *
+ * @data is additional type specific information which defines if an
+ * alternative should be applied.
+ */
+
+#define ALT_CTX_EARLY 1
+#define ALT_CTX_LATE 2
+#define ALT_CTX_ALL (ALT_CTX_EARLY | ALT_CTX_LATE)
+
+#define ALT_TYPE_FACILITY 0
+#define ALT_TYPE_SPEC 1
+#define ALT_TYPE_LOWCORE 2
+
+#define ALT_DATA_SHIFT 0
+#define ALT_TYPE_SHIFT 20
+#define ALT_CTX_SHIFT 28
+
+#define ALT_FACILITY_EARLY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
+ ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
+ (facility) << ALT_DATA_SHIFT)
+
+#define ALT_FACILITY(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
+ ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
+ (facility) << ALT_DATA_SHIFT)
+
+#define ALT_SPEC(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
+ ALT_TYPE_SPEC << ALT_TYPE_SHIFT | \
+ (facility) << ALT_DATA_SHIFT)
+
+#define ALT_LOWCORE (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
+ ALT_TYPE_LOWCORE << ALT_TYPE_SHIFT)
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
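
As a worked example of the encoding described in the comment above: on s390 the big-endian bitfield layout puts ctx in the top four bits, so the struct view and the shift-based macros agree. The assertions below only restate the shifts (as EX_ constants) and show what ALT_FACILITY(193), ALT_SPEC(82) and ALT_LOWCORE expand to; they are illustration, not part of the patch.

#define EX_CTX_SHIFT	28
#define EX_TYPE_SHIFT	20

/* ALT_FACILITY(193): late context, facility type, facility bit 193 */
_Static_assert((2u << EX_CTX_SHIFT | 0u << EX_TYPE_SHIFT | 193) == 0x200000c1,
	       "late facility alternative");
/* ALT_SPEC(82): late context, spec type, facility bit 82 */
_Static_assert((2u << EX_CTX_SHIFT | 1u << EX_TYPE_SHIFT | 82) == 0x20100052,
	       "spec alternative");
/* ALT_LOWCORE: early context, lowcore type, no data */
_Static_assert((1u << EX_CTX_SHIFT | 2u << EX_TYPE_SHIFT) == 0x10200000,
	       "lowcore alternative");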
@@ -11,12 +63,30 @@
struct alt_instr {
s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */
- u16 facility; /* facility bit set for replacement */
+ union {
+ u32 feature; /* feature required for replacement */
+ struct {
+ u32 ctx : 4; /* context */
+ u32 type : 8; /* type of alternative */
+ u32 data : 20; /* patching information */
+ };
+ };
u8 instrlen; /* length of original instruction */
} __packed;
-void apply_alternative_instructions(void);
-void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx);
+
+static inline void apply_alternative_instructions(void)
+{
+ __apply_alternatives(__alt_instructions, __alt_instructions_end, ALT_CTX_LATE);
+}
+
+static inline void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+ __apply_alternatives(start, end, ALT_CTX_ALL);
+}
/*
* +---------------------------------+
@@ -48,10 +118,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define OLDINSTR(oldinstr) \
"661:\n\t" oldinstr "\n662:\n"
-#define ALTINSTR_ENTRY(facility, num) \
+#define ALTINSTR_ENTRY(feature, num) \
"\t.long 661b - .\n" /* old instruction */ \
"\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
- "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.long " __stringify(feature) "\n" /* feature */ \
"\t.byte " oldinstr_len "\n" /* instruction len */ \
"\t.org . - (" oldinstr_len ") & 1\n" \
"\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \
@@ -61,24 +131,24 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"
/* alternative assembly primitive: */
-#define ALTERNATIVE(oldinstr, altinstr, facility) \
+#define ALTERNATIVE(oldinstr, altinstr, feature) \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr, 1) \
".popsection\n" \
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(facility, 1) \
+ ALTINSTR_ENTRY(feature, 1) \
".popsection\n"
-#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+#define ALTERNATIVE_2(oldinstr, altinstr1, feature1, altinstr2, feature2)\
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr1, 1) \
ALTINSTR_REPLACEMENT(altinstr2, 2) \
".popsection\n" \
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(facility1, 1) \
- ALTINSTR_ENTRY(facility2, 2) \
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
".popsection\n"
/*
@@ -93,12 +163,12 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
* For non barrier like inlines please define new variants
* without volatile and memory clobber.
*/
-#define alternative(oldinstr, altinstr, facility) \
- asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+#define alternative(oldinstr, altinstr, feature) \
+ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) : : : "memory")
-#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
- asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
- altinstr2, facility2) ::: "memory")
+#define alternative_2(oldinstr, altinstr1, feature1, altinstr2, feature2) \
+ asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, feature1, \
+ altinstr2, feature2) ::: "memory")
/* Alternative inline assembly with input. */
#define alternative_input(oldinstr, newinstr, feature, input...) \
@@ -106,8 +176,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
: : input)
/* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, altinstr, facility, output, input...) \
- asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) \
+#define alternative_io(oldinstr, altinstr, feature, output, input...) \
+ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) \
: output : input)
/* Use this macro if more than one output parameter is needed. */
@@ -116,6 +186,56 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
/* Use this macro if clobbers are needed without inputs. */
#define ASM_NO_INPUT_CLOBBER(clobber...) : clobber
+#else /* __ASSEMBLY__ */
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
+ .long \orig_start - .
+ .long \alt_start - .
+ .long \feature
+ .byte \orig_end - \orig_start
+ .org . - ( \orig_end - \orig_start ) & 1
+ .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
+ .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+ .pushsection .altinstr_replacement,"ax"
+770: \newinstr
+771: .popsection
+772: \oldinstr
+773: .pushsection .altinstructions,"a"
+ alt_entry 772b, 773b, 770b, 771b, \feature
+ .popsection
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+ .pushsection .altinstr_replacement,"ax"
+770: \newinstr1
+771: \newinstr2
+772: .popsection
+773: \oldinstr
+774: .pushsection .altinstructions,"a"
+ alt_entry 773b, 774b, 770b, 771b,\feature1
+ alt_entry 773b, 774b, 771b, 772b,\feature2
+ .popsection
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_ALTERNATIVE_H */
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 2b379d1d9046..742c7919cbcd 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -188,7 +188,8 @@ static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
return old;
}
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
+/* GCC versions before 14.2.0 may die with an ICE in some configurations. */
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_IS_GCC) && (GCC_VERSION < 140200))
static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 436365ff6c19..e3afcece375e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -210,7 +210,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define get_ccwdev_lock(x) (x)->ccwlock
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
-#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
+#define to_ccwdrv(n) container_of_const(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
extern void ccw_device_destroy_console(struct ccw_device *);
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index d46cc725f024..b7d234838a36 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -20,7 +20,6 @@
#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
extern u64 stfle_fac_list[16];
-extern u64 alt_stfle_fac_list[16];
static inline void __set_facility(unsigned long nr, void *facilities)
{
diff --git a/arch/s390/include/asm/kmsan.h b/arch/s390/include/asm/kmsan.h
index 27db65fbf3f6..f73e181d09ae 100644
--- a/arch/s390/include/asm/kmsan.h
+++ b/arch/s390/include/asm/kmsan.h
@@ -12,8 +12,8 @@
static inline bool is_lowcore_addr(void *addr)
{
- return addr >= (void *)&S390_lowcore &&
- addr < (void *)(&S390_lowcore + 1);
+ return addr >= (void *)get_lowcore() &&
+ addr < (void *)(get_lowcore() + 1);
}
static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
@@ -25,7 +25,7 @@ static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
* order to get a distinct struct page.
*/
addr += (void *)lowcore_ptr[raw_smp_processor_id()] -
- (void *)&S390_lowcore;
+ (void *)get_lowcore();
if (KMSAN_WARN_ON(is_lowcore_addr(addr)))
return NULL;
return kmsan_get_metadata(addr, is_origin);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index c724e71e1785..183ac29afaf8 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -14,10 +14,15 @@
#include <asm/ctlreg.h>
#include <asm/cpu.h>
#include <asm/types.h>
+#include <asm/alternative.h>
#define LC_ORDER 1
#define LC_PAGES 2
+#define LOWCORE_ALT_ADDRESS _AC(0x70000, UL)
+
+#ifndef __ASSEMBLY__
+
struct pgm_tdb {
u64 data[32];
};
@@ -97,8 +102,7 @@ struct lowcore {
__u64 save_area_async[8]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */
- /* CPU flags. */
- __u64 cpu_flags; /* 0x0288 */
+ __u64 pcpu; /* 0x0288 */
/* Return psws. */
psw_t return_psw; /* 0x0290 */
@@ -215,7 +219,14 @@ struct lowcore {
static __always_inline struct lowcore *get_lowcore(void)
{
- return NULL;
+ struct lowcore *lc;
+
+ if (__is_defined(__DECOMPRESSOR))
+ return NULL;
+ asm(ALTERNATIVE("llilh %[lc],0", "llilh %[lc],%[alt]", ALT_LOWCORE)
+ : [lc] "=d" (lc)
+ : [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
+ return lc;
}
extern struct lowcore *lowcore_ptr[];
@@ -225,4 +236,19 @@ static inline void set_prefix(__u32 address)
asm volatile("spx %0" : : "Q" (address) : "memory");
}
+#else /* __ASSEMBLY__ */
+
+.macro GET_LC reg
+ ALTERNATIVE "llilh \reg,0", \
+ __stringify(llilh \reg, LOWCORE_ALT_ADDRESS >> 16), \
+ ALT_LOWCORE
+.endm
+
+.macro STMG_LC start, end, savearea
+ ALTERNATIVE "stmg \start, \end, \savearea", \
+ __stringify(stmg \start, \end, LOWCORE_ALT_ADDRESS + \savearea), \
+ ALT_LOWCORE
+.endm
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_LOWCORE_H */
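
A short worked example of why a single llilh is enough for GET_LC and get_lowcore(): llilh sets the result to immediate << 16 and clears all remaining bits, so LOWCORE_ALT_ADDRESS fits a 16-bit immediate of 0x7 and the non-relocated case simply uses an immediate of 0. The EX_ constant merely restates the value above for illustration.

#define EX_LOWCORE_ALT_ADDRESS	0x70000UL

_Static_assert((EX_LOWCORE_ALT_ADDRESS >> 16) == 0x7,
	       "relocated base fits a 16-bit llilh immediate");
_Static_assert((0x7UL << 16) == EX_LOWCORE_ALT_ADDRESS,
	       "llilh reconstitutes the relocated lowcore base");
/* "llilh %lc,0" yields base 0, i.e. the architected lowcore location */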
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index b9c1f3cae842..192835a3e24d 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -5,8 +5,17 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/facility.h>
extern int nospec_disable;
+extern int nobp;
+
+static inline bool nobp_enabled(void)
+{
+ if (__is_defined(__DECOMPRESSOR))
+ return false;
+ return nobp && test_facility(82);
+}
void nospec_init_branches(void);
void nospec_auto_detect(void);
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 5ec41ec3d761..06416b3f94f5 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -174,12 +174,10 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
-#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-#endif
struct vm_layout {
unsigned long kaslr_offset;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c87cf2b8e81a..5ecd442535b9 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,13 +14,11 @@
#include <linux/bits.h>
-#define CIF_SIE 0 /* CPU needs SIE exit cleanup */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
-#define _CIF_SIE BIT(CIF_SIE)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
@@ -42,21 +40,37 @@
#include <asm/irqflags.h>
#include <asm/alternative.h>
+struct pcpu {
+ unsigned long ec_mask; /* bit mask for ec_xxx functions */
+ unsigned long ec_clk; /* sigp timestamp for ec_xxx */
+ unsigned long flags; /* per CPU flags */
+ signed char state; /* physical cpu state */
+ signed char polarization; /* physical polarization */
+ u16 address; /* physical cpu address */
+};
+
+DECLARE_PER_CPU(struct pcpu, pcpu_devices);
+
typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
+static __always_inline struct pcpu *this_pcpu(void)
+{
+ return (struct pcpu *)(get_lowcore()->pcpu);
+}
+
static __always_inline void set_cpu_flag(int flag)
{
- get_lowcore()->cpu_flags |= (1UL << flag);
+ this_pcpu()->flags |= (1UL << flag);
}
static __always_inline void clear_cpu_flag(int flag)
{
- get_lowcore()->cpu_flags &= ~(1UL << flag);
+ this_pcpu()->flags &= ~(1UL << flag);
}
static __always_inline bool test_cpu_flag(int flag)
{
- return get_lowcore()->cpu_flags & (1UL << flag);
+ return this_pcpu()->flags & (1UL << flag);
}
static __always_inline bool test_and_set_cpu_flag(int flag)
@@ -81,9 +95,7 @@ static __always_inline bool test_and_clear_cpu_flag(int flag)
*/
static __always_inline bool test_cpu_flag_of(int flag, int cpu)
{
- struct lowcore *lc = lowcore_ptr[cpu];
-
- return lc->cpu_flags & (1UL << flag);
+ return per_cpu(pcpu_devices, cpu).flags & (1UL << flag);
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
@@ -405,7 +417,7 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
static __always_inline void bpon(void)
{
- asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", 82));
+ asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)));
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/runtime-const.h b/arch/s390/include/asm/runtime-const.h
new file mode 100644
index 000000000000..17878b1d048c
--- /dev/null
+++ b/arch/s390/include/asm/runtime-const.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_RUNTIME_CONST_H
+#define _ASM_S390_RUNTIME_CONST_H
+
+#include <linux/uaccess.h>
+
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret; \
+ \
+ asm_inline( \
+ "0: iihf %[__ret],%[c1]\n" \
+ " iilf %[__ret],%[c2]\n" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n" \
+ ".long 0b - .\n" \
+ ".popsection" \
+ : [__ret] "=d" (__ret) \
+ : [c1] "i" (0x01234567UL), \
+ [c2] "i" (0x89abcdefUL)); \
+ __ret; \
+})
+
+#define runtime_const_shift_right_32(val, sym) \
+({ \
+ unsigned int __ret = (val); \
+ \
+ asm_inline( \
+ "0: srl %[__ret],12\n" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n" \
+ ".long 0b - .\n" \
+ ".popsection" \
+ : [__ret] "+d" (__ret)); \
+ __ret; \
+})
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+/* The 32-bit immediates of iihf and iilf are in the I2 field */
+static inline void __runtime_fixup_32(u32 *p, unsigned int val)
+{
+ s390_kernel_write(p, &val, sizeof(val));
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+ __runtime_fixup_32(where + 2, val >> 32);
+ __runtime_fixup_32(where + 8, val);
+}
+
+/* Immediate value is lower 12 bits of D2 field of srl */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ u32 insn = *(u32 *)where;
+
+ insn &= 0xfffff000;
+ insn |= (val & 63);
+ s390_kernel_write(where, &insn, sizeof(insn));
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif /* _ASM_S390_RUNTIME_CONST_H */
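
The helpers above patch a 64-bit constant materialized by iihf/iilf and the shift count of an srl. The sketch below shows how generic code would consume this interface, mirroring the way the dentry-cache hash uses runtime constants upstream; the example_ names and struct bucket are hypothetical, and the matching runtime_ptr_*/runtime_shift_* sections also have to be kept by the linker script.

#include <linux/init.h>
#include <asm/runtime-const.h>

struct bucket { void *head; };		/* hypothetical bucket type */

static struct bucket *example_table;	/* hypothetical hash table */
static unsigned int example_shift;	/* hypothetical hash shift */

static inline struct bucket *example_hash(unsigned int hash)
{
	/* Both values are patched into the instructions once at boot. */
	return runtime_const_ptr(example_table) +
	       runtime_const_shift_right_32(hash, example_shift);
}

static void __init example_init(void)
{
	/* Fix up every use site after the final values are known. */
	runtime_const_init(ptr, example_table);
	runtime_const_init(shift, example_shift);
}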
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c13c79025348..cd835f4fb11a 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -24,7 +24,6 @@ extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
extern void smp_emergency_stop(void);
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 3e43c90ff135..77d5e804af93 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -79,7 +79,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
typecheck(int, lp->lock);
kcsan_release();
asm_inline volatile(
- ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
" sth %1,%0\n"
: "=R" (((unsigned short *) &lp->lock)[1])
: "d" (0) : "cc", "memory");
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index d02a709717b8..00ac01874a12 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
unsigned long flags; /* low level flags */
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
unsigned int cpu; /* current CPU */
+ unsigned char sie; /* running in SIE context */
};
/*
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9213be0529ee..a81f897a81ce 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -332,7 +332,14 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return __clear_user(to, n);
}
-void *s390_kernel_write(void *dst, const void *src, size_t size);
+void *__s390_kernel_write(void *dst, const void *src, size_t size);
+
+static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
+{
+ if (__is_defined(__DECOMPRESSOR))
+ return memcpy(dst, src, size);
+ return __s390_kernel_write(dst, src, size);
+}
int __noreturn __put_kernel_bad(void);
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 0679445cac0b..0b5f8f3e84f1 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -414,7 +414,6 @@ static inline bool uv_has_feature(u8 feature_bit)
return test_bit_inv(feature_bit, &uv_info.uv_feature_indications);
}
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;
static inline int is_prot_virt_guest(void)
@@ -466,13 +465,6 @@ static inline int uv_remove_shared(unsigned long addr)
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
-#else
-#define is_prot_virt_guest() 0
-static inline int uv_set_shared(unsigned long addr) { return 0; }
-static inline int uv_remove_shared(unsigned long addr) { return 0; }
-#endif
-
-#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;
static inline int is_prot_virt_host(void)
@@ -489,29 +481,5 @@ int uv_convert_from_secure_pte(pte_t pte);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
void setup_uv(void);
-#else
-#define is_prot_virt_host() 0
-static inline void setup_uv(void) {}
-
-static inline int uv_pin_shared(unsigned long paddr)
-{
- return 0;
-}
-
-static inline int uv_destroy_folio(struct folio *folio)
-{
- return 0;
-}
-
-static inline int uv_destroy_pte(pte_t pte)
-{
- return 0;
-}
-
-static inline int uv_convert_from_secure_pte(pte_t pte)
-{
- return 0;
-}
-#endif
#endif /* _ASM_S390_UV_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7241fa194709..e47a4be54ff8 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -43,7 +43,7 @@ obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o
+obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o
extra-y += vmlinux.lds
@@ -80,7 +80,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o perf_pai_ext.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
-obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
# vdso
obj-y += vdso64/
diff --git a/arch/s390/kernel/abs_lowcore.c b/arch/s390/kernel/abs_lowcore.c
index f9efc54ec4b7..09cd24cbe74e 100644
--- a/arch/s390/kernel/abs_lowcore.c
+++ b/arch/s390/kernel/abs_lowcore.c
@@ -4,6 +4,7 @@
#include <asm/abs_lowcore.h>
unsigned long __bootdata_preserved(__abs_lowcore);
+int __bootdata_preserved(relocate_lowcore);
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
{
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 1ac5f707dd70..8d5d0de35de0 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -1,68 +1,41 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <asm/text-patching.h>
+
+#include <linux/uaccess.h>
+#include <asm/nospec-branch.h>
+#include <asm/abs_lowcore.h>
#include <asm/alternative.h>
#include <asm/facility.h>
-#include <asm/nospec-branch.h>
-
-static int __initdata_or_module alt_instr_disabled;
-
-static int __init disable_alternative_instructions(char *str)
-{
- alt_instr_disabled = 1;
- return 0;
-}
-
-early_param("noaltinstr", disable_alternative_instructions);
-static void __init_or_module __apply_alternatives(struct alt_instr *start,
- struct alt_instr *end)
+void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx)
{
- struct alt_instr *a;
u8 *instr, *replacement;
+ struct alt_instr *a;
+ bool replace;
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
*/
for (a = start; a < end; a++) {
+ if (!(a->ctx & ctx))
+ continue;
+ switch (a->type) {
+ case ALT_TYPE_FACILITY:
+ replace = test_facility(a->data);
+ break;
+ case ALT_TYPE_SPEC:
+ replace = nobp_enabled();
+ break;
+ case ALT_TYPE_LOWCORE:
+ replace = have_relocated_lowcore();
+ break;
+ default:
+ replace = false;
+ }
+ if (!replace)
+ continue;
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
-
- if (!__test_facility(a->facility, alt_stfle_fac_list))
- continue;
s390_kernel_write(instr, replacement, a->instrlen);
}
}
-
-void __init_or_module apply_alternatives(struct alt_instr *start,
- struct alt_instr *end)
-{
- if (!alt_instr_disabled)
- __apply_alternatives(start, end);
-}
-
-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-void __init apply_alternative_instructions(void)
-{
- apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
-static void do_sync_core(void *info)
-{
- sync_core();
-}
-
-void text_poke_sync(void)
-{
- on_each_cpu(do_sync_core, NULL, 1);
-}
-
-void text_poke_sync_lock(void)
-{
- cpus_read_lock();
- text_poke_sync();
- cpus_read_unlock();
-}
diff --git a/arch/s390/kernel/alternative.h b/arch/s390/kernel/alternative.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/s390/kernel/alternative.h
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 26bb45d0e6f1..ffa0dd2dbaac 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -28,6 +28,7 @@ int main(void)
BLANK();
/* thread info offsets */
OFFSET(__TI_flags, task_struct, thread_info.flags);
+ OFFSET(__TI_sie, task_struct, thread_info.sie);
BLANK();
/* pt_regs offsets */
OFFSET(__PT_PSW, pt_regs, psw);
@@ -114,7 +115,7 @@ int main(void)
OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
- OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
+ OFFSET(__LC_PCPU, lowcore, pcpu);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer);
@@ -186,5 +187,7 @@ int main(void)
#endif
OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs);
DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs));
+
+ OFFSET(__PCPU_FLAGS, pcpu, flags);
return 0;
}
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 85328a0ef3b6..bce50ca75ea7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -954,7 +954,7 @@ static int debug_active = 1;
* always allow read, allow write only if debug_stoppable is set or
* if debug_active is already off
*/
-static int s390dbf_procactive(struct ctl_table *table, int write,
+static int s390dbf_procactive(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!write || debug_stoppable || !debug_active)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 467ed4dba817..14d324865e33 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -48,6 +48,7 @@ decompressor_handled_param(dfltcc);
decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
decompressor_handled_param(cmma);
+decompressor_handled_param(relocate_lowcore);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif
@@ -190,13 +191,6 @@ static noinline __init void setup_lowcore_early(void)
get_lowcore()->preempt_count = INIT_PREEMPT_COUNT;
}
-static noinline __init void setup_facility_list(void)
-{
- memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
- if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
- __clear_facility(82, alt_stfle_fac_list);
-}
-
static __init void detect_diag9c(void)
{
unsigned int cpu_address;
@@ -291,7 +285,6 @@ void __init startup_init(void)
lockdep_off();
sort_amode31_extable();
setup_lowcore_early();
- setup_facility_list();
detect_machine_type();
setup_arch_string();
setup_boot_command_line();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 454b6b92c7f8..749410cfdbc0 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
@@ -28,49 +28,54 @@
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>
+#include <asm/lowcore.h>
_LPP_OFFSET = __LC_LPP
.macro STBEAR address
- ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
+ ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
.endm
.macro LBEAR address
- ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
+ ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
.endm
- .macro LPSWEY address,lpswe
- ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
+ .macro LPSWEY address, lpswe
+ ALTERNATIVE_2 "b \lpswe;nopr", \
+ ".insn siy,0xeb0000000071,\address,0", ALT_FACILITY_EARLY(193), \
+ __stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \
+ ALT_LOWCORE
.endm
- .macro MBEAR reg
- ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
+ .macro MBEAR reg, lowcore
+ ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
+ ALT_FACILITY(193)
.endm
- .macro CHECK_STACK savearea
+ .macro CHECK_STACK savearea, lowcore
#ifdef CONFIG_CHECK_STACK
tml %r15,THREAD_SIZE - CONFIG_STACK_GUARD
- lghi %r14,\savearea
+ la %r14,\savearea(\lowcore)
jz stack_overflow
#endif
.endm
- .macro CHECK_VMAP_STACK savearea,oklabel
+ .macro CHECK_VMAP_STACK savearea, lowcore, oklabel
#ifdef CONFIG_VMAP_STACK
lgr %r14,%r15
nill %r14,0x10000 - THREAD_SIZE
oill %r14,STACK_INIT_OFFSET
- clg %r14,__LC_KERNEL_STACK
+ clg %r14,__LC_KERNEL_STACK(\lowcore)
je \oklabel
- clg %r14,__LC_ASYNC_STACK
+ clg %r14,__LC_ASYNC_STACK(\lowcore)
je \oklabel
- clg %r14,__LC_MCCK_STACK
+ clg %r14,__LC_MCCK_STACK(\lowcore)
je \oklabel
- clg %r14,__LC_NODAT_STACK
+ clg %r14,__LC_NODAT_STACK(\lowcore)
je \oklabel
- clg %r14,__LC_RESTART_STACK
+ clg %r14,__LC_RESTART_STACK(\lowcore)
je \oklabel
- lghi %r14,\savearea
+ la %r14,\savearea(\lowcore)
j stack_overflow
#else
j \oklabel
@@ -100,30 +105,31 @@ _LPP_OFFSET = __LC_LPP
.endm
.macro BPOFF
- ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
+ ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
.endm
.macro BPON
- ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
+ ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
.endm
.macro BPENTER tif_ptr,tif_mask
ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
- "j .+12; nop; nop", 82
+ "j .+12; nop; nop", ALT_SPEC(82)
.endm
.macro BPEXIT tif_ptr,tif_mask
TSTMSK \tif_ptr,\tif_mask
ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
- "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
+ "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
.endm
#if IS_ENABLED(CONFIG_KVM)
- .macro SIEEXIT sie_control
- lg %r9,\sie_control # get control block pointer
- ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
- ni __LC_CPU_FLAGS+7,255-_CIF_SIE
+ .macro SIEEXIT sie_control,lowcore
+ lg %r9,\sie_control # get control block pointer
+ ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_KERNEL_ASCE(\lowcore) # load primary asce
+ lg %r9,__LC_CURRENT(\lowcore)
+ mvi __TI_sie(%r9),0
larl %r9,sie_exit # skip forward to sie_exit
.endm
#endif
@@ -163,13 +169,14 @@ SYM_FUNC_START(__switch_to_asm)
stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
lg %r15,0(%r4,%r3) # start of kernel stack of next
agr %r15,%r5 # end of kernel stack of next
- stg %r3,__LC_CURRENT # store task struct of next
- stg %r15,__LC_KERNEL_STACK # store end of kernel stack
+ GET_LC %r13
+ stg %r3,__LC_CURRENT(%r13) # store task struct of next
+ stg %r15,__LC_KERNEL_STACK(%r13) # store end of kernel stack
lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
aghi %r3,__TASK_pid
- mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
+ mvc __LC_CURRENT_PID(4,%r13),0(%r3) # store pid of next
+ ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
- ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
BR_EX %r14
SYM_FUNC_END(__switch_to_asm)
@@ -183,15 +190,16 @@ SYM_FUNC_END(__switch_to_asm)
*/
SYM_FUNC_START(__sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
- lg %r12,__LC_CURRENT
+ GET_LC %r13
+ lg %r14,__LC_CURRENT(%r13)
stg %r2,__SF_SIE_CONTROL_PHYS(%r15) # save sie block physical..
stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses
stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area
stg %r5,__SF_SIE_GUEST_ASCE(%r15) # save guest asce
xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
- mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
+ mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
lmg %r0,%r13,0(%r4) # load guest gprs 0-13
- oi __LC_CPU_FLAGS+7,_CIF_SIE
+ mvi __TI_sie(%r14),1
lctlg %c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
@@ -210,8 +218,10 @@ SYM_FUNC_START(__sie64a)
.Lsie_skip:
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
- ni __LC_CPU_FLAGS+7,255-_CIF_SIE
+ GET_LC %r14
+ lctlg %c1,%c1,__LC_KERNEL_ASCE(%r14) # load primary asce
+ lg %r14,__LC_CURRENT(%r14)
+ mvi __TI_sie(%r14),0
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
@@ -254,14 +264,15 @@ EXPORT_SYMBOL(sie_exit)
*/
SYM_CODE_START(system_call)
- stpt __LC_SYS_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
+ GET_LC %r13
+ stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
lghi %r14,0
.Lsysc_per:
- STBEAR __LC_LAST_BREAK
- lctlg %c1,%c1,__LC_KERNEL_ASCE
- lg %r15,__LC_KERNEL_STACK
+ STBEAR __LC_LAST_BREAK(%r13)
+ lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
+ lg %r15,__LC_KERNEL_STACK(%r13)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
# clear user controlled register to prevent speculative use
@@ -276,17 +287,17 @@ SYM_CODE_START(system_call)
xgr %r10,%r10
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
- mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
- MBEAR %r2
+ mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC(%r13)
+ MBEAR %r2,%r13
lgr %r3,%r14
brasl %r14,__do_syscall
STACKLEAK_ERASE
- lctlg %c1,%c1,__LC_USER_ASCE
- mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ lctlg %c1,%c1,__LC_USER_ASCE(%r13)
+ mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
+ stpt __LC_EXIT_TIMER(%r13)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- stpt __LC_EXIT_TIMER
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)
@@ -297,12 +308,13 @@ SYM_CODE_START(ret_from_fork)
lgr %r3,%r11
brasl %r14,__ret_from_fork
STACKLEAK_ERASE
- lctlg %c1,%c1,__LC_USER_ASCE
- mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ GET_LC %r13
+ lctlg %c1,%c1,__LC_USER_ASCE(%r13)
+ mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
BPON
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
+ stpt __LC_EXIT_TIMER(%r13)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- stpt __LC_EXIT_TIMER
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)
@@ -311,39 +323,40 @@ SYM_CODE_END(ret_from_fork)
*/
SYM_CODE_START(pgm_check_handler)
- stpt __LC_SYS_ENTER_TIMER
+ STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
+ GET_LC %r13
+ stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lgr %r10,%r15
- lmg %r8,%r9,__LC_PGM_OLD_PSW
+ lmg %r8,%r9,__LC_PGM_OLD_PSW(%r13)
tmhh %r8,0x0001 # coming from user space?
jno .Lpgm_skip_asce
- lctlg %c1,%c1,__LC_KERNEL_ASCE
+ lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
j 3f # -> fault in user space
.Lpgm_skip_asce:
1: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 2f # -> enabled, can't be a double fault
- tm __LC_PGM_ILC+3,0x80 # check for per exception
+ tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
-2: CHECK_STACK __LC_SAVE_AREA_SYNC
+2: CHECK_STACK __LC_SAVE_AREA_SYNC,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
-3: lg %r15,__LC_KERNEL_STACK
+ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,%r13,4f
+3: lg %r15,__LC_KERNEL_STACK(%r13)
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC(%r13)
+ mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
stctg %c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
- ltg %r12,__LC_GMAP
+ ltg %r12,__LC_GMAP(%r13)
jz 5f
clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11)
jne 5f
BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
- SIEEXIT __SF_SIE_CONTROL(%r10)
+ SIEEXIT __SF_SIE_CONTROL(%r10),%r13
#endif
5: stmg %r8,%r9,__PT_PSW(%r11)
# clear user controlled registers to prevent speculative use
@@ -359,11 +372,11 @@ SYM_CODE_START(pgm_check_handler)
tmhh %r8,0x0001 # returning to user space?
jno .Lpgm_exit_kernel
STACKLEAK_ERASE
- lctlg %c1,%c1,__LC_USER_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE(%r13)
BPON
- stpt __LC_EXIT_TIMER
+ stpt __LC_EXIT_TIMER(%r13)
.Lpgm_exit_kernel:
- mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
@@ -372,11 +385,11 @@ SYM_CODE_START(pgm_check_handler)
# single stepped system call
#
.Lpgm_svcper:
- mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+ mvc __LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
larl %r14,.Lsysc_per
- stg %r14,__LC_RETURN_PSW+8
+ stg %r14,__LC_RETURN_PSW+8(%r13)
lghi %r14,1
- LBEAR __LC_PGM_LAST_BREAK
+ LBEAR __LC_PGM_LAST_BREAK(%r13)
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)
@@ -385,25 +398,27 @@ SYM_CODE_END(pgm_check_handler)
*/
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
- stckf __LC_INT_CLOCK
- stpt __LC_SYS_ENTER_TIMER
- STBEAR __LC_LAST_BREAK
+ STMG_LC %r8,%r15,__LC_SAVE_AREA_ASYNC
+ GET_LC %r13
+ stckf __LC_INT_CLOCK(%r13)
+ stpt __LC_SYS_ENTER_TIMER(%r13)
+ STBEAR __LC_LAST_BREAK(%r13)
BPOFF
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lmg %r8,%r9,\lc_old_psw
+ lmg %r8,%r9,\lc_old_psw(%r13)
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
#if IS_ENABLED(CONFIG_KVM)
- TSTMSK __LC_CPU_FLAGS,_CIF_SIE
+ lg %r10,__LC_CURRENT(%r13)
+ tm __TI_sie(%r10),0xff
jz 0f
BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
- SIEEXIT __SF_SIE_CONTROL(%r15)
+ SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
-0: CHECK_STACK __LC_SAVE_AREA_ASYNC
+0: CHECK_STACK __LC_SAVE_AREA_ASYNC,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: lctlg %c1,%c1,__LC_KERNEL_ASCE
- lg %r15,__LC_KERNEL_STACK
+1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
+ lg %r15,__LC_KERNEL_STACK(%r13)
2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
@@ -417,18 +432,18 @@ SYM_CODE_START(\name)
xgr %r7,%r7
xgr %r10,%r10
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- MBEAR %r11
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC(%r13)
+ MBEAR %r11,%r13
stmg %r8,%r9,__PT_PSW(%r11)
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,\handler
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ mvc __LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
tmhh %r8,0x0001 # returning to user ?
jno 2f
STACKLEAK_ERASE
- lctlg %c1,%c1,__LC_USER_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE(%r13)
BPON
- stpt __LC_EXIT_TIMER
+ stpt __LC_EXIT_TIMER(%r13)
2: LBEAR __PT_LAST_BREAK(%r11)
lmg %r0,%r15,__PT_R0(%r11)
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
@@ -443,35 +458,37 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
*/
SYM_CODE_START(mcck_int_handler)
BPOFF
- lmg %r8,%r9,__LC_MCK_OLD_PSW
- TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
+ GET_LC %r13
+ lmg %r8,%r9,__LC_MCK_OLD_PSW(%r13)
+ TSTMSK __LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
- TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
+ TSTMSK __LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
jno .Lmcck_panic # control registers invalid -> panic
ptlb
- lghi %r14,__LC_CPU_TIMER_SAVE_AREA
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
- TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
+ lay %r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
+ mvc __LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
+ TSTMSK __LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
jo 3f
- la %r14,__LC_SYS_ENTER_TIMER
- clc 0(8,%r14),__LC_EXIT_TIMER
+ la %r14,__LC_SYS_ENTER_TIMER(%r13)
+ clc 0(8,%r14),__LC_EXIT_TIMER(%r13)
jl 1f
- la %r14,__LC_EXIT_TIMER
-1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
+ la %r14,__LC_EXIT_TIMER(%r13)
+1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
jl 2f
- la %r14,__LC_LAST_UPDATE_TIMER
+ la %r14,__LC_LAST_UPDATE_TIMER(%r13)
2: spt 0(%r14)
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
+ mvc __LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
+3: TSTMSK __LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
jno .Lmcck_panic
tmhh %r8,0x0001 # interrupting from user ?
jnz .Lmcck_user
- TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
+ TSTMSK __LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
- TSTMSK __LC_CPU_FLAGS,_CIF_SIE
+ lg %r10,__LC_CURRENT(%r13)
+ tm __TI_sie(%r10),0xff
jz .Lmcck_user
- # Need to compare the address instead of a CIF_SIE* flag.
+ # Need to compare the address instead of the __TI_sie flag.
# Otherwise there would be a race between setting the flag
# and entering SIE (or leaving and clearing the flag). This
# would cause machine checks targeted at the guest to be
@@ -480,18 +497,19 @@ SYM_CODE_START(mcck_int_handler)
clgrjl %r9,%r14, 4f
larl %r14,.Lsie_leave
clgrjhe %r9,%r14, 4f
- oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+ lg %r10,__LC_PCPU
+ oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
- SIEEXIT __SF_SIE_CONTROL(%r15)
+ SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
.Lmcck_user:
- lg %r15,__LC_MCCK_STACK
+ lg %r15,__LC_MCCK_STACK(%r13)
la %r11,STACK_FRAME_OVERHEAD(%r15)
stctg %c1,%c1,__PT_CR1(%r11)
- lctlg %c1,%c1,__LC_KERNEL_ASCE
+ lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lghi %r14,__LC_GPREGS_SAVE_AREA+64
- stmg %r0,%r7,__PT_R0(%r11)
+ lay %r14,__LC_GPREGS_SAVE_AREA(%r13)
+ mvc __PT_R0(128,%r11),0(%r14)
# clear user controlled registers to prevent speculative use
xgr %r0,%r0
xgr %r1,%r1
@@ -501,7 +519,6 @@ SYM_CODE_START(mcck_int_handler)
xgr %r6,%r6
xgr %r7,%r7
xgr %r10,%r10
- mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
@@ -509,12 +526,13 @@ SYM_CODE_START(mcck_int_handler)
brasl %r14,s390_do_machine_check
lctlg %c1,%c1,__PT_CR1(%r11)
lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
- tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+ mvc __LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
+ tm __LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
jno 0f
BPON
- stpt __LC_EXIT_TIMER
-0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
+ stpt __LC_EXIT_TIMER(%r13)
+0: ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
+ ALT_FACILITY(193)
LBEAR 0(%r12)
lmg %r11,%r15,__PT_R11(%r11)
LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
@@ -550,7 +568,7 @@ SYM_CODE_START(mcck_int_handler)
SYM_CODE_END(mcck_int_handler)
SYM_CODE_START(restart_int_handler)
- ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
+ ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
stg %r15,__LC_SAVE_AREA_RESTART
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f
@@ -558,15 +576,17 @@ SYM_CODE_START(restart_int_handler)
0: larl %r15,daton_psw
lpswe 0(%r15) # turn dat on, keep irqs off
.Ldaton:
- lg %r15,__LC_RESTART_STACK
+ GET_LC %r15
+ lg %r15,__LC_RESTART_STACK(%r15)
xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
- mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
+ GET_LC %r13
+ mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
+ mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
- lg %r2,__LC_RESTART_DATA
- lgf %r3,__LC_RESTART_SOURCE
+ lg %r1,__LC_RESTART_FN(%r13) # load fn, parm & source cpu
+ lg %r2,__LC_RESTART_DATA(%r13)
+ lgf %r3,__LC_RESTART_SOURCE(%r13)
ltgr %r3,%r3 # test source cpu address
jm 1f # negative -> skip source stop
0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
@@ -588,7 +608,8 @@ SYM_CODE_END(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
SYM_CODE_START(stack_overflow)
- lg %r15,__LC_NODAT_STACK # change to panic stack
+ GET_LC %r15
+ lg %r15,__LC_NODAT_STACK(%r15) # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
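All of the entry.S hunks above follow one pattern: lowcore fields are no longer addressed as absolute offsets from address zero, but as offsets from a base register loaded with GET_LC, so the code keeps working once the lowcore is relocated. The following is a rough user-space sketch of that access pattern in C; the struct layout and field names are invented, only the offset-from-base idea mirrors the diff.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct lowcore {
	uint64_t exit_timer;
	uint64_t kernel_stack;
};

#define __LC_EXIT_TIMER		offsetof(struct lowcore, exit_timer)
#define __LC_KERNEL_STACK	offsetof(struct lowcore, kernel_stack)

/* wherever the "relocated" lowcore happens to live */
static struct lowcore relocated_lc;

static struct lowcore *get_lowcore(void)
{
	return &relocated_lc;		/* plays the role of GET_LC %r13 */
}

int main(void)
{
	struct lowcore *lc = get_lowcore();

	/* old: __LC_EXIT_TIMER       = absolute offset from address 0    */
	/* new: __LC_EXIT_TIMER(%r13) = offset from the base held in %r13 */
	*(uint64_t *)((char *)lc + __LC_EXIT_TIMER) = 42;
	printf("exit_timer = %llu\n", (unsigned long long)lc->exit_timer);
	return 0;
}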
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 45413b04efc5..396034b2fe67 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/lowcore.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
@@ -18,14 +19,15 @@
__HEAD
SYM_CODE_START(startup_continue)
larl %r1,tod_clock_base
- mvc 0(16,%r1),__LC_BOOT_CLOCK
+ GET_LC %r2
+ mvc 0(16,%r1),__LC_BOOT_CLOCK(%r2)
#
# Setup stack
#
larl %r14,init_task
- stg %r14,__LC_CURRENT
+ stg %r14,__LC_CURRENT(%r2)
larl %r15,init_thread_union+STACK_INIT_OFFSET
- stg %r15,__LC_KERNEL_STACK
+ stg %r15,__LC_KERNEL_STACK(%r2)
brasl %r14,sclp_early_adjust_va # allow sclp_early_printk
brasl %r14,startup_init # s390 specific early init
brasl %r14,start_kernel # common init code
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 3a7d6e172211..f17bb7bf9392 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2112,7 +2112,7 @@ void do_restart(void *arg)
tracing_off();
debug_locks_off();
lgr_info_log();
- smp_call_online_cpu(__do_restart, arg);
+ smp_call_ipl_cpu(__do_restart, arg);
}
/* on halt */
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index f4cf65da6d49..8f681ccfb83a 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -62,7 +62,7 @@ static void __do_machine_kdump(void *data)
* This need to be done *after* s390_reset_system set the
* prefix register of this CPU to zero
*/
- memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
+ memcpy(absolute_pointer(get_lowcore()->floating_pt_save_area),
phys_to_virt(prefix + __LC_FPREGS_SAVE_AREA), 512);
call_nodat(1, int, purgatory, int, 1);
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 9b8c24ebb008..e11ec15960a1 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -4,6 +4,8 @@
#include <linux/cpu.h>
#include <asm/nospec-branch.h>
+int nobp = IS_ENABLED(CONFIG_KERNEL_NOBP);
+
static int __init nobp_setup_early(char *str)
{
bool enabled;
@@ -17,11 +19,11 @@ static int __init nobp_setup_early(char *str)
* The user explicitly requested nobp=1, enable it and
* disable the expoline support.
*/
- __set_facility(82, alt_stfle_fac_list);
+ nobp = 1;
if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_disable = 1;
} else {
- __clear_facility(82, alt_stfle_fac_list);
+ nobp = 0;
}
return 0;
}
@@ -29,7 +31,7 @@ early_param("nobp", nobp_setup_early);
static int __init nospec_setup_early(char *str)
{
- __clear_facility(82, alt_stfle_fac_list);
+ nobp = 0;
return 0;
}
early_param("nospec", nospec_setup_early);
@@ -40,7 +42,7 @@ static int __init nospec_report(void)
pr_info("Spectre V2 mitigation: etokens\n");
if (nospec_uses_trampoline())
pr_info("Spectre V2 mitigation: execute trampolines\n");
- if (__test_facility(82, alt_stfle_fac_list))
+ if (nobp_enabled())
pr_info("Spectre V2 mitigation: limited branch prediction\n");
return 0;
}
@@ -66,14 +68,14 @@ void __init nospec_auto_detect(void)
*/
if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
- __clear_facility(82, alt_stfle_fac_list);
+ nobp = 0;
} else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
*/
nospec_disable = 0;
- __clear_facility(82, alt_stfle_fac_list);
+ nobp = 0;
}
/*
* If the kernel has not been compiled with expolines the
@@ -86,7 +88,7 @@ static int __init spectre_v2_setup_early(char *str)
{
if (str && !strncmp(str, "on", 2)) {
nospec_disable = 0;
- __clear_facility(82, alt_stfle_fac_list);
+ nobp = 0;
}
if (str && !strncmp(str, "off", 3))
nospec_disable = 1;
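The nospec-branch.c changes above stop toggling facility bit 82 in alt_stfle_fac_list and track the state in a plain integer that is queried through nobp_enabled(). Below is a minimal stand-alone sketch of that flag-plus-helper pattern; it assumes nobp_enabled() is nothing more than a trivial wrapper around the flag, since its real declaration sits in a header that is not part of this diff.

#include <stdbool.h>
#include <stdio.h>

/* stand-in for: int nobp = IS_ENABLED(CONFIG_KERNEL_NOBP); */
static int nobp = 1;

static bool nobp_enabled(void)
{
	return nobp != 0;
}

int main(void)
{
	/* nospec_report()/cpu_show_spectre_v2() only need the boolean answer */
	if (nobp_enabled())
		printf("Spectre V2 mitigation: limited branch prediction\n");
	return 0;
}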
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index 52d4353188ad..a95188818637 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -17,7 +17,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Mitigation: etokens\n");
if (nospec_uses_trampoline())
return sprintf(buf, "Mitigation: execute trampolines\n");
- if (__test_facility(82, alt_stfle_fac_list))
+ if (nobp_enabled())
return sprintf(buf, "Mitigation: limited branch prediction\n");
return sprintf(buf, "Vulnerable\n");
}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 1434642e9cba..6968be98af11 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -556,25 +556,31 @@ static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth)
struct cf_trailer_entry *trailer_start, *trailer_stop;
struct cf_ctrset_entry *ctrstart, *ctrstop;
size_t offset = 0;
+ int i;
- auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
- do {
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset);
ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset);
+ /* Counter set not authorized */
+ if (!(auth & cpumf_ctr_ctl[i]))
+ continue;
+ /* Counter set size zero was not saved */
+ if (!cpum_cf_read_setsize(i))
+ continue;
+
if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
pr_err_once("cpum_cf_diag counter set compare error "
"in set %i\n", ctrstart->set);
return 0;
}
- auth &= ~cpumf_ctr_ctl[ctrstart->set];
if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
cfdiag_diffctrset((u64 *)(ctrstart + 1),
(u64 *)(ctrstop + 1), ctrstart->ctr);
offset += ctrstart->ctr * sizeof(u64) +
sizeof(*ctrstart);
}
- } while (ctrstart->def && auth);
+ }
/* Save time_stamp from start of event in stop's trailer */
trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset);
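The rewritten cfdiag_diffctr() above no longer walks the saved buffer until the data runs out; it visits every possible counter set and skips the ones that are either not authorized or were saved with a size of zero. Here is a small self-contained sketch of that skip logic; the set numbers, control bits and sizes are made up for illustration.

#include <stdio.h>

enum { SET_BASIC, SET_USER, SET_CRYPTO, SET_EXT, SET_MT_DIAG, SET_MAX };

static const unsigned long set_ctl[SET_MAX]  = { 0x02, 0x04, 0x08, 0x01, 0x20 };
static const unsigned int  set_size[SET_MAX] = { 6, 2, 0, 16, 48 };

int main(void)
{
	unsigned long auth = 0x02 | 0x08;	/* pretend two sets are authorized */
	int i;

	for (i = SET_BASIC; i < SET_MAX; i++) {
		if (!(auth & set_ctl[i]))
			continue;		/* counter set not authorized */
		if (!set_size[i])
			continue;		/* size zero: nothing was saved */
		printf("diffing set %d (%u counters)\n", i, set_size[i]);
	}
	return 0;
}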
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 65c1464eea4f..5ce9a795a0fe 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -17,7 +17,8 @@
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>
-
+#include <linux/smp.h>
+#include <asm/text-patching.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
@@ -79,6 +80,23 @@ void notrace stop_machine_yield(const struct cpumask *cpumask)
}
}
+static void do_sync_core(void *info)
+{
+ sync_core();
+}
+
+void text_poke_sync(void)
+{
+ on_each_cpu(do_sync_core, NULL, 1);
+}
+
+void text_poke_sync_lock(void)
+{
+ cpus_read_lock();
+ text_poke_sync();
+ cpus_read_unlock();
+}
+
/*
* cpu_init - initializes state that is per-CPU.
*/
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 88087a32ebc6..69fcaf54d5ca 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -9,6 +9,7 @@
#include <asm/asm-offsets.h>
#include <asm/nospec-insn.h>
#include <asm/sigp.h>
+#include <asm/lowcore.h>
GEN_BR_THUNK %r9
@@ -20,20 +21,15 @@
# r3 = Parameter for function
#
SYM_CODE_START(store_status)
- /* Save register one and load save area base */
- stg %r1,__LC_SAVE_AREA_RESTART
+ STMG_LC %r0,%r15,__LC_GPREGS_SAVE_AREA
/* General purpose registers */
- lghi %r1,__LC_GPREGS_SAVE_AREA
- stmg %r0,%r15,0(%r1)
- mvc 8(8,%r1),__LC_SAVE_AREA_RESTART
+ GET_LC %r13
/* Control registers */
- lghi %r1,__LC_CREGS_SAVE_AREA
- stctg %c0,%c15,0(%r1)
+ stctg %c0,%c15,__LC_CREGS_SAVE_AREA(%r13)
/* Access registers */
- lghi %r1,__LC_AREGS_SAVE_AREA
- stam %a0,%a15,0(%r1)
+ stamy %a0,%a15,__LC_AREGS_SAVE_AREA(%r13)
/* Floating point registers */
- lghi %r1,__LC_FPREGS_SAVE_AREA
+ lay %r1,__LC_FPREGS_SAVE_AREA(%r13)
std %f0, 0x00(%r1)
std %f1, 0x08(%r1)
std %f2, 0x10(%r1)
@@ -51,21 +47,21 @@ SYM_CODE_START(store_status)
std %f14,0x70(%r1)
std %f15,0x78(%r1)
/* Floating point control register */
- lghi %r1,__LC_FP_CREG_SAVE_AREA
+ lay %r1,__LC_FP_CREG_SAVE_AREA(%r13)
stfpc 0(%r1)
/* CPU timer */
- lghi %r1,__LC_CPU_TIMER_SAVE_AREA
+ lay %r1,__LC_CPU_TIMER_SAVE_AREA(%r13)
stpt 0(%r1)
/* Store prefix register */
- lghi %r1,__LC_PREFIX_SAVE_AREA
+ lay %r1,__LC_PREFIX_SAVE_AREA(%r13)
stpx 0(%r1)
/* Clock comparator - seven bytes */
- lghi %r1,__LC_CLOCK_COMP_SAVE_AREA
larl %r4,clkcmp
stckc 0(%r4)
+ lay %r1,__LC_CLOCK_COMP_SAVE_AREA(%r13)
mvc 1(7,%r1),1(%r4)
/* Program status word */
- lghi %r1,__LC_PSW_SAVE_AREA
+ lay %r1,__LC_PSW_SAVE_AREA(%r13)
epsw %r4,%r5
st %r4,0(%r1)
st %r5,4(%r1)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3993f4caf224..4ec99f73fa27 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -149,13 +149,12 @@ unsigned long __bootdata_preserved(max_mappable);
struct physmem_info __bootdata(physmem_info);
struct vm_layout __bootdata_preserved(vm_layout);
-EXPORT_SYMBOL_GPL(vm_layout);
+EXPORT_SYMBOL(vm_layout);
int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
-u64 alt_stfle_fac_list[16];
struct oldmem_data __bootdata_preserved(oldmem_data);
unsigned long VMALLOC_START;
@@ -406,6 +405,7 @@ static void __init setup_lowcore(void)
panic("%s: Failed to allocate %zu bytes align=%zx\n",
__func__, sizeof(*lc), sizeof(*lc));
+ lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
lc->restart_psw.addr = __pa(restart_int_handler);
lc->external_new_psw.mask = PSW_KERNEL_BITS;
@@ -889,6 +889,9 @@ void __init setup_arch(char **cmdline_p)
else
pr_info("Linux is running as a guest in 64-bit mode\n");
+ if (have_relocated_lowcore())
+ pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
+
log_component_list();
/* Have one command line that is parsed and saved in /proc/cmdline */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c3c54adf67bc..fbba37ec53cf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -74,16 +74,15 @@ enum {
CPU_STATE_CONFIGURED,
};
-struct pcpu {
- unsigned long ec_mask; /* bit mask for ec_xxx functions */
- unsigned long ec_clk; /* sigp timestamp for ec_xxx */
- signed char state; /* physical cpu state */
- signed char polarization; /* physical polarization */
- u16 address; /* physical cpu address */
-};
-
static u8 boot_core_type;
-static struct pcpu pcpu_devices[NR_CPUS];
+DEFINE_PER_CPU(struct pcpu, pcpu_devices);
+/*
+ * Pointer to the pcpu area of the boot CPU. This is required when a restart
+ * interrupt is triggered on an offline CPU. In that case, accessing percpu
+ * data with the common primitives does not work, since the percpu offset is
+ * stored in a non-existent lowcore.
+ */
+static struct pcpu *ipl_pcpu;
unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);
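The hunks above and below convert pcpu_devices from a flat NR_CPUS array into per-CPU data, so every pcpu_devices + cpu or pcpu_devices[cpu] access turns into per_cpu_ptr(), per_cpu() or this_cpu_ptr(). The user-space analogy below only imitates the shape of those accessors; real per-CPU data is not a flat array and the macro names are reused purely for illustration.

#include <stdio.h>

#define NR_CPUS 4

struct pcpu {
	unsigned short address;
	signed char state;
};

/* stands in for DEFINE_PER_CPU(struct pcpu, pcpu_devices) */
static struct pcpu fake_pcpu_devices[NR_CPUS];

#define per_cpu_ptr(arr, cpu)	(&(arr)[(cpu)])
#define per_cpu(arr, cpu)	(*per_cpu_ptr(arr, cpu))

int main(void)
{
	int cpu;

	per_cpu(fake_pcpu_devices, 0).address = 0x10;	/* was: pcpu_devices[0].address */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* was: pcpu_devices + cpu */
		struct pcpu *pcpu = per_cpu_ptr(fake_pcpu_devices, cpu);

		printf("cpu %d address %#x state %d\n", cpu, pcpu->address, pcpu->state);
	}
	return 0;
}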
@@ -174,8 +173,8 @@ static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
int cpu;
for_each_cpu(cpu, mask)
- if (pcpu_devices[cpu].address == address)
- return pcpu_devices + cpu;
+ if (per_cpu(pcpu_devices, cpu).address == address)
+ return &per_cpu(pcpu_devices, cpu);
return NULL;
}
@@ -230,13 +229,11 @@ out:
return -ENOMEM;
}
-static void pcpu_free_lowcore(struct pcpu *pcpu)
+static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
- int cpu;
- cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
async_stack = lc->async_stack - STACK_INIT_OFFSET;
@@ -259,6 +256,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
lc->cpu_nr = cpu;
+ lc->pcpu = (unsigned long)pcpu;
lc->restart_flags = RESTART_FLAG_CTLREGS;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
@@ -277,12 +275,10 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
arch_spin_lock_setup(cpu);
}
-static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
+static void pcpu_attach_task(int cpu, struct task_struct *tsk)
{
struct lowcore *lc;
- int cpu;
- cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
lc->current_task = (unsigned long)tsk;
@@ -296,18 +292,16 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
lc->steal_timer = 0;
}
-static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
+static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
{
struct lowcore *lc;
- int cpu;
- cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
lc->restart_stack = lc->kernel_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
lc->restart_source = -1U;
- pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
+ pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);
}
typedef void (pcpu_delegate_fn)(void *);
@@ -320,14 +314,14 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
func(data); /* should not return */
}
-static void pcpu_delegate(struct pcpu *pcpu,
+static void pcpu_delegate(struct pcpu *pcpu, int cpu,
pcpu_delegate_fn *func,
void *data, unsigned long stack)
{
struct lowcore *lc, *abs_lc;
unsigned int source_cpu;
- lc = lowcore_ptr[pcpu - pcpu_devices];
+ lc = lowcore_ptr[cpu];
source_cpu = stap();
if (pcpu->address == source_cpu) {
@@ -377,38 +371,22 @@ static int pcpu_set_smt(unsigned int mtid)
smp_cpu_mt_shift = 0;
while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
smp_cpu_mt_shift++;
- pcpu_devices[0].address = stap();
+ per_cpu(pcpu_devices, 0).address = stap();
}
return cc;
}
/*
- * Call function on an online CPU.
- */
-void smp_call_online_cpu(void (*func)(void *), void *data)
-{
- struct pcpu *pcpu;
-
- /* Use the current cpu if it is online. */
- pcpu = pcpu_find_address(cpu_online_mask, stap());
- if (!pcpu)
- /* Use the first online cpu. */
- pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
- pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
-}
-
-/*
* Call function on the ipl CPU.
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
struct lowcore *lc = lowcore_ptr[0];
- if (pcpu_devices[0].address == stap())
+ if (ipl_pcpu->address == stap())
lc = get_lowcore();
- pcpu_delegate(&pcpu_devices[0], func, data,
- lc->nodat_stack);
+ pcpu_delegate(ipl_pcpu, 0, func, data, lc->nodat_stack);
}
int smp_find_processor_id(u16 address)
@@ -416,21 +394,21 @@ int smp_find_processor_id(u16 address)
int cpu;
for_each_present_cpu(cpu)
- if (pcpu_devices[cpu].address == address)
+ if (per_cpu(pcpu_devices, cpu).address == address)
return cpu;
return -1;
}
void schedule_mcck_handler(void)
{
- pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
+ pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_mcck_pending);
}
bool notrace arch_vcpu_is_preempted(int cpu)
{
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
return false;
- if (pcpu_running(pcpu_devices + cpu))
+ if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu)))
return false;
return true;
}
@@ -442,7 +420,7 @@ void notrace smp_yield_cpu(int cpu)
return;
diag_stat_inc_norecursion(DIAG_STAT_X09C);
asm volatile("diag %0,0,0x9c"
- : : "d" (pcpu_devices[cpu].address));
+ : : "d" (per_cpu(pcpu_devices, cpu).address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);
@@ -463,7 +441,7 @@ void notrace smp_emergency_stop(void)
end = get_tod_clock() + (1000000UL << 12);
for_each_cpu(cpu, &cpumask) {
- struct pcpu *pcpu = pcpu_devices + cpu;
+ struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
set_bit(ec_stop_cpu, &pcpu->ec_mask);
while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
0, NULL) == SIGP_CC_BUSY &&
@@ -472,7 +450,7 @@ void notrace smp_emergency_stop(void)
}
while (get_tod_clock() < end) {
for_each_cpu(cpu, &cpumask)
- if (pcpu_stopped(pcpu_devices + cpu))
+ if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
cpumask_clear_cpu(cpu, &cpumask);
if (cpumask_empty(&cpumask))
break;
@@ -487,6 +465,7 @@ NOKPROBE_SYMBOL(smp_emergency_stop);
*/
void smp_send_stop(void)
{
+ struct pcpu *pcpu;
int cpu;
/* Disable all interrupts/machine checks */
@@ -502,8 +481,9 @@ void smp_send_stop(void)
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
- pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
- while (!pcpu_stopped(pcpu_devices + cpu))
+ pcpu = per_cpu_ptr(&pcpu_devices, cpu);
+ pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
+ while (!pcpu_stopped(pcpu))
cpu_relax();
}
}
@@ -517,7 +497,7 @@ static void smp_handle_ext_call(void)
unsigned long bits;
/* handle bit signal external calls */
- bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
+ bits = this_cpu_xchg(pcpu_devices.ec_mask, 0);
if (test_bit(ec_stop_cpu, &bits))
smp_stop_cpu();
if (test_bit(ec_schedule, &bits))
@@ -542,12 +522,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
int cpu;
for_each_cpu(cpu, mask)
- pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
+ pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}
void arch_send_call_function_single_ipi(int cpu)
{
- pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
+ pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}
/*
@@ -557,13 +537,13 @@ void arch_send_call_function_single_ipi(int cpu)
*/
void arch_smp_send_reschedule(int cpu)
{
- pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
+ pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule);
}
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
- pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
+ pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_irq_work);
}
#endif
@@ -575,7 +555,7 @@ int smp_store_status(int cpu)
struct pcpu *pcpu;
unsigned long pa;
- pcpu = pcpu_devices + cpu;
+ pcpu = per_cpu_ptr(&pcpu_devices, cpu);
lc = lowcore_ptr[cpu];
pa = __pa(&lc->floating_pt_save_area);
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
@@ -683,17 +663,17 @@ void __init smp_save_dump_secondary_cpus(void)
void smp_cpu_set_polarization(int cpu, int val)
{
- pcpu_devices[cpu].polarization = val;
+ per_cpu(pcpu_devices, cpu).polarization = val;
}
int smp_cpu_get_polarization(int cpu)
{
- return pcpu_devices[cpu].polarization;
+ return per_cpu(pcpu_devices, cpu).polarization;
}
int smp_cpu_get_cpu_address(int cpu)
{
- return pcpu_devices[cpu].address;
+ return per_cpu(pcpu_devices, cpu).address;
}
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
@@ -732,7 +712,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
if (pcpu_find_address(cpu_present_mask, address + i))
continue;
- pcpu = pcpu_devices + cpu;
+ pcpu = per_cpu_ptr(&pcpu_devices, cpu);
pcpu->address = address + i;
if (configured)
pcpu->state = CPU_STATE_CONFIGURED;
@@ -767,7 +747,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
* that all SMT threads get subsequent logical CPU numbers.
*/
if (early) {
- core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+ core_id = per_cpu(pcpu_devices, 0).address >> smp_cpu_mt_shift;
for (i = 0; i < info->configured; i++) {
core = &info->core[i];
if (core->core_id == core_id) {
@@ -867,7 +847,7 @@ static void smp_start_secondary(void *cpuvoid)
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- struct pcpu *pcpu = pcpu_devices + cpu;
+ struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
int rc;
if (pcpu->state != CPU_STATE_CONFIGURED)
@@ -885,8 +865,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
*/
system_ctlreg_lock();
pcpu_prepare_secondary(pcpu, cpu);
- pcpu_attach_task(pcpu, tidle);
- pcpu_start_fn(pcpu, smp_start_secondary, NULL);
+ pcpu_attach_task(cpu, tidle);
+ pcpu_start_fn(cpu, smp_start_secondary, NULL);
/* Wait until cpu puts itself in the online & active maps */
while (!cpu_online(cpu))
cpu_relax();
@@ -931,18 +911,19 @@ void __cpu_die(unsigned int cpu)
struct pcpu *pcpu;
/* Wait until target cpu is down */
- pcpu = pcpu_devices + cpu;
+ pcpu = per_cpu_ptr(&pcpu_devices, cpu);
while (!pcpu_stopped(pcpu))
cpu_relax();
- pcpu_free_lowcore(pcpu);
+ pcpu_free_lowcore(pcpu, cpu);
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
+ pcpu->flags = 0;
}
void __noreturn cpu_die(void)
{
idle_task_exit();
- pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
+ pcpu_sigp_retry(this_cpu_ptr(&pcpu_devices), SIGP_STOP, 0);
for (;;) ;
}
@@ -972,11 +953,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
- struct pcpu *pcpu = pcpu_devices;
+ struct lowcore *lc = get_lowcore();
WARN_ON(!cpu_present(0) || !cpu_online(0));
- pcpu->state = CPU_STATE_CONFIGURED;
- get_lowcore()->percpu_offset = __per_cpu_offset[0];
+ lc->percpu_offset = __per_cpu_offset[0];
+ ipl_pcpu = per_cpu_ptr(&pcpu_devices, 0);
+ ipl_pcpu->state = CPU_STATE_CONFIGURED;
+ lc->pcpu = (unsigned long)ipl_pcpu;
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
@@ -984,8 +967,8 @@ void __init smp_setup_processor_id(void)
{
struct lowcore *lc = get_lowcore();
- pcpu_devices[0].address = stap();
lc->cpu_nr = 0;
+ per_cpu(pcpu_devices, 0).address = stap();
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
}
@@ -1007,7 +990,7 @@ static ssize_t cpu_configure_show(struct device *dev,
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
- count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
+ count = sprintf(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state);
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
@@ -1033,7 +1016,7 @@ static ssize_t cpu_configure_store(struct device *dev,
for (i = 0; i <= smp_cpu_mtid; i++)
if (cpu_online(cpu + i))
goto out;
- pcpu = pcpu_devices + cpu;
+ pcpu = per_cpu_ptr(&pcpu_devices, cpu);
rc = 0;
switch (val) {
case 0:
@@ -1045,7 +1028,7 @@ static ssize_t cpu_configure_store(struct device *dev,
for (i = 0; i <= smp_cpu_mtid; i++) {
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
continue;
- pcpu[i].state = CPU_STATE_STANDBY;
+ per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY;
smp_cpu_set_polarization(cpu + i,
POLARIZATION_UNKNOWN);
}
@@ -1060,7 +1043,7 @@ static ssize_t cpu_configure_store(struct device *dev,
for (i = 0; i <= smp_cpu_mtid; i++) {
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
continue;
- pcpu[i].state = CPU_STATE_CONFIGURED;
+ per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED;
smp_cpu_set_polarization(cpu + i,
POLARIZATION_UNKNOWN);
}
@@ -1079,7 +1062,7 @@ static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
static ssize_t show_cpu_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
+ return sprintf(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
@@ -1105,14 +1088,14 @@ static struct attribute_group cpu_online_attr_group = {
static int smp_cpu_online(unsigned int cpu)
{
- struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
return sysfs_create_group(&c->dev.kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
- struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
sysfs_remove_group(&c->dev.kobj, &cpu_online_attr_group);
return 0;
@@ -1125,7 +1108,7 @@ bool arch_cpu_is_hotpluggable(int cpu)
int arch_register_cpu(int cpu)
{
- struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
int rc;
c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 98ef6dc7916b..22029ecae1c5 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -594,7 +594,7 @@ static int __init topology_setup(char *str)
}
early_param("topology", topology_setup);
-static int topology_ctl_handler(struct ctl_table *ctl, int write,
+static int topology_ctl_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int enabled = topology_is_enabled();
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index fa62fa0e369f..36db065c7cf7 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -18,11 +18,22 @@
#include <asm/sections.h>
#include <asm/uv.h>
+#if !IS_ENABLED(CONFIG_KVM)
+unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
+{
+ return 0;
+}
+
+int gmap_fault(struct gmap *gmap, unsigned long gaddr,
+ unsigned int fault_flags)
+{
+ return 0;
+}
+#endif
+
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);
-#endif
/*
* uv_info contains both host and guest information but it's currently only
@@ -35,7 +46,6 @@ EXPORT_SYMBOL(prot_virt_guest);
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);
-#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
@@ -543,9 +553,6 @@ int arch_make_page_accessible(struct page *page)
return arch_make_folio_accessible(page_folio(page));
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
-#endif
-
-#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -721,24 +728,13 @@ static struct attribute_group uv_query_attr_group = {
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- int val = 0;
-
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
- val = prot_virt_guest;
-#endif
- return sysfs_emit(buf, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", prot_virt_guest);
}
static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- int val = 0;
-
-#if IS_ENABLED(CONFIG_KVM)
- val = prot_virt_host;
-#endif
-
- return sysfs_emit(buf, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", prot_virt_host);
}
static struct kobj_attribute uv_prot_virt_guest =
@@ -790,4 +786,3 @@ out_kobj:
return rc;
}
device_initcall(uv_info_init);
-#endif
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a1ce3925ec71..975c654cf5a5 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -190,6 +190,9 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
+ RUNTIME_CONST(shift, d_hash_shift)
+ RUNTIME_CONST(ptr, dentry_hashtable)
+
PERCPU_SECTION(0x100)
. = ALIGN(PAGE_SIZE);
@@ -219,6 +222,8 @@ SECTIONS
QUAD(init_mm)
QUAD(swapper_pg_dir)
QUAD(invalid_pg_dir)
+ QUAD(__alt_instructions)
+ QUAD(__alt_instructions_end)
#ifdef CONFIG_KASAN
QUAD(kasan_early_shadow_page)
QUAD(kasan_early_shadow_pte)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 0c9a73a18826..9f86ad8fa8b4 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock)
int owner;
asm_inline volatile(
- ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
return owner;
@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
int expected = old;
asm_inline volatile(
- ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 5cb5e724cde3..75d15bf41d97 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -243,7 +243,7 @@ static int cmm_skip_blanks(char *cp, char **endp)
return str != cp;
}
-static int cmm_pages_handler(struct ctl_table *ctl, int write,
+static int cmm_pages_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
long nr = cmm_get_pages();
@@ -262,7 +262,7 @@ static int cmm_pages_handler(struct ctl_table *ctl, int write,
return 0;
}
-static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
+static int cmm_timed_pages_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -282,7 +282,7 @@ static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
return 0;
}
-static int cmm_timeout_handler(struct ctl_table *ctl, int write,
+static int cmm_timeout_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 45db5f47b22d..98dab3e049de 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -36,6 +36,16 @@ enum address_markers_idx {
VMEMMAP_END_NR,
VMALLOC_NR,
VMALLOC_END_NR,
+#ifdef CONFIG_KMSAN
+ KMSAN_VMALLOC_SHADOW_START_NR,
+ KMSAN_VMALLOC_SHADOW_END_NR,
+ KMSAN_VMALLOC_ORIGIN_START_NR,
+ KMSAN_VMALLOC_ORIGIN_END_NR,
+ KMSAN_MODULES_SHADOW_START_NR,
+ KMSAN_MODULES_SHADOW_END_NR,
+ KMSAN_MODULES_ORIGIN_START_NR,
+ KMSAN_MODULES_ORIGIN_END_NR,
+#endif
MODULES_NR,
MODULES_END_NR,
ABS_LOWCORE_NR,
@@ -65,6 +75,16 @@ static struct addr_marker address_markers[] = {
[VMEMMAP_END_NR] = {0, "vmemmap Area End"},
[VMALLOC_NR] = {0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, "vmalloc Area End"},
+#ifdef CONFIG_KMSAN
+ [KMSAN_VMALLOC_SHADOW_START_NR] = {0, "Kmsan vmalloc Shadow Start"},
+ [KMSAN_VMALLOC_SHADOW_END_NR] = {0, "Kmsan vmalloc Shadow End"},
+ [KMSAN_VMALLOC_ORIGIN_START_NR] = {0, "Kmsan vmalloc Origins Start"},
+ [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, "Kmsan vmalloc Origins End"},
+ [KMSAN_MODULES_SHADOW_START_NR] = {0, "Kmsan Modules Shadow Start"},
+ [KMSAN_MODULES_SHADOW_END_NR] = {0, "Kmsan Modules Shadow End"},
+ [KMSAN_MODULES_ORIGIN_START_NR] = {0, "Kmsan Modules Origins Start"},
+ [KMSAN_MODULES_ORIGIN_END_NR] = {0, "Kmsan Modules Origins End"},
+#endif
[MODULES_NR] = {0, "Modules Area Start"},
[MODULES_END_NR] = {0, "Modules Area End"},
[ABS_LOWCORE_NR] = {0, "Lowcore Area Start"},
@@ -307,6 +327,16 @@ static int pt_dump_init(void)
address_markers[KFENCE_START_NR].start_address = kfence_start;
address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
#endif
+#ifdef CONFIG_KMSAN
+ address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START;
+ address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END;
+ address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START;
+ address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END;
+ address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START;
+ address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END;
+ address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
+ address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
+#endif
sort_address_markers();
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 632c3a55feed..28a18c42ba99 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -48,7 +48,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
}
/*
- * s390_kernel_write - write to kernel memory bypassing DAT
+ * __s390_kernel_write - write to kernel memory bypassing DAT
* @dst: destination address
* @src: source address
* @size: number of bytes to copy
@@ -61,7 +61,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
*/
static DEFINE_SPINLOCK(s390_kernel_write_lock);
-notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
+notrace void *__s390_kernel_write(void *dst, const void *src, size_t size)
{
void *tmp = dst;
unsigned long flags;
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 0ef83b6ac0db..84482a921332 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -268,33 +268,20 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
}
}
-int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs,
+ unsigned long *bit)
{
- struct zpci_dev *zdev = to_zpci(pdev);
- unsigned int hwirq, msi_vecs, cpu;
- unsigned long bit;
- struct msi_desc *msi;
- struct msi_msg msg;
- int cpu_addr;
- int rc, irq;
-
- zdev->aisb = -1UL;
- zdev->msi_first_bit = -1U;
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
- msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
-
if (irq_delivery == DIRECTED) {
/* Allocate cpu vector bits */
- bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
- if (bit == -1UL)
+ *bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
+ if (*bit == -1UL)
return -EIO;
} else {
/* Allocate adapter summary indicator bit */
- bit = airq_iv_alloc_bit(zpci_sbv);
- if (bit == -1UL)
+ *bit = airq_iv_alloc_bit(zpci_sbv);
+ if (*bit == -1UL)
return -EIO;
- zdev->aisb = bit;
+ zdev->aisb = *bit;
/* Create adapter interrupt vector */
zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
@@ -302,27 +289,66 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return -ENOMEM;
/* Wire up shortcut pointer */
- zpci_ibv[bit] = zdev->aibv;
+ zpci_ibv[*bit] = zdev->aibv;
/* Each function has its own interrupt vector */
- bit = 0;
+ *bit = 0;
}
+ return 0;
+}
- /* Request MSI interrupts */
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu;
+ struct zpci_dev *zdev = to_zpci(pdev);
+ struct msi_desc *msi;
+ struct msi_msg msg;
+ unsigned long bit;
+ int cpu_addr;
+ int rc, irq;
+
+ zdev->aisb = -1UL;
+ zdev->msi_first_bit = -1U;
+
+ msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
+ if (msi_vecs < nvec) {
+ pr_info("%s requested %d irqs, allocate system limit of %d",
+ pci_name(pdev), nvec, zdev->max_msi);
+ }
+
+ rc = __alloc_airq(zdev, msi_vecs, &bit);
+ if (rc < 0)
+ return rc;
+
+ /*
+ * Request MSI interrupts:
+ * When using MSI, nvec_used interrupt sources and their irq
+ * descriptors are controlled through one msi descriptor.
+ * Thus the outer loop over msi descriptors shall run only once,
+ * while two inner loops iterate over the interrupt vectors.
+ * When using MSI-X, each interrupt vector/irq descriptor
+ * is bound to exactly one msi descriptor (nvec_used is one).
+ * So the inner loops are executed once, while the outer iterates
+ * over the MSI-X descriptors.
+ */
hwirq = bit;
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
- rc = -EIO;
if (hwirq - bit >= msi_vecs)
break;
- irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
- (irq_delivery == DIRECTED) ?
- msi->affinity : NULL);
+ irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used);
+ irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE,
+ (irq_delivery == DIRECTED) ?
+ msi->affinity : NULL);
if (irq < 0)
return -ENOMEM;
- rc = irq_set_msi_desc(irq, msi);
- if (rc)
- return rc;
- irq_set_chip_and_handler(irq, &zpci_irq_chip,
- handle_percpu_irq);
+
+ for (i = 0; i < irqs_per_msi; i++) {
+ rc = irq_set_msi_desc_off(irq, i, msi);
+ if (rc)
+ return rc;
+ irq_set_chip_and_handler(irq + i, &zpci_irq_chip,
+ handle_percpu_irq);
+ }
+
msg.data = hwirq - bit;
if (irq_delivery == DIRECTED) {
if (msi->affinity)
@@ -335,31 +361,35 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
msg.address_lo |= (cpu_addr << 8);
for_each_possible_cpu(cpu) {
- airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
+ for (i = 0; i < irqs_per_msi; i++)
+ airq_iv_set_data(zpci_ibv[cpu],
+ hwirq + i, irq + i);
}
} else {
msg.address_lo = zdev->msi_addr & 0xffffffff;
- airq_iv_set_data(zdev->aibv, hwirq, irq);
+ for (i = 0; i < irqs_per_msi; i++)
+ airq_iv_set_data(zdev->aibv, hwirq + i, irq + i);
}
msg.address_hi = zdev->msi_addr >> 32;
pci_write_msi_msg(irq, &msg);
- hwirq++;
+ hwirq += irqs_per_msi;
}
zdev->msi_first_bit = bit;
- zdev->msi_nr_irqs = msi_vecs;
+ zdev->msi_nr_irqs = hwirq - bit;
rc = zpci_set_irq(zdev);
if (rc)
return rc;
- return (msi_vecs == nvec) ? 0 : msi_vecs;
+ return (zdev->msi_nr_irqs == nvec) ? 0 : zdev->msi_nr_irqs;
}
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
struct msi_desc *msi;
+ unsigned int i;
int rc;
/* Disable interrupts */
@@ -369,8 +399,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
/* Release MSI interrupts */
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
- irq_set_msi_desc(msi->irq, NULL);
- irq_free_desc(msi->irq);
+ for (i = 0; i < msi->nvec_used; i++) {
+ irq_set_msi_desc(msi->irq + i, NULL);
+ irq_free_desc(msi->irq + i);
+ }
msi->msg.address_lo = 0;
msi->msg.address_hi = 0;
msi->msg.data = 0;
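The comment block in the rewritten arch_setup_msi_irqs() above explains why two nested loops are needed: with plain MSI a single descriptor covers several vectors (nvec_used > 1), while with MSI-X every descriptor covers exactly one. The stand-alone sketch below reproduces just that loop structure; struct fake_msi_desc and the vector counts are invented for illustration. Running it for both shapes yields the same hwirq sequence, which is why one code path can serve MSI and MSI-X.

#include <stdio.h>

struct fake_msi_desc {
	unsigned int nvec_used;
};

static void wire_up(const struct fake_msi_desc *descs, int ndesc, unsigned int msi_vecs)
{
	unsigned int hwirq = 0, i;
	int d;

	for (d = 0; d < ndesc; d++) {			/* msi_for_each_desc() */
		unsigned int per_msi = descs[d].nvec_used;

		if (hwirq >= msi_vecs)
			break;
		if (per_msi > msi_vecs - hwirq)
			per_msi = msi_vecs - hwirq;
		for (i = 0; i < per_msi; i++)		/* inner vector loop */
			printf("desc %d: hwirq %u\n", d, hwirq + i);
		hwirq += per_msi;
	}
}

int main(void)
{
	struct fake_msi_desc msi[]  = { { .nvec_used = 4 } };		/* MSI   */
	struct fake_msi_desc msix[] = { { .nvec_used = 1 }, { .nvec_used = 1 },
					{ .nvec_used = 1 }, { .nvec_used = 1 } };	/* MSI-X */

	wire_up(msi, 1, 4);
	wire_up(msix, 4, 4);
	return 0;
}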
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e6a3ead51fb..1aa3c4a0c5b2 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -3,8 +3,6 @@ config SUPERH
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_CPU_CACHE_ALIASING
- select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU
- select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_CPU_FINALIZE_INIT
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 05d21d91f41d..137573610ec4 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -24,8 +24,6 @@ CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_HUGETLB_PAGE_SIZE_1MB=y
-CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SH_APSH4AD0A=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 7b427c17fbfe..07894f13441e 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -43,8 +43,6 @@ CONFIG_MEMORY_SIZE=0x20000000
CONFIG_PMB=y
CONFIG_NUMA=y
CONFIG_HUGETLB_PAGE_SIZE_64MB=y
-CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SPECULATIVE_EXECUTION=y
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index aa353dff7f19..9a0df5ea3866 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -31,8 +31,6 @@ CONFIG_CPU_SUBTYPE_SHX3=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_NUMA=y
CONFIG_PAGE_SIZE_64KB=y
-CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTREMOVE=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SH_X3PROTO=y
CONFIG_NO_HZ=y
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index 362e4860bf52..1dea43381b5a 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -131,4 +131,5 @@ module_exit(switch_exit);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("Generic push-switch framework");
MODULE_LICENSE("GPL v2");
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index f32a1963ff0c..1862411665ab 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -144,10 +144,6 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool y
-config ARCH_MEMORY_PROBE
- def_bool y
- depends on MEMORY_HOTPLUG
-
config IOREMAP_FIXED
def_bool y
depends on X2TLB
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index bf1b54055316..d1fe90b2f5ff 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -395,31 +395,3 @@ void __init mem_init(void)
mem_init_done = 1;
}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size,
- struct mhp_params *params)
-{
- unsigned long start_pfn = PFN_DOWN(start);
- unsigned long nr_pages = size >> PAGE_SHIFT;
- int ret;
-
- if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
- return -EINVAL;
-
- /* We only have ZONE_NORMAL, so this is easy.. */
- ret = __add_pages(nid, start_pfn, nr_pages, params);
- if (unlikely(ret))
- printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
-
- return ret;
-}
-
-void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
-{
- unsigned long start_pfn = PFN_DOWN(start);
- unsigned long nr_pages = size >> PAGE_SHIFT;
-
- __remove_pages(start_pfn, nr_pages, altmap);
-}
-#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sparc/boot/install.sh b/arch/sparc/boot/install.sh
index 4f130f3f30d6..68de67c5621e 100755
--- a/arch/sparc/boot/install.sh
+++ b/arch/sparc/boot/install.sh
@@ -16,6 +16,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ -f $4/vmlinuz ]; then
mv $4/vmlinuz $4/vmlinuz.old
fi
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 587fb7841096..0ca8c3463166 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -483,11 +483,7 @@ int __vio_register_driver(struct vio_driver *drv, struct module *owner,
__vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
void vio_unregister_driver(struct vio_driver *drv);
-static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
-{
- return container_of(drv, struct vio_driver, driver);
-}
-
+#define to_vio_driver(__drv) container_of_const(__drv, struct vio_driver, driver)
#define to_vio_dev(__dev) container_of_const(__dev, struct vio_dev, dev)
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
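Replacing the to_vio_driver() inline with container_of_const() means that passing a const struct device_driver pointer now yields a const struct vio_driver pointer instead of silently dropping the qualifier. The sketch below is a simplified, self-contained re-implementation of that idea for illustration only; the kernel's real macro is written generically with typeof() and is not tied to one container type.

#include <stddef.h>
#include <stdio.h>

struct device_driver { const char *name; };
struct vio_driver { int id; struct device_driver driver; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define container_of_const(ptr, type, member)				\
	_Generic((ptr),							\
		const struct device_driver *:				\
			((const type *)((const char *)(ptr) - offsetof(type, member))), \
		default: container_of(ptr, type, member))

#define to_vio_driver(drv) container_of_const(drv, struct vio_driver, driver)

int main(void)
{
	struct vio_driver vdrv = { .id = 7, .driver = { .name = "demo" } };
	const struct device_driver *drv = &vdrv.driver;
	const struct vio_driver *back = to_vio_driver(drv);	/* const is preserved */

	printf("%d %s\n", back->id, back->driver.name);
	return 0;
}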
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 846a55f942d4..07933d75ac81 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -54,10 +54,10 @@ static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static int vio_bus_match(struct device *dev, struct device_driver *drv)
+static int vio_bus_match(struct device *dev, const struct device_driver *drv)
{
struct vio_dev *vio_dev = to_vio_dev(dev);
- struct vio_driver *vio_drv = to_vio_driver(drv);
+ const struct vio_driver *vio_drv = to_vio_driver(drv);
const struct vio_device_id *matches = vio_drv->id_table;
if (!matches)
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 93a5a8999b07..dca84fd6d00a 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -11,7 +11,7 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
- select ARCH_NO_PREEMPT
+ select ARCH_NO_PREEMPT_DYNAMIC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
@@ -31,7 +31,8 @@ config UML
select TRACE_IRQFLAGS_SUPPORT
select TTY # Needed for line.c
select HAVE_ARCH_VMAP_STACK
- select HAVE_RUST if X86_64
+ select HAVE_RUST
+ select ARCH_HAS_UBSAN
config MMU
bool
@@ -48,12 +49,13 @@ config NO_IOMEM
config UML_IOMEM_EMULATION
bool
select INDIRECT_IOMEM
+ select HAS_IOPORT
select GENERIC_PCI_IOMAP
select GENERIC_IOMAP
select NO_GENERIC_PCI_IOPORT_MAP
config NO_IOPORT_MAP
- def_bool y
+ def_bool !UML_IOMEM_EMULATION
config ISA
bool
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index b94b2618e7d8..ede40a160c5e 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -297,26 +297,6 @@ config UML_NET_MCAST
If unsure, say N.
-config UML_NET_PCAP
- bool "pcap transport (obsolete)"
- depends on UML_NET
- depends on !MODVERSIONS
- select MAY_HAVE_RUNTIME_DEPS
- help
- The pcap transport makes a pcap packet stream on the host look
- like an ethernet device inside UML. This is useful for making
- UML act as a network monitor for the host. You must have libcap
- installed in order to build the pcap transport into UML.
-
- For more information, see
- <http://user-mode-linux.sourceforge.net/old/networking.html> That site
- has examples of the UML command line to use to enable this option.
-
- NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
- migrate to UML_NET_VECTOR.
-
- If unsure, say N.
-
config UML_NET_SLIRP
bool "SLiRP transport (obsolete)"
depends on UML_NET
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 0e6af81096fd..57882e6bc215 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -20,14 +20,9 @@ harddog-objs := harddog_kern.o
harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o
rtc-objs := rtc_kern.o rtc_user.o
-LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
-
LDFLAGS_vde.o = $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
-targets := pcap_kern.o pcap_user.o vde_kern.o vde_user.o
-
-$(obj)/pcap.o: $(obj)/pcap_kern.o $(obj)/pcap_user.o
- $(LD) -r -dp -o $@ $^ $(ld_flags)
+targets := vde_kern.o vde_user.o
$(obj)/vde.o: $(obj)/vde_kern.o $(obj)/vde_user.o
$(LD) -r -dp -o $@ $^ $(ld_flags)
@@ -49,7 +44,6 @@ obj-$(CONFIG_UML_NET_DAEMON) += daemon.o
obj-$(CONFIG_UML_NET_VECTOR) += vector.o
obj-$(CONFIG_UML_NET_VDE) += vde.o
obj-$(CONFIG_UML_NET_MCAST) += umcast.o
-obj-$(CONFIG_UML_NET_PCAP) += pcap.o
obj-$(CONFIG_UML_NET) += net.o
obj-$(CONFIG_MCONSOLE) += mconsole.o
obj-$(CONFIG_MMAPPER) += mmapper_kern.o
@@ -69,7 +63,7 @@ obj-$(CONFIG_UML_RTC) += rtc.o
obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
# pcap_user.o must be added explicitly.
-USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o
+USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o vde_user.o vector_user.o
CFLAGS_null.o = -DDEV_NULL=$(DEV_NULL_PATH)
CFLAGS_xterm.o += '-DCONFIG_XTERM_CHAN_DEFAULT_EMULATOR="$(CONFIG_XTERM_CHAN_DEFAULT_EMULATOR)"'
diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h
index e14b9cdf7a33..5a61db512ffb 100644
--- a/arch/um/drivers/chan.h
+++ b/arch/um/drivers/chan.h
@@ -22,7 +22,8 @@ struct chan {
unsigned int output:1;
unsigned int opened:1;
unsigned int enabled:1;
- int fd;
+ int fd_in;
+ int fd_out; /* only different to fd_in if blocking output is needed */
const struct chan_ops *ops;
void *data;
};
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 37538b4168da..e78a99816c86 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -81,6 +81,12 @@ static const struct chan_ops not_configged_ops = {
};
#endif /* CONFIG_NOCONFIG_CHAN */
+static inline bool need_output_blocking(void)
+{
+ return time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL;
+}
+
static int open_one_chan(struct chan *chan)
{
int fd, err;
@@ -96,15 +102,43 @@ static int open_one_chan(struct chan *chan)
return fd;
err = os_set_fd_block(fd, 0);
- if (err) {
- (*chan->ops->close)(fd, chan->data);
- return err;
- }
+ if (err)
+ goto out_close;
+
+ chan->fd_in = fd;
+ chan->fd_out = fd;
+
+ /*
+ * In time-travel modes infinite-CPU and external we need to guarantee
+ * that any writes to the output succeed immediately from the point of
+ * view of the VM. The best way to do this is to put the FD in blocking
+ * mode and simply wait/retry until everything is written.
+ * As every write is guaranteed to complete, we also do not need to
+ * request an IRQ for the output.
+ *
+ * Note that input cannot happen in a time synchronized way. We permit
+ * it, but time passes very quickly if anything waits for a read.
+ */
+ if (chan->output && need_output_blocking()) {
+ err = os_dup_file(chan->fd_out);
+ if (err < 0)
+ goto out_close;
- chan->fd = fd;
+ chan->fd_out = err;
+
+ err = os_set_fd_block(chan->fd_out, 1);
+ if (err) {
+ os_close_file(chan->fd_out);
+ goto out_close;
+ }
+ }
chan->opened = 1;
return 0;
+
+out_close:
+ (*chan->ops->close)(fd, chan->data);
+ return err;
}
static int open_chan(struct list_head *chans)
@@ -125,7 +159,7 @@ static int open_chan(struct list_head *chans)
void chan_enable_winch(struct chan *chan, struct tty_port *port)
{
if (chan && chan->primary && chan->ops->winch)
- register_winch(chan->fd, port);
+ register_winch(chan->fd_in, port);
}
static void line_timer_cb(struct work_struct *work)
@@ -156,8 +190,9 @@ int enable_chan(struct line *line)
if (chan->enabled)
continue;
- err = line_setup_irq(chan->fd, chan->input, chan->output, line,
- chan);
+ err = line_setup_irq(chan->fd_in, chan->input,
+ chan->output && !need_output_blocking(),
+ line, chan);
if (err)
goto out_close;
@@ -196,7 +231,8 @@ void free_irqs(void)
if (chan->input && chan->enabled)
um_free_irq(chan->line->read_irq, chan);
- if (chan->output && chan->enabled)
+ if (chan->output && chan->enabled &&
+ !need_output_blocking())
um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
@@ -216,15 +252,19 @@ static void close_one_chan(struct chan *chan, int delay_free_irq)
} else {
if (chan->input && chan->enabled)
um_free_irq(chan->line->read_irq, chan);
- if (chan->output && chan->enabled)
+ if (chan->output && chan->enabled &&
+ !need_output_blocking())
um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
+ if (chan->fd_out != chan->fd_in)
+ os_close_file(chan->fd_out);
if (chan->ops->close != NULL)
- (*chan->ops->close)(chan->fd, chan->data);
+ (*chan->ops->close)(chan->fd_in, chan->data);
chan->opened = 0;
- chan->fd = -1;
+ chan->fd_in = -1;
+ chan->fd_out = -1;
}
void close_chan(struct line *line)
@@ -244,7 +284,7 @@ void close_chan(struct line *line)
void deactivate_chan(struct chan *chan, int irq)
{
if (chan && chan->enabled)
- deactivate_fd(chan->fd, irq);
+ deactivate_fd(chan->fd_in, irq);
}
int write_chan(struct chan *chan, const u8 *buf, size_t len, int write_irq)
@@ -254,7 +294,7 @@ int write_chan(struct chan *chan, const u8 *buf, size_t len, int write_irq)
if (len == 0 || !chan || !chan->ops->write)
return 0;
- n = chan->ops->write(chan->fd, buf, len, chan->data);
+ n = chan->ops->write(chan->fd_out, buf, len, chan->data);
if (chan->primary) {
ret = n;
}
@@ -268,7 +308,7 @@ int console_write_chan(struct chan *chan, const char *buf, int len)
if (!chan || !chan->ops->console_write)
return 0;
- n = chan->ops->console_write(chan->fd, buf, len);
+ n = chan->ops->console_write(chan->fd_out, buf, len);
if (chan->primary)
ret = n;
return ret;
@@ -296,14 +336,14 @@ int chan_window_size(struct line *line, unsigned short *rows_out,
if (chan && chan->primary) {
if (chan->ops->window_size == NULL)
return 0;
- return chan->ops->window_size(chan->fd, chan->data,
+ return chan->ops->window_size(chan->fd_in, chan->data,
rows_out, cols_out);
}
chan = line->chan_out;
if (chan && chan->primary) {
if (chan->ops->window_size == NULL)
return 0;
- return chan->ops->window_size(chan->fd, chan->data,
+ return chan->ops->window_size(chan->fd_in, chan->data,
rows_out, cols_out);
}
return 0;
@@ -319,7 +359,7 @@ static void free_one_chan(struct chan *chan)
(*chan->ops->free)(chan->data);
if (chan->primary && chan->output)
- ignore_sigio_fd(chan->fd);
+ ignore_sigio_fd(chan->fd_in);
kfree(chan);
}
@@ -478,7 +518,8 @@ static struct chan *parse_chan(struct line *line, char *str, int device,
.output = 0,
.opened = 0,
.enabled = 0,
- .fd = -1,
+ .fd_in = -1,
+ .fd_out = -1,
.ops = ops,
.data = data });
return chan;
@@ -549,7 +590,7 @@ void chan_interrupt(struct line *line, int irq)
schedule_delayed_work(&line->task, 1);
goto out;
}
- err = chan->ops->read(chan->fd, &c, chan->data);
+ err = chan->ops->read(chan->fd_in, &c, chan->data);
if (err > 0)
tty_insert_flip_char(port, c, TTY_NORMAL);
} while (err > 0);
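The open_one_chan() rework above splits the single channel descriptor into fd_in and fd_out: the descriptor is duplicated and only the output copy is switched to blocking mode when a time-travel mode needs writes to complete immediately. The user-space sketch below shows that dup-and-set-blocking step, with plain dup() and fcntl() standing in for the UML helpers os_dup_file() and os_set_fd_block(), and stdout as the demo descriptor.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_fd_block(int fd, int blocking)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	if (blocking)
		flags &= ~O_NONBLOCK;
	else
		flags |= O_NONBLOCK;
	return fcntl(fd, F_SETFL, flags);
}

int main(void)
{
	int fd_in = STDOUT_FILENO;
	int fd_out = dup(fd_in);		/* analogue of os_dup_file() */

	if (fd_out < 0)
		return 1;
	/* input stays non-blocking, the output copy becomes blocking */
	if (set_fd_block(fd_in, 0) || set_fd_block(fd_out, 1))
		return 1;
	dprintf(fd_out, "blocking output copy is fd %d\n", fd_out);
	close(fd_out);
	return 0;
}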
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index ec04e47b9d79..a66e556012c4 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -23,7 +23,7 @@ int generic_read(int fd, __u8 *c_out, void *unused)
{
int n;
- n = read(fd, c_out, sizeof(*c_out));
+ CATCH_EINTR(n = read(fd, c_out, sizeof(*c_out)));
if (n > 0)
return n;
else if (n == 0)
@@ -37,11 +37,23 @@ int generic_read(int fd, __u8 *c_out, void *unused)
int generic_write(int fd, const __u8 *buf, size_t n, void *unused)
{
+ int written = 0;
int err;
- err = write(fd, buf, n);
- if (err > 0)
- return err;
+ /* The FD may be in blocking mode; as such, we need to retry short writes,
+ * since they may have been interrupted by a signal.
+ */
+ do {
+ errno = 0;
+ err = write(fd, buf + written, n - written);
+ if (err > 0) {
+ written += err;
+ continue;
+ }
+ } while (err < 0 && errno == EINTR);
+
+ if (written > 0)
+ return written;
else if (errno == EAGAIN)
return 0;
else if (err == 0)
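
The reworked generic_write() above has to cope with channel FDs that may now be left in blocking mode, so it retries writes that were interrupted by a signal and returns however many bytes made it out when a write completes only partially. As a stand-alone illustration of that pattern against plain POSIX write(2) (the helper name below is made up and is not part of the patch):

/* Minimal sketch of the retry pattern used above: keep writing while the
 * call is interrupted by a signal, return the bytes written so far on a
 * short write, and map EAGAIN to "wrote nothing".
 */
#include <errno.h>
#include <unistd.h>

static ssize_t write_retry_eintr(int fd, const char *buf, size_t n)
{
	size_t written = 0;
	ssize_t err;

	do {
		errno = 0;
		err = write(fd, buf + written, n - written);
		if (err > 0) {
			written += err;
			continue;
		}
	} while (err < 0 && errno == EINTR);

	if (written > 0)
		return written;		/* possibly a short write */
	if (err < 0 && errno == EAGAIN)
		return 0;		/* non-blocking FD, nothing written */
	return err;			/* 0 on EOF-like cases, -1 on error */
}
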
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 60d1c6cab8a9..99a7144b229f 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -49,6 +49,7 @@
#include "mconsole.h"
#include "harddog.h"
+MODULE_DESCRIPTION("UML hardware watchdog");
MODULE_LICENSE("GPL");
static DEFINE_MUTEX(harddog_mutex);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index d82bc3fdb86e..43d8959cc746 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init,
parse_chan_pair(NULL, line, n, opts, error_out);
err = 0;
}
+ *error_out = "configured as 'none'";
} else {
char *new = kstrdup(init, GFP_KERNEL);
if (!new) {
@@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init,
}
}
if (err) {
+ *error_out = "failed to parse channel pair";
line->init_str = NULL;
line->valid = 0;
kfree(new);
diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c
deleted file mode 100644
index d9bf95d7867b..000000000000
--- a/arch/um/drivers/pcap_kern.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- */
-
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <net_kern.h>
-#include "pcap_user.h"
-
-struct pcap_init {
- char *host_if;
- int promisc;
- int optimize;
- char *filter;
-};
-
-static void pcap_init_kern(struct net_device *dev, void *data)
-{
- struct uml_net_private *pri;
- struct pcap_data *ppri;
- struct pcap_init *init = data;
-
- pri = netdev_priv(dev);
- ppri = (struct pcap_data *) pri->user;
- ppri->host_if = init->host_if;
- ppri->promisc = init->promisc;
- ppri->optimize = init->optimize;
- ppri->filter = init->filter;
-
- printk("pcap backend, host interface %s\n", ppri->host_if);
-}
-
-static int pcap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
-{
- return pcap_user_read(fd, skb_mac_header(skb),
- skb->dev->mtu + ETH_HEADER_OTHER,
- (struct pcap_data *) &lp->user);
-}
-
-static int pcap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
-{
- return -EPERM;
-}
-
-static const struct net_kern_info pcap_kern_info = {
- .init = pcap_init_kern,
- .protocol = eth_protocol,
- .read = pcap_read,
- .write = pcap_write,
-};
-
-static int pcap_setup(char *str, char **mac_out, void *data)
-{
- struct pcap_init *init = data;
- char *remain, *host_if = NULL, *options[2] = { NULL, NULL };
- int i;
-
- *init = ((struct pcap_init)
- { .host_if = "eth0",
- .promisc = 1,
- .optimize = 0,
- .filter = NULL });
-
- remain = split_if_spec(str, &host_if, &init->filter,
- &options[0], &options[1], mac_out, NULL);
- if (remain != NULL) {
- printk(KERN_ERR "pcap_setup - Extra garbage on "
- "specification : '%s'\n", remain);
- return 0;
- }
-
- if (host_if != NULL)
- init->host_if = host_if;
-
- for (i = 0; i < ARRAY_SIZE(options); i++) {
- if (options[i] == NULL)
- continue;
- if (!strcmp(options[i], "promisc"))
- init->promisc = 1;
- else if (!strcmp(options[i], "nopromisc"))
- init->promisc = 0;
- else if (!strcmp(options[i], "optimize"))
- init->optimize = 1;
- else if (!strcmp(options[i], "nooptimize"))
- init->optimize = 0;
- else {
- printk(KERN_ERR "pcap_setup : bad option - '%s'\n",
- options[i]);
- return 0;
- }
- }
-
- return 1;
-}
-
-static struct transport pcap_transport = {
- .list = LIST_HEAD_INIT(pcap_transport.list),
- .name = "pcap",
- .setup = pcap_setup,
- .user = &pcap_user_info,
- .kern = &pcap_kern_info,
- .private_size = sizeof(struct pcap_data),
- .setup_size = sizeof(struct pcap_init),
-};
-
-static int register_pcap(void)
-{
- register_transport(&pcap_transport);
- return 0;
-}
-
-late_initcall(register_pcap);
diff --git a/arch/um/drivers/pcap_user.c b/arch/um/drivers/pcap_user.c
deleted file mode 100644
index 52ddda3e3b10..000000000000
--- a/arch/um/drivers/pcap_user.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- */
-
-#include <errno.h>
-#include <pcap.h>
-#include <string.h>
-#include <asm/types.h>
-#include <net_user.h>
-#include "pcap_user.h"
-#include <um_malloc.h>
-
-#define PCAP_FD(p) (*(int *)(p))
-
-static int pcap_user_init(void *data, void *dev)
-{
- struct pcap_data *pri = data;
- pcap_t *p;
- char errors[PCAP_ERRBUF_SIZE];
-
- p = pcap_open_live(pri->host_if, ETH_MAX_PACKET + ETH_HEADER_OTHER,
- pri->promisc, 0, errors);
- if (p == NULL) {
- printk(UM_KERN_ERR "pcap_user_init : pcap_open_live failed - "
- "'%s'\n", errors);
- return -EINVAL;
- }
-
- pri->dev = dev;
- pri->pcap = p;
- return 0;
-}
-
-static int pcap_user_open(void *data)
-{
- struct pcap_data *pri = data;
- __u32 netmask;
- int err;
-
- if (pri->pcap == NULL)
- return -ENODEV;
-
- if (pri->filter != NULL) {
- err = dev_netmask(pri->dev, &netmask);
- if (err < 0) {
- printk(UM_KERN_ERR "pcap_user_open : dev_netmask failed\n");
- return -EIO;
- }
-
- pri->compiled = uml_kmalloc(sizeof(struct bpf_program),
- UM_GFP_KERNEL);
- if (pri->compiled == NULL) {
- printk(UM_KERN_ERR "pcap_user_open : kmalloc failed\n");
- return -ENOMEM;
- }
-
- err = pcap_compile(pri->pcap,
- (struct bpf_program *) pri->compiled,
- pri->filter, pri->optimize, netmask);
- if (err < 0) {
- printk(UM_KERN_ERR "pcap_user_open : pcap_compile failed - "
- "'%s'\n", pcap_geterr(pri->pcap));
- goto out;
- }
-
- err = pcap_setfilter(pri->pcap, pri->compiled);
- if (err < 0) {
- printk(UM_KERN_ERR "pcap_user_open : pcap_setfilter "
- "failed - '%s'\n", pcap_geterr(pri->pcap));
- goto out;
- }
- }
-
- return PCAP_FD(pri->pcap);
-
- out:
- kfree(pri->compiled);
- return -EIO;
-}
-
-static void pcap_remove(void *data)
-{
- struct pcap_data *pri = data;
-
- if (pri->compiled != NULL)
- pcap_freecode(pri->compiled);
-
- if (pri->pcap != NULL)
- pcap_close(pri->pcap);
-}
-
-struct pcap_handler_data {
- char *buffer;
- int len;
-};
-
-static void handler(u_char *data, const struct pcap_pkthdr *header,
- const u_char *packet)
-{
- int len;
-
- struct pcap_handler_data *hdata = (struct pcap_handler_data *) data;
-
- len = hdata->len < header->caplen ? hdata->len : header->caplen;
- memcpy(hdata->buffer, packet, len);
- hdata->len = len;
-}
-
-int pcap_user_read(int fd, void *buffer, int len, struct pcap_data *pri)
-{
- struct pcap_handler_data hdata = ((struct pcap_handler_data)
- { .buffer = buffer,
- .len = len });
- int n;
-
- n = pcap_dispatch(pri->pcap, 1, handler, (u_char *) &hdata);
- if (n < 0) {
- printk(UM_KERN_ERR "pcap_dispatch failed - %s\n",
- pcap_geterr(pri->pcap));
- return -EIO;
- }
- else if (n == 0)
- return 0;
- return hdata.len;
-}
-
-const struct net_user_info pcap_user_info = {
- .init = pcap_user_init,
- .open = pcap_user_open,
- .close = NULL,
- .remove = pcap_remove,
- .add_address = NULL,
- .delete_address = NULL,
- .mtu = ETH_MAX_PACKET,
- .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
-};
diff --git a/arch/um/drivers/pcap_user.h b/arch/um/drivers/pcap_user.h
deleted file mode 100644
index 216246f5f09b..000000000000
--- a/arch/um/drivers/pcap_user.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- */
-
-#include <net_user.h>
-
-struct pcap_data {
- char *host_if;
- int promisc;
- int optimize;
- char *filter;
- void *compiled;
- void *pcap;
- void *dev;
-};
-
-extern const struct net_user_info pcap_user_info;
-
-extern int pcap_user_read(int fd, void *buf, int len, struct pcap_data *pri);
-
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index c52b3ff3c092..a4508470df78 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -45,15 +45,17 @@ struct connection {
static irqreturn_t pipe_interrupt(int irq, void *data)
{
struct connection *conn = data;
- int fd;
+ int n_fds = 1, fd = -1;
+ ssize_t ret;
- fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
- if (fd < 0) {
- if (fd == -EAGAIN)
+ ret = os_rcv_fd_msg(conn->socket[0], &fd, n_fds, &conn->helper_pid,
+ sizeof(conn->helper_pid));
+ if (ret != sizeof(conn->helper_pid)) {
+ if (ret == -EAGAIN)
return IRQ_NONE;
- printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
- -fd);
+ printk(KERN_ERR "pipe_interrupt : os_rcv_fd_msg returned %zd\n",
+ ret);
os_close_file(conn->fd);
}
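
Both pipe_interrupt() above and xterm_interrupt() further down switch from os_rcv_fd() to the new os_rcv_fd_msg(), which returns the number of side-band data bytes and fills in an array of received descriptors. A hedged sketch of what such a helper typically looks like on top of plain recvmsg()/SCM_RIGHTS; this is only an illustration of the idea, not the UML implementation, and the function name is a placeholder:

/* Illustrative only: receive up to n_fds descriptors plus a small data
 * payload from a UNIX socket using an SCM_RIGHTS control message.
 */
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t rcv_fd_msg_sketch(int fd, int *fds, unsigned int n_fds,
				 void *data, size_t data_len)
{
	char ctrl[CMSG_SPACE(8 * sizeof(int))];	/* assumes n_fds <= 8 */
	struct iovec iov = { .iov_base = data, .iov_len = data_len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;
	unsigned int received;
	ssize_t n;

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return -errno;

	cmsg = CMSG_FIRSTHDR(&msg);
	if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
	    cmsg->cmsg_type == SCM_RIGHTS) {
		received = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
		if (received > n_fds)
			received = n_fds;  /* extra fds would leak; fine for a sketch */
		memcpy(fds, CMSG_DATA(cmsg), received * sizeof(int));
	}

	return n;	/* bytes of regular data, as the callers above expect */
}
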
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 9f1e76ddda5a..7f28ec1929dc 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -36,7 +36,6 @@
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-#include <asm/tlbflush.h>
#include <kern_util.h>
#include "mconsole_kern.h"
#include <init.h>
@@ -106,7 +105,6 @@ static inline void ubd_set_bit(__u64 bit, unsigned char *data)
#define DRIVER_NAME "uml-blkdev"
static DEFINE_MUTEX(ubd_lock);
-static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */
static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg);
@@ -759,7 +757,6 @@ static int ubd_open_dev(struct ubd *ubd_dev)
printk(KERN_ERR "Failed to vmalloc COW bitmap\n");
goto error;
}
- flush_tlb_kernel_vm();
err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap,
ubd_dev->cow.bitmap_offset,
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 4279793b11b7..2d473282ab51 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1115,11 +1115,12 @@ static int irq_rr;
static int vector_net_close(struct net_device *dev)
{
struct vector_private *vp = netdev_priv(dev);
- unsigned long flags;
netif_stop_queue(dev);
del_timer(&vp->tl);
+ vp->opened = false;
+
if (vp->fds == NULL)
return 0;
@@ -1158,10 +1159,7 @@ static int vector_net_close(struct net_device *dev)
destroy_queue(vp->tx_queue);
kfree(vp->fds);
vp->fds = NULL;
- spin_lock_irqsave(&vp->lock, flags);
- vp->opened = false;
vp->in_error = false;
- spin_unlock_irqrestore(&vp->lock, flags);
return 0;
}
@@ -1203,17 +1201,12 @@ static void vector_reset_tx(struct work_struct *work)
static int vector_net_open(struct net_device *dev)
{
struct vector_private *vp = netdev_priv(dev);
- unsigned long flags;
int err = -EINVAL;
struct vector_device *vdevice;
- spin_lock_irqsave(&vp->lock, flags);
- if (vp->opened) {
- spin_unlock_irqrestore(&vp->lock, flags);
+ if (vp->opened)
return -ENXIO;
- }
vp->opened = true;
- spin_unlock_irqrestore(&vp->lock, flags);
vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
@@ -1387,8 +1380,6 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
return -1;
}
- spin_lock(&vp->lock);
-
if (vp->bpf != NULL) {
if (vp->opened)
uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
@@ -1417,15 +1408,12 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
if (vp->opened)
result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
- spin_unlock(&vp->lock);
-
return result;
free_buffer:
release_firmware(fw);
flash_fail:
- spin_unlock(&vp->lock);
if (vp->bpf != NULL)
kfree(vp->bpf->filter);
kfree(vp->bpf);
@@ -1631,7 +1619,6 @@ static void vector_eth_configure(
INIT_WORK(&vp->reset_tx, vector_reset_tx);
timer_setup(&vp->tl, vector_timer_expire, 0);
- spin_lock_init(&vp->lock);
/* FIXME */
dev->netdev_ops = &vector_netdev_ops;
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
index 2a1fa8e0f3e1..806df551be0b 100644
--- a/arch/um/drivers/vector_kern.h
+++ b/arch/um/drivers/vector_kern.h
@@ -71,7 +71,6 @@ struct vector_estats {
struct vector_private {
struct list_head list;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi ____cacheline_aligned;
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index 6918de5e2956..e4316c7981e8 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -156,7 +156,7 @@ static int xterm_open(int input, int output, int primary, void *d,
new = xterm_fd(fd, &data->helper_pid);
if (new < 0) {
err = new;
- printk(UM_KERN_ERR "xterm_open : os_rcv_fd failed, err = %d\n",
+ printk(UM_KERN_ERR "xterm_open : xterm_fd failed, err = %d\n",
-err);
goto out_kill;
}
diff --git a/arch/um/drivers/xterm_kern.c b/arch/um/drivers/xterm_kern.c
index 8011e51993d5..3971252cb1a6 100644
--- a/arch/um/drivers/xterm_kern.c
+++ b/arch/um/drivers/xterm_kern.c
@@ -21,12 +21,19 @@ struct xterm_wait {
static irqreturn_t xterm_interrupt(int irq, void *data)
{
struct xterm_wait *xterm = data;
- int fd;
+ int fd = -1, n_fds = 1;
+ ssize_t ret;
- fd = os_rcv_fd(xterm->fd, &xterm->pid);
- if (fd == -EAGAIN)
+ ret = os_rcv_fd_msg(xterm->fd, &fd, n_fds,
+ &xterm->pid, sizeof(xterm->pid));
+ if (ret == -EAGAIN)
return IRQ_NONE;
+ if (ret < 0)
+ fd = ret;
+ else if (ret != sizeof(xterm->pid))
+ fd = -EMSGSIZE;
+
xterm->new_fd = fd;
complete(&xterm->ready);
diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
index f2923c767bb9..a3eaca41ff61 100644
--- a/arch/um/include/asm/mmu.h
+++ b/arch/um/include/asm/mmu.h
@@ -7,15 +7,13 @@
#define __ARCH_UM_MMU_H
#include <mm_id.h>
-#include <asm/mm_context.h>
typedef struct mm_context {
struct mm_id id;
- struct uml_arch_mm_context arch;
-} mm_context_t;
-/* Avoid tangled inclusion with asm/ldt.h */
-extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
-extern void free_ldt(struct mm_context *mm);
+ /* Address range in need of a TLB sync */
+ unsigned long sync_tlb_range_from;
+ unsigned long sync_tlb_range_to;
+} mm_context_t;
#endif
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 68e2eb9cfb47..23dcc914d44e 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -13,8 +13,6 @@
#include <asm/mm_hooks.h>
#include <asm/mmu.h>
-extern void force_flush_all(void);
-
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index e1ece21dbe3f..5bb397b65efb 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -244,6 +244,38 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
#define PFN_PTE_SHIFT PAGE_SHIFT
+static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ if (!mm->context.sync_tlb_range_to) {
+ mm->context.sync_tlb_range_from = start;
+ mm->context.sync_tlb_range_to = end;
+ } else {
+ if (start < mm->context.sync_tlb_range_from)
+ mm->context.sync_tlb_range_from = start;
+ if (end > mm->context.sync_tlb_range_to)
+ mm->context.sync_tlb_range_to = end;
+ }
+}
+
+#define set_ptes set_ptes
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, int nr)
+{
+ /* Basically the default implementation */
+ size_t length = nr * PAGE_SIZE;
+
+ for (;;) {
+ set_pte(ptep, pte);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte = __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
+ }
+
+ um_tlb_mark_sync(mm, addr, addr + length);
+}
+
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
diff --git a/arch/um/include/asm/tlbflush.h b/arch/um/include/asm/tlbflush.h
index a5bda890390d..db997976b6ea 100644
--- a/arch/um/include/asm/tlbflush.h
+++ b/arch/um/include/asm/tlbflush.h
@@ -9,23 +9,51 @@
#include <linux/mm.h>
/*
- * TLB flushing:
+ * In UML, we need to sync the TLB over to the host by using mmap/munmap/mprotect
+ * syscalls issued from the process handling the MM (which can be the kernel itself).
+ *
+ * To track updates, we can hook into set_ptes and flush_tlb_*. With set_ptes
+ * we catch all PTE transitions where memory that was unusable becomes usable.
+ * With flush_tlb_*, on the other hand, we can track any memory that becomes
+ * unusable, even when a higher layer of the page table was modified.
+ *
+ * So, we simply track updates using both methods and mark the memory area to
+ * be synced later on. The only special case is that flush_tlb_kern_* needs to
+ * be executed immediately as there is no good synchronization point in that
+ * case. In contrast, in the set_ptes case we can wait for the next kernel
+ * segfault before we do the synchronization.
*
- * - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_kernel_vm() flushes the kernel vm area
* - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*/
+extern int um_tlb_sync(struct mm_struct *mm);
+
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
-extern void flush_tlb_kernel_vm(void);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void __flush_tlb_one(unsigned long addr);
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long address)
+{
+ um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ um_tlb_mark_sync(vma->vm_mm, start, end);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ um_tlb_mark_sync(&init_mm, start, end);
+
+ /* Kernel needs to be synced immediately */
+ um_tlb_sync(&init_mm);
+}
#endif
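
The net effect of the hooks above is that every update only widens a single pending [from, to) window in the mm_context, which um_tlb_sync() later replays against the host. A minimal user-space sketch of that bookkeeping, using hypothetical names, just to show the merge-and-consume behaviour:

/* Stand-alone sketch of the deferred "mark now, sync later" scheme:
 * markers only widen one [from, to) window, and a later sync point
 * consumes that single range.
 */
#include <stdio.h>

struct sync_range {
	unsigned long from, to;	/* to == 0 means "nothing pending" */
};

static void mark_sync(struct sync_range *r, unsigned long start, unsigned long end)
{
	if (!r->to) {
		r->from = start;
		r->to = end;
		return;
	}
	if (start < r->from)
		r->from = start;
	if (end > r->to)
		r->to = end;
}

static void do_sync(struct sync_range *r)
{
	if (!r->to)
		return;
	printf("sync [%#lx, %#lx)\n", r->from, r->to);
	r->from = r->to = 0;	/* window consumed */
}

int main(void)
{
	struct sync_range r = { 0, 0 };

	mark_sync(&r, 0x2000, 0x3000);
	mark_sync(&r, 0x1000, 0x2000);	/* widens the window downwards */
	do_sync(&r);			/* prints sync [0x1000, 0x3000) */
	return 0;
}
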
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index c22f46a757dc..06292fca5a4d 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -23,7 +23,7 @@
#define STUB_START stub_start
#define STUB_CODE STUB_START
#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
-#define STUB_DATA_PAGES 1 /* must be a power of two */
+#define STUB_DATA_PAGES 2 /* must be a power of two */
#define STUB_END (STUB_DATA + STUB_DATA_PAGES * UM_KERN_PAGE_SIZE)
#ifndef __ASSEMBLY__
diff --git a/arch/um/include/shared/common-offsets.h b/arch/um/include/shared/common-offsets.h
index 96195483fbd0..579ed946a3a9 100644
--- a/arch/um/include/shared/common-offsets.h
+++ b/arch/um/include/shared/common-offsets.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* for use by sys-$SUBARCH/kernel-offsets.c */
-#include <stub-data.h>
DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
@@ -30,7 +29,3 @@ DEFINE(UML_CONFIG_64BIT, CONFIG_64BIT);
DEFINE(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT, CONFIG_UML_TIME_TRAVEL_SUPPORT);
#endif
-/* for stub */
-DEFINE(UML_STUB_FIELD_OFFSET, offsetof(struct stub_data, offset));
-DEFINE(UML_STUB_FIELD_CHILD_ERR, offsetof(struct stub_data, child_err));
-DEFINE(UML_STUB_FIELD_FD, offsetof(struct stub_data, fd));
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 95521b1f5b20..d8ffd2db168e 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -13,7 +13,6 @@ struct siginfo;
extern int uml_exitcode;
-extern int ncpus;
extern int kmalloc_ok;
#define UML_ROUND_UP(addr) \
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index aff8906304ea..9a039d6f1f74 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -163,8 +163,10 @@ extern int os_set_fd_block(int fd, int blocking);
extern int os_accept_connection(int fd);
extern int os_create_unix_socket(const char *file, int len, int close_on_exec);
extern int os_shutdown_socket(int fd, int r, int w);
+extern int os_dup_file(int fd);
extern void os_close_file(int fd);
-extern int os_rcv_fd(int fd, int *helper_pid_out);
+ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
+ void *data, size_t data_len);
extern int os_connect_socket(const char *name);
extern int os_file_type(char *file);
extern int os_file_mode(const char *file, struct openflags *mode_out);
@@ -179,6 +181,8 @@ extern int os_eventfd(unsigned int initval, int flags);
extern int os_sendmsg_fds(int fd, const void *buf, unsigned int len,
const int *fds, unsigned int fds_num);
int os_poll(unsigned int n, const int *fds);
+void *os_mmap_rw_shared(int fd, size_t size);
+void *os_mremap_rw_shared(void *old_addr, size_t old_size, size_t new_size);
/* start_up.c */
extern void os_early_checks(void);
@@ -191,6 +195,9 @@ extern void get_host_cpu_features(
/* mem.c */
extern int create_mem_file(unsigned long long len);
+/* tlb.c */
+extern void report_enomem(void);
+
/* process.c */
extern unsigned long os_process_pc(int pid);
extern int os_process_parent(int pid);
@@ -268,24 +275,20 @@ extern long long os_persistent_clock_emulation(void);
extern long long os_nsecs(void);
/* skas/mem.c */
-extern long run_syscall_stub(struct mm_id * mm_idp,
- int syscall, unsigned long *args, long expected,
- void **addr, int done);
-extern long syscall_stub_data(struct mm_id * mm_idp,
- unsigned long *data, int data_count,
- void **addr, void **stub_addr);
-extern int map(struct mm_id * mm_idp, unsigned long virt,
- unsigned long len, int prot, int phys_fd,
- unsigned long long offset, int done, void **data);
-extern int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
- int done, void **data);
-extern int protect(struct mm_id * mm_idp, unsigned long addr,
- unsigned long len, unsigned int prot, int done, void **data);
+int syscall_stub_flush(struct mm_id *mm_idp);
+struct stub_syscall *syscall_stub_alloc(struct mm_id *mm_idp);
+void syscall_stub_dump_error(struct mm_id *mm_idp);
+
+int map(struct mm_id *mm_idp, unsigned long virt,
+ unsigned long len, int prot, int phys_fd,
+ unsigned long long offset);
+int unmap(struct mm_id *mm_idp, unsigned long addr, unsigned long len);
+int protect(struct mm_id *mm_idp, unsigned long addr,
+ unsigned long len, unsigned int prot);
/* skas/process.c */
extern int is_skas_winch(int pid, int fd, void *data);
extern int start_userspace(unsigned long stub_stack);
-extern int copy_context_skas0(unsigned long stack, int pid);
extern void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs);
extern void new_thread(void *stack, jmp_buf *buf, void (*handler)(void));
extern void switch_threads(jmp_buf *me, jmp_buf *you);
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 92dbf727e384..1e76ba40feba 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -12,7 +12,7 @@ struct mm_id {
int pid;
} u;
unsigned long stack;
- int kill;
+ int syscall_data_len;
};
void __switch_mm(struct mm_id *mm_idp);
diff --git a/arch/um/include/shared/skas/skas.h b/arch/um/include/shared/skas/skas.h
index c93d2cbc8f32..ebaa116de30b 100644
--- a/arch/um/include/shared/skas/skas.h
+++ b/arch/um/include/shared/skas/skas.h
@@ -15,5 +15,7 @@ extern void new_thread_handler(void);
extern void handle_syscall(struct uml_pt_regs *regs);
extern long execute_syscall_skas(void *r);
extern unsigned long current_stub_stack(void);
+extern struct mm_id *current_mm_id(void);
+extern void current_mm_sync(void);
#endif
diff --git a/arch/um/include/shared/skas/stub-data.h b/arch/um/include/shared/skas/stub-data.h
index 5e3ade3fb38b..2b6b44759dfa 100644
--- a/arch/um/include/shared/skas/stub-data.h
+++ b/arch/um/include/shared/skas/stub-data.h
@@ -8,10 +8,42 @@
#ifndef __STUB_DATA_H
#define __STUB_DATA_H
+#include <linux/compiler_types.h>
+#include <as-layout.h>
+#include <sysdep/tls.h>
+
+#define STUB_NEXT_SYSCALL(s) \
+ ((struct stub_syscall *) (((unsigned long) s) + (s)->cmd_len))
+
+enum stub_syscall_type {
+ STUB_SYSCALL_UNSET = 0,
+ STUB_SYSCALL_MMAP,
+ STUB_SYSCALL_MUNMAP,
+ STUB_SYSCALL_MPROTECT,
+};
+
+struct stub_syscall {
+ struct {
+ unsigned long addr;
+ unsigned long length;
+ unsigned long offset;
+ int fd;
+ int prot;
+ } mem;
+
+ enum stub_syscall_type syscall;
+};
+
struct stub_data {
unsigned long offset;
- int fd;
- long parent_err, child_err;
+ long err, child_err;
+
+ int syscall_data_len;
+ /* 128 leaves enough room for additional fields in the struct */
+ struct stub_syscall syscall_data[(UM_KERN_PAGE_SIZE - 128) / sizeof(struct stub_syscall)] __aligned(16);
+
+ /* Stack for our signal handlers and for calling into . */
+ unsigned char sigstack[UM_KERN_PAGE_SIZE] __aligned(UM_KERN_PAGE_SIZE);
};
#endif
diff --git a/arch/um/include/shared/timetravel.h b/arch/um/include/shared/timetravel.h
index e5c3d69f1b69..c8db2f213dba 100644
--- a/arch/um/include/shared/timetravel.h
+++ b/arch/um/include/shared/timetravel.h
@@ -15,8 +15,17 @@ enum time_travel_mode {
#if defined(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT) || \
defined(CONFIG_UML_TIME_TRAVEL_SUPPORT)
extern enum time_travel_mode time_travel_mode;
+extern int time_travel_should_print_bc_msg;
#else
#define time_travel_mode TT_MODE_OFF
+#define time_travel_should_print_bc_msg 0
#endif /* (UML_)CONFIG_UML_TIME_TRAVEL_SUPPORT */
+void _time_travel_print_bc_msg(void);
+static inline void time_travel_print_bc_msg(void)
+{
+ if (time_travel_should_print_bc_msg)
+ _time_travel_print_bc_msg();
+}
+
#endif /* _UM_TIME_TRAVEL_H_ */
diff --git a/arch/um/include/shared/user.h b/arch/um/include/shared/user.h
index 326e52450e41..bbab79c0c074 100644
--- a/arch/um/include/shared/user.h
+++ b/arch/um/include/shared/user.h
@@ -42,11 +42,19 @@ extern void panic(const char *fmt, ...)
#define printk(...) _printk(__VA_ARGS__)
extern int _printk(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
+extern void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, _Bool ascii);
#else
static inline int printk(const char *fmt, ...)
{
return 0;
}
+static inline void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, _Bool ascii)
+{
+}
#endif
extern int in_aton(char *str);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 827a0d3fa589..2c15bb2c104c 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -22,17 +22,8 @@
void flush_thread(void)
{
- void *data = NULL;
- int ret;
-
arch_flush_thread(&current->thread.arch);
- ret = unmap(&current->mm->context.id, 0, TASK_SIZE, 1, &data);
- if (ret) {
- printk(KERN_ERR "%s - clearing address space failed, err = %d\n",
- __func__, ret);
- force_sig(SIGKILL);
- }
get_safe_registers(current_pt_regs()->regs.gp,
current_pt_regs()->regs.fp);
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 635d44606bfe..534e91797f89 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -37,7 +37,7 @@ struct irq_reg {
bool pending;
bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- bool pending_on_resume;
+ bool pending_event;
void (*timetravel_handler)(int, int, void *,
struct time_travel_event *);
struct time_travel_event event;
@@ -56,6 +56,9 @@ static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+static bool irqs_pending;
+#endif
static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
@@ -84,9 +87,12 @@ static void irq_event_handler(struct time_travel_event *ev)
{
struct irq_reg *reg = container_of(ev, struct irq_reg, event);
- /* do nothing if suspended - just to cause a wakeup */
- if (irqs_suspended)
+ /* do nothing if suspended; just cause a wakeup and mark as pending */
+ if (irqs_suspended) {
+ irqs_pending = true;
+ reg->pending_event = true;
return;
+ }
generic_handle_irq(reg->irq);
}
@@ -110,16 +116,47 @@ static bool irq_do_timetravel_handler(struct irq_entry *entry,
if (!reg->event.pending)
return false;
- if (irqs_suspended)
- reg->pending_on_resume = true;
return true;
}
+
+static void irq_do_pending_events(bool timetravel_handlers_only)
+{
+ struct irq_entry *entry;
+
+ if (!irqs_pending || timetravel_handlers_only)
+ return;
+
+ irqs_pending = false;
+
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type t;
+
+ for (t = 0; t < NUM_IRQ_TYPES; t++) {
+ struct irq_reg *reg = &entry->reg[t];
+
+ /*
+ * Any timetravel_handler was invoked already, just
+ * directly run the IRQ.
+ */
+ if (reg->pending_event) {
+ irq_enter();
+ generic_handle_irq(reg->irq);
+ irq_exit();
+ reg->pending_event = false;
+ }
+ }
+ }
+}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
enum um_irq_type t)
{
return false;
}
+
+static void irq_do_pending_events(bool timetravel_handlers_only)
+{
+}
#endif
static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
@@ -145,6 +182,8 @@ static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type
*/
if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ reg->pending_event = true;
+ irqs_pending = true;
mark_sigio_pending();
#endif
return;
@@ -162,6 +201,10 @@ static void _sigio_handler(struct uml_pt_regs *regs,
if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
return;
+ /* Flush out pending events that were ignored due to time-travel. */
+ if (!irqs_suspended)
+ irq_do_pending_events(timetravel_handlers_only);
+
while (1) {
/* This is now lockless - epoll keeps back-references to the irqs
* which have triggered it so there is no need to walk the irq
@@ -195,7 +238,9 @@ static void _sigio_handler(struct uml_pt_regs *regs,
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
+ preempt_disable();
_sigio_handler(regs, irqs_suspended);
+ preempt_enable();
}
static struct irq_entry *get_irq_entry_by_fd(int fd)
@@ -543,30 +588,7 @@ void um_irqs_resume(void)
unsigned long flags;
- local_irq_save(flags);
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- /*
- * We don't need to lock anything here since we're in resume
- * and nothing else is running, but have disabled IRQs so we
- * don't try anything else with the interrupt list from there.
- */
- list_for_each_entry(entry, &active_fds, list) {
- enum um_irq_type t;
-
- for (t = 0; t < NUM_IRQ_TYPES; t++) {
- struct irq_reg *reg = &entry->reg[t];
-
- if (reg->pending_on_resume) {
- irq_enter();
- generic_handle_irq(reg->irq);
- irq_exit();
- reg->pending_on_resume = false;
- }
- }
- }
-#endif
-
- spin_lock(&irq_lock);
+ spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
if (entry->suspended) {
int err = os_set_fd_async(entry->fd);
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index 3a85bde3e173..f2fb77da08cf 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(os_shutdown_socket);
EXPORT_SYMBOL(os_create_unix_socket);
EXPORT_SYMBOL(os_connect_socket);
EXPORT_SYMBOL(os_accept_connection);
-EXPORT_SYMBOL(os_rcv_fd);
+EXPORT_SYMBOL(os_rcv_fd_msg);
EXPORT_SYMBOL(run_helper);
EXPORT_SYMBOL(os_major);
EXPORT_SYMBOL(os_minor);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index ca91accd64fc..a5b4fe2ad931 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -73,7 +73,6 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
memblock_free_all();
- max_low_pfn = totalram_pages();
max_pfn = max_low_pfn;
kmalloc_ok = 1;
}
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index d2134802f6a8..f36b63f53bab 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -122,8 +122,6 @@ void new_thread_handler(void)
/* Called magically, see new_thread_handler above */
static void fork_handler(void)
{
- force_flush_all();
-
schedule_tail(current->thread.prev_sched);
/*
@@ -237,73 +235,6 @@ int copy_from_user_proc(void *to, void __user *from, int size)
return copy_from_user(to, from, size);
}
-static atomic_t using_sysemu = ATOMIC_INIT(0);
-int sysemu_supported;
-
-static void set_using_sysemu(int value)
-{
- if (value > sysemu_supported)
- return;
- atomic_set(&using_sysemu, value);
-}
-
-static int get_using_sysemu(void)
-{
- return atomic_read(&using_sysemu);
-}
-
-static int sysemu_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", get_using_sysemu());
- return 0;
-}
-
-static int sysemu_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sysemu_proc_show, NULL);
-}
-
-static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
- size_t count, loff_t *pos)
-{
- char tmp[2];
-
- if (copy_from_user(tmp, buf, 1))
- return -EFAULT;
-
- if (tmp[0] >= '0' && tmp[0] <= '2')
- set_using_sysemu(tmp[0] - '0');
- /* We use the first char, but pretend to write everything */
- return count;
-}
-
-static const struct proc_ops sysemu_proc_ops = {
- .proc_open = sysemu_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = single_release,
- .proc_write = sysemu_proc_write,
-};
-
-static int __init make_proc_sysemu(void)
-{
- struct proc_dir_entry *ent;
- if (!sysemu_supported)
- return 0;
-
- ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);
-
- if (ent == NULL)
- {
- printk(KERN_WARNING "Failed to register /proc/sysemu\n");
- return 0;
- }
-
- return 0;
-}
-
-late_initcall(make_proc_sysemu);
-
int singlestepping(void)
{
return test_thread_flag(TIF_SINGLESTEP);
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 25840eee1068..3736bca626ba 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -59,3 +59,18 @@ void machine_halt(void)
{
machine_power_off();
}
+
+static int sys_power_off_handler(struct sys_off_data *data)
+{
+ machine_power_off();
+ return 0;
+}
+
+static int register_power_off(void)
+{
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ sys_power_off_handler, NULL);
+ return 0;
+}
+__initcall(register_power_off);
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index f93972a25765..6f86d53e3d69 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -3,15 +3,14 @@
# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
#
-obj-y := clone.o mmu.o process.o syscall.o uaccess.o
+obj-y := stub.o mmu.o process.o syscall.o uaccess.o
-# clone.o is in the stub, so it can't be built with profiling
+# stub.o is in the stub, so it can't be built with profiling
# GCC hardened also auto-enables -fpic, but we need %ebx so it can't work ->
# disable it
-CFLAGS_clone.o := $(CFLAGS_NO_HARDENING)
-UNPROFILE_OBJS := clone.o
-
+CFLAGS_stub.o := $(CFLAGS_NO_HARDENING)
+UNPROFILE_OBJS := stub.o
KCOV_INSTRUMENT := n
include $(srctree)/arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
deleted file mode 100644
index 62435187dda4..000000000000
--- a/arch/um/kernel/skas/clone.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
- * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- */
-
-#include <signal.h>
-#include <sched.h>
-#include <asm/unistd.h>
-#include <sys/time.h>
-#include <as-layout.h>
-#include <ptrace_user.h>
-#include <stub-data.h>
-#include <sysdep/stub.h>
-
-/*
- * This is in a separate file because it needs to be compiled with any
- * extraneous gcc flags (-pg, -fprofile-arcs, -ftest-coverage) disabled
- *
- * Use UM_KERN_PAGE_SIZE instead of PAGE_SIZE because that calls getpagesize
- * on some systems.
- */
-
-void __attribute__ ((__section__ (".__syscall_stub")))
-stub_clone_handler(void)
-{
- struct stub_data *data = get_stub_data();
- long err;
-
- err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
- (unsigned long)data +
- STUB_DATA_PAGES * UM_KERN_PAGE_SIZE / 2);
- if (err) {
- data->parent_err = err;
- goto done;
- }
-
- err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0);
- if (err) {
- data->child_err = err;
- goto done;
- }
-
- remap_stack_and_trap();
-
- done:
- trap_myself();
-}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index aeed1c2aaf3c..47f98d87ea3c 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -14,11 +14,14 @@
#include <as-layout.h>
#include <os.h>
#include <skas.h>
+#include <stub-data.h>
+
+/* Ensure the stub_data struct covers the allocated area */
+static_assert(sizeof(struct stub_data) == STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
- struct mm_context *from_mm = NULL;
- struct mm_context *to_mm = &mm->context;
+ struct mm_id *new_id = &mm->context.id;
unsigned long stack = 0;
int ret = -ENOMEM;
@@ -26,34 +29,46 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
if (stack == 0)
goto out;
- to_mm->id.stack = stack;
- if (current->mm != NULL && current->mm != &init_mm)
- from_mm = &current->mm->context;
+ new_id->stack = stack;
block_signals_trace();
- if (from_mm)
- to_mm->id.u.pid = copy_context_skas0(stack,
- from_mm->id.u.pid);
- else to_mm->id.u.pid = start_userspace(stack);
+ new_id->u.pid = start_userspace(stack);
unblock_signals_trace();
- if (to_mm->id.u.pid < 0) {
- ret = to_mm->id.u.pid;
+ if (new_id->u.pid < 0) {
+ ret = new_id->u.pid;
goto out_free;
}
- ret = init_new_ldt(to_mm, from_mm);
- if (ret < 0) {
- printk(KERN_ERR "init_new_context_skas - init_ldt"
- " failed, errno = %d\n", ret);
- goto out_free;
- }
+ /*
+ * Ensure the new MM is clean and nothing unwanted is mapped.
+ *
+ * TODO: We should clear the memory up to STUB_START to ensure there is
+ * nothing mapped there, i.e. we (currently) have:
+ *
+ * |- user memory -|- unused -|- stub -|- unused -|
+ * ^ TASK_SIZE ^ STUB_START
+ *
+ * Meaning we have two unused areas where we may still have valid
+ * mappings from our internal clone(). That isn't really a problem as
+ * userspace is not going to access them, but it is definitely not
+ * correct.
+ *
+ * However, we are "lucky" and if rseq is configured, then on 32 bit
+ * it will fall into the first empty range while on 64 bit it is going
+ * to use an anonymous mapping in the second range. As such, things
+ * continue to work for now as long as we don't start unmapping these
+ * areas.
+ *
+ * Change this to STUB_START once we have a clean userspace.
+ */
+ unmap(new_id, 0, TASK_SIZE);
return 0;
out_free:
- if (to_mm->id.stack != 0)
- free_pages(to_mm->id.stack, ilog2(STUB_DATA_PAGES));
+ if (new_id->stack != 0)
+ free_pages(new_id->stack, ilog2(STUB_DATA_PAGES));
out:
return ret;
}
@@ -76,5 +91,4 @@ void destroy_context(struct mm_struct *mm)
os_kill_ptraced_process(mmu->id.u.pid, 1);
free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));
- free_ldt(mmu);
}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 99a5cbb36083..5f9c1c5f36e2 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -8,6 +8,8 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/task.h>
+#include <asm/tlbflush.h>
+
#include <as-layout.h>
#include <kern.h>
#include <os.h>
@@ -50,3 +52,19 @@ unsigned long current_stub_stack(void)
return current->mm->context.id.stack;
}
+
+struct mm_id *current_mm_id(void)
+{
+ if (current->mm == NULL)
+ return NULL;
+
+ return &current->mm->context.id;
+}
+
+void current_mm_sync(void)
+{
+ if (current->mm == NULL)
+ return;
+
+ um_tlb_sync(current->mm);
+}
diff --git a/arch/um/kernel/skas/stub.c b/arch/um/kernel/skas/stub.c
new file mode 100644
index 000000000000..5d52ffa682dc
--- /dev/null
+++ b/arch/um/kernel/skas/stub.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Benjamin Berg <benjamin@sipsolutions.net>
+ */
+
+#include <sysdep/stub.h>
+
+static __always_inline int syscall_handler(struct stub_data *d)
+{
+ int i;
+ unsigned long res;
+
+ for (i = 0; i < d->syscall_data_len; i++) {
+ struct stub_syscall *sc = &d->syscall_data[i];
+
+ switch (sc->syscall) {
+ case STUB_SYSCALL_MMAP:
+ res = stub_syscall6(STUB_MMAP_NR,
+ sc->mem.addr, sc->mem.length,
+ sc->mem.prot,
+ MAP_SHARED | MAP_FIXED,
+ sc->mem.fd, sc->mem.offset);
+ if (res != sc->mem.addr) {
+ d->err = res;
+ d->syscall_data_len = i;
+ return -1;
+ }
+ break;
+ case STUB_SYSCALL_MUNMAP:
+ res = stub_syscall2(__NR_munmap,
+ sc->mem.addr, sc->mem.length);
+ if (res) {
+ d->err = res;
+ d->syscall_data_len = i;
+ return -1;
+ }
+ break;
+ case STUB_SYSCALL_MPROTECT:
+ res = stub_syscall3(__NR_mprotect,
+ sc->mem.addr, sc->mem.length,
+ sc->mem.prot);
+ if (res) {
+ d->err = res;
+ d->syscall_data_len = i;
+ return -1;
+ }
+ break;
+ default:
+ d->err = -95; /* EOPNOTSUPP */
+ d->syscall_data_len = i;
+ return -1;
+ }
+ }
+
+ d->err = 0;
+ d->syscall_data_len = 0;
+
+ return 0;
+}
+
+void __section(".__syscall_stub")
+stub_syscall_handler(void)
+{
+ struct stub_data *d = get_stub_data();
+
+ syscall_handler(d);
+
+ trap_myself();
+}
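
The handler above only consumes a queue; the kernel side fills it through syscall_stub_alloc() and syscall_stub_flush(), declared under the skas/mem.c section of os.h earlier in this diff. The producer itself is not part of the hunks shown here, so the following is only a hedged sketch of how a caller might batch two operations, assuming syscall_stub_alloc() hands out the next free stub_data::syscall_data slot and syscall_stub_flush() has the stub replay the queue; example_remap() is a made-up name:

/* Illustrative only: queue an mmap and an mprotect for the child MM and
 * let the stub replay them. Assumes the alloc/flush semantics described
 * above; error handling is reduced to the bare minimum.
 */
static int example_remap(struct mm_id *mm_idp, unsigned long virt,
			 unsigned long len, int fd, unsigned long long offset)
{
	struct stub_syscall *sc;

	sc = syscall_stub_alloc(mm_idp);	/* slot 0 */
	sc->syscall = STUB_SYSCALL_MMAP;
	sc->mem.addr = virt;
	sc->mem.length = len;
	sc->mem.prot = UM_PROT_READ | UM_PROT_WRITE;
	sc->mem.fd = fd;
	sc->mem.offset = offset;

	sc = syscall_stub_alloc(mm_idp);	/* slot 1 */
	sc->syscall = STUB_SYSCALL_MPROTECT;
	sc->mem.addr = virt;
	sc->mem.length = len;
	sc->mem.prot = UM_PROT_READ;

	/* Replays the queue in the stub and reports any stub_data::err */
	return syscall_stub_flush(mm_idp);
}
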
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index a8bfe8be1526..47b9f5e63566 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -31,6 +31,7 @@ EXPORT_SYMBOL_GPL(time_travel_mode);
static bool time_travel_start_set;
static unsigned long long time_travel_start;
static unsigned long long time_travel_time;
+static unsigned long long time_travel_shm_offset;
static LIST_HEAD(time_travel_events);
static LIST_HEAD(time_travel_irqs);
static unsigned long long time_travel_timer_interval;
@@ -40,8 +41,11 @@ static int time_travel_ext_fd = -1;
static unsigned int time_travel_ext_waiting;
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
-static bool time_travel_ext_free_until_valid;
-static unsigned long long time_travel_ext_free_until;
+static unsigned long long *time_travel_ext_free_until;
+static unsigned long long _time_travel_ext_free_until;
+static u16 time_travel_shm_id;
+static struct um_timetravel_schedshm *time_travel_shm;
+static union um_timetravel_schedshm_client *time_travel_shm_client;
static void time_travel_set_time(unsigned long long ns)
{
@@ -58,8 +62,52 @@ enum time_travel_message_handling {
TTMH_IDLE,
TTMH_POLL,
TTMH_READ,
+ TTMH_READ_START_ACK,
};
+static u64 bc_message;
+int time_travel_should_print_bc_msg;
+
+void _time_travel_print_bc_msg(void)
+{
+ time_travel_should_print_bc_msg = 0;
+ printk(KERN_INFO "time-travel: received broadcast 0x%llx\n", bc_message);
+}
+
+static void time_travel_setup_shm(int fd, u16 id)
+{
+ u32 len;
+
+ time_travel_shm = os_mmap_rw_shared(fd, sizeof(*time_travel_shm));
+
+ if (!time_travel_shm)
+ goto out;
+
+ len = time_travel_shm->len;
+
+ if (time_travel_shm->version != UM_TIMETRAVEL_SCHEDSHM_VERSION ||
+ len < struct_size(time_travel_shm, clients, id + 1)) {
+ os_unmap_memory(time_travel_shm, sizeof(*time_travel_shm));
+ time_travel_shm = NULL;
+ goto out;
+ }
+
+ time_travel_shm = os_mremap_rw_shared(time_travel_shm,
+ sizeof(*time_travel_shm),
+ len);
+ if (!time_travel_shm)
+ goto out;
+
+ time_travel_shm_offset = time_travel_shm->current_time;
+ time_travel_shm_client = &time_travel_shm->clients[id];
+ time_travel_shm_client->capa |= UM_TIMETRAVEL_SCHEDSHM_CAP_TIME_SHARE;
+ time_travel_shm_id = id;
+ /* always look at that free_until from now on */
+ time_travel_ext_free_until = &time_travel_shm->free_until;
+out:
+ os_close_file(fd);
+}
+
static void time_travel_handle_message(struct um_timetravel_msg *msg,
enum time_travel_message_handling mode)
{
@@ -80,7 +128,20 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg,
}
}
- ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
+ if (unlikely(mode == TTMH_READ_START_ACK)) {
+ int fd[UM_TIMETRAVEL_SHARED_MAX_FDS];
+
+ ret = os_rcv_fd_msg(time_travel_ext_fd, fd,
+ ARRAY_SIZE(fd), msg, sizeof(*msg));
+ if (ret == sizeof(*msg)) {
+ time_travel_setup_shm(fd[UM_TIMETRAVEL_SHARED_MEMFD],
+ msg->time & UM_TIMETRAVEL_START_ACK_ID);
+ /* we don't use the logging for now */
+ os_close_file(fd[UM_TIMETRAVEL_SHARED_LOGFD]);
+ }
+ } else {
+ ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
+ }
if (ret == 0)
panic("time-travel external link is broken\n");
@@ -96,10 +157,24 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg,
return;
case UM_TIMETRAVEL_RUN:
time_travel_set_time(msg->time);
+ if (time_travel_shm) {
+ /* no request right now since we're running */
+ time_travel_shm_client->flags &=
+ ~UM_TIMETRAVEL_SCHEDSHM_FLAGS_REQ_RUN;
+ /* no ack for shared memory RUN */
+ return;
+ }
break;
case UM_TIMETRAVEL_FREE_UNTIL:
- time_travel_ext_free_until_valid = true;
- time_travel_ext_free_until = msg->time;
+ /* not supposed to get this with shm, but ignore it */
+ if (time_travel_shm)
+ break;
+ time_travel_ext_free_until = &_time_travel_ext_free_until;
+ _time_travel_ext_free_until = msg->time;
+ break;
+ case UM_TIMETRAVEL_BROADCAST:
+ bc_message = msg->time;
+ time_travel_should_print_bc_msg = 1;
break;
}
@@ -136,8 +211,15 @@ static u64 time_travel_ext_req(u32 op, u64 time)
block_signals_hard();
os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
+ /* no ACK expected for WAIT in shared memory mode */
+ if (msg.op == UM_TIMETRAVEL_WAIT && time_travel_shm)
+ goto done;
+
while (msg.op != UM_TIMETRAVEL_ACK)
- time_travel_handle_message(&msg, TTMH_READ);
+ time_travel_handle_message(&msg,
+ op == UM_TIMETRAVEL_START ?
+ TTMH_READ_START_ACK :
+ TTMH_READ);
if (msg.seq != mseq)
panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
@@ -145,6 +227,7 @@ static u64 time_travel_ext_req(u32 op, u64 time)
if (op == UM_TIMETRAVEL_GET)
time_travel_set_time(msg.time);
+done:
unblock_signals_hard();
return msg.time;
@@ -180,13 +263,33 @@ static void time_travel_ext_update_request(unsigned long long time)
/*
* if we're running and are allowed to run past the request
* then we don't need to update it either
+ *
+ * Note for shm we ignore FREE_UNTIL messages and leave the pointer
+ * to shared memory, and for non-shm the offset is 0.
*/
- if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
- time < time_travel_ext_free_until)
+ if (!time_travel_ext_waiting && time_travel_ext_free_until &&
+ time < (*time_travel_ext_free_until - time_travel_shm_offset))
return;
time_travel_ext_prev_request = time;
time_travel_ext_prev_request_valid = true;
+
+ if (time_travel_shm) {
+ union um_timetravel_schedshm_client *running;
+
+ running = &time_travel_shm->clients[time_travel_shm->running_id];
+
+ if (running->capa & UM_TIMETRAVEL_SCHEDSHM_CAP_TIME_SHARE) {
+ time_travel_shm_client->flags |=
+ UM_TIMETRAVEL_SCHEDSHM_FLAGS_REQ_RUN;
+ time += time_travel_shm_offset;
+ time_travel_shm_client->req_time = time;
+ if (time < time_travel_shm->free_until)
+ time_travel_shm->free_until = time;
+ return;
+ }
+ }
+
time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
}
@@ -194,6 +297,14 @@ void __time_travel_propagate_time(void)
{
static unsigned long long last_propagated;
+ if (time_travel_shm) {
+ if (time_travel_shm->running_id != time_travel_shm_id)
+ panic("time-travel: setting time while not running\n");
+ time_travel_shm->current_time = time_travel_time +
+ time_travel_shm_offset;
+ return;
+ }
+
if (last_propagated == time_travel_time)
return;
@@ -209,9 +320,12 @@ static bool time_travel_ext_request(unsigned long long time)
* If we received an external sync point ("free until") then we
* don't have to request/wait for anything until then, unless
* we're already waiting.
+ *
+ * Note for shm we ignore FREE_UNTIL messages and leave the pointer
+ * to shared memory, and for non-shm the offset is 0.
*/
- if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
- time < time_travel_ext_free_until)
+ if (!time_travel_ext_waiting && time_travel_ext_free_until &&
+ time < (*time_travel_ext_free_until - time_travel_shm_offset))
return false;
time_travel_ext_update_request(time);
@@ -225,7 +339,8 @@ static void time_travel_ext_wait(bool idle)
};
time_travel_ext_prev_request_valid = false;
- time_travel_ext_free_until_valid = false;
+ if (!time_travel_shm)
+ time_travel_ext_free_until = NULL;
time_travel_ext_waiting++;
time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
@@ -248,7 +363,11 @@ static void time_travel_ext_wait(bool idle)
static void time_travel_ext_get_time(void)
{
- time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
+ if (time_travel_shm)
+ time_travel_set_time(time_travel_shm->current_time -
+ time_travel_shm_offset);
+ else
+ time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}
static void __time_travel_update_time(unsigned long long ns, bool idle)
@@ -875,9 +994,49 @@ static int setup_time_travel_start(char *str)
return 1;
}
-__setup("time-travel-start", setup_time_travel_start);
+__setup("time-travel-start=", setup_time_travel_start);
__uml_help(setup_time_travel_start,
-"time-travel-start=<seconds>\n"
+"time-travel-start=<nanoseconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
+static struct kobject *bc_time_kobject;
+
+static ssize_t bc_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%llx", bc_message);
+}
+
+static ssize_t bc_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ u64 user_bc_message;
+
+ ret = kstrtou64(buf, 0, &user_bc_message);
+ if (ret)
+ return ret;
+
+ bc_message = user_bc_message;
+
+ time_travel_ext_req(UM_TIMETRAVEL_BROADCAST, bc_message);
+ pr_info("um: time: sent broadcast message: 0x%llx\n", bc_message);
+ return count;
+}
+
+static struct kobj_attribute bc_attribute = __ATTR(bc-message, 0660, bc_show, bc_store);
+
+static int __init um_bc_start(void)
+{
+ if (time_travel_mode != TT_MODE_EXTERNAL)
+ return 0;
+
+ bc_time_kobject = kobject_create_and_add("um-ext-time", kernel_kobj);
+ if (!bc_time_kobject)
+ return 0;
+
+ if (sysfs_create_file(bc_time_kobject, &bc_attribute.attr))
+ pr_debug("failed to create the bc file in /sys/kernel/um_time");
+
+ return 0;
+}
+late_initcall(um_bc_start);
#endif
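
time_travel_setup_shm() above maps only the header of the scheduler's shared memory, validates the version, and then grows the mapping to the length advertised inside it via os_mremap_rw_shared(). A stand-alone sketch of that map-validate-grow pattern against plain POSIX mmap()/mremap(), with a made-up header layout, purely for illustration:

/* Illustrative only: map just the header, check it, then grow the mapping
 * to the full size the header advertises. Error paths are simplified.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <sys/mman.h>

struct shm_header {
	uint32_t version;
	uint32_t len;		/* full size of the shared area */
	/* ... per-client slots follow in the real layout ... */
};

static void *map_shared_area(int fd, uint32_t want_version)
{
	struct shm_header *hdr;
	uint32_t len;

	hdr = mmap(NULL, sizeof(*hdr), PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, 0);
	if (hdr == MAP_FAILED)
		return NULL;

	len = hdr->len;
	if (hdr->version != want_version || len < sizeof(*hdr)) {
		munmap(hdr, sizeof(*hdr));
		return NULL;
	}

	/* Grow to the advertised size; the mapping may move */
	hdr = mremap(hdr, sizeof(*hdr), len, MREMAP_MAYMOVE);
	return hdr == MAP_FAILED ? NULL : (void *)hdr;
}
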
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 8784f03fa4a6..44c6fc697f3a 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -15,209 +15,54 @@
#include <skas.h>
#include <kern_util.h>
-struct host_vm_change {
- struct host_vm_op {
- enum { NONE, MMAP, MUNMAP, MPROTECT } type;
- union {
- struct {
- unsigned long addr;
- unsigned long len;
- unsigned int prot;
- int fd;
- __u64 offset;
- } mmap;
- struct {
- unsigned long addr;
- unsigned long len;
- } munmap;
- struct {
- unsigned long addr;
- unsigned long len;
- unsigned int prot;
- } mprotect;
- } u;
- } ops[1];
- int userspace;
- int index;
- struct mm_struct *mm;
- void *data;
- int force;
+struct vm_ops {
+ struct mm_id *mm_idp;
+
+ int (*mmap)(struct mm_id *mm_idp,
+ unsigned long virt, unsigned long len, int prot,
+ int phys_fd, unsigned long long offset);
+ int (*unmap)(struct mm_id *mm_idp,
+ unsigned long virt, unsigned long len);
+ int (*mprotect)(struct mm_id *mm_idp,
+ unsigned long virt, unsigned long len,
+ unsigned int prot);
};
-#define INIT_HVC(mm, force, userspace) \
- ((struct host_vm_change) \
- { .ops = { { .type = NONE } }, \
- .mm = mm, \
- .data = NULL, \
- .userspace = userspace, \
- .index = 0, \
- .force = force })
-
-static void report_enomem(void)
+static int kern_map(struct mm_id *mm_idp,
+ unsigned long virt, unsigned long len, int prot,
+ int phys_fd, unsigned long long offset)
{
- printk(KERN_ERR "UML ran out of memory on the host side! "
- "This can happen due to a memory limitation or "
- "vm.max_map_count has been reached.\n");
-}
-
-static int do_ops(struct host_vm_change *hvc, int end,
- int finished)
-{
- struct host_vm_op *op;
- int i, ret = 0;
-
- for (i = 0; i < end && !ret; i++) {
- op = &hvc->ops[i];
- switch (op->type) {
- case MMAP:
- if (hvc->userspace)
- ret = map(&hvc->mm->context.id, op->u.mmap.addr,
- op->u.mmap.len, op->u.mmap.prot,
- op->u.mmap.fd,
- op->u.mmap.offset, finished,
- &hvc->data);
- else
- map_memory(op->u.mmap.addr, op->u.mmap.offset,
- op->u.mmap.len, 1, 1, 1);
- break;
- case MUNMAP:
- if (hvc->userspace)
- ret = unmap(&hvc->mm->context.id,
- op->u.munmap.addr,
- op->u.munmap.len, finished,
- &hvc->data);
- else
- ret = os_unmap_memory(
- (void *) op->u.munmap.addr,
- op->u.munmap.len);
-
- break;
- case MPROTECT:
- if (hvc->userspace)
- ret = protect(&hvc->mm->context.id,
- op->u.mprotect.addr,
- op->u.mprotect.len,
- op->u.mprotect.prot,
- finished, &hvc->data);
- else
- ret = os_protect_memory(
- (void *) op->u.mprotect.addr,
- op->u.mprotect.len,
- 1, 1, 1);
- break;
- default:
- printk(KERN_ERR "Unknown op type %d in do_ops\n",
- op->type);
- BUG();
- break;
- }
- }
-
- if (ret == -ENOMEM)
- report_enomem();
-
- return ret;
+ /* TODO: Why does the executable bit always need to be set for kernel mappings? */
+ return os_map_memory((void *)virt, phys_fd, offset, len,
+ prot & UM_PROT_READ, prot & UM_PROT_WRITE,
+ 1);
}
-static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
- unsigned int prot, struct host_vm_change *hvc)
+static int kern_unmap(struct mm_id *mm_idp,
+ unsigned long virt, unsigned long len)
{
- __u64 offset;
- struct host_vm_op *last;
- int fd = -1, ret = 0;
-
- if (hvc->userspace)
- fd = phys_mapping(phys, &offset);
- else
- offset = phys;
- if (hvc->index != 0) {
- last = &hvc->ops[hvc->index - 1];
- if ((last->type == MMAP) &&
- (last->u.mmap.addr + last->u.mmap.len == virt) &&
- (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
- (last->u.mmap.offset + last->u.mmap.len == offset)) {
- last->u.mmap.len += len;
- return 0;
- }
- }
-
- if (hvc->index == ARRAY_SIZE(hvc->ops)) {
- ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
- hvc->index = 0;
- }
-
- hvc->ops[hvc->index++] = ((struct host_vm_op)
- { .type = MMAP,
- .u = { .mmap = { .addr = virt,
- .len = len,
- .prot = prot,
- .fd = fd,
- .offset = offset }
- } });
- return ret;
+ return os_unmap_memory((void *)virt, len);
}
-static int add_munmap(unsigned long addr, unsigned long len,
- struct host_vm_change *hvc)
+static int kern_mprotect(struct mm_id *mm_idp,
+ unsigned long virt, unsigned long len,
+ unsigned int prot)
{
- struct host_vm_op *last;
- int ret = 0;
-
- if (hvc->index != 0) {
- last = &hvc->ops[hvc->index - 1];
- if ((last->type == MUNMAP) &&
- (last->u.munmap.addr + last->u.mmap.len == addr)) {
- last->u.munmap.len += len;
- return 0;
- }
- }
-
- if (hvc->index == ARRAY_SIZE(hvc->ops)) {
- ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
- hvc->index = 0;
- }
-
- hvc->ops[hvc->index++] = ((struct host_vm_op)
- { .type = MUNMAP,
- .u = { .munmap = { .addr = addr,
- .len = len } } });
- return ret;
+ return os_protect_memory((void *)virt, len,
+ prot & UM_PROT_READ, prot & UM_PROT_WRITE,
+ 1);
}
-static int add_mprotect(unsigned long addr, unsigned long len,
- unsigned int prot, struct host_vm_change *hvc)
+void report_enomem(void)
{
- struct host_vm_op *last;
- int ret = 0;
-
- if (hvc->index != 0) {
- last = &hvc->ops[hvc->index - 1];
- if ((last->type == MPROTECT) &&
- (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
- (last->u.mprotect.prot == prot)) {
- last->u.mprotect.len += len;
- return 0;
- }
- }
-
- if (hvc->index == ARRAY_SIZE(hvc->ops)) {
- ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
- hvc->index = 0;
- }
-
- hvc->ops[hvc->index++] = ((struct host_vm_op)
- { .type = MPROTECT,
- .u = { .mprotect = { .addr = addr,
- .len = len,
- .prot = prot } } });
- return ret;
+ printk(KERN_ERR "UML ran out of memory on the host side! "
+ "This can happen due to a memory limitation or "
+ "vm.max_map_count has been reached.\n");
}
-#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
-
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end,
- struct host_vm_change *hvc)
+ struct vm_ops *ops)
{
pte_t *pte;
int r, w, x, prot, ret = 0;
@@ -235,15 +80,22 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0));
- if (hvc->force || pte_newpage(*pte)) {
+ if (pte_newpage(*pte)) {
if (pte_present(*pte)) {
- if (pte_newpage(*pte))
- ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
- PAGE_SIZE, prot, hvc);
+ if (pte_newpage(*pte)) {
+ __u64 offset;
+ unsigned long phys =
+ pte_val(*pte) & PAGE_MASK;
+ int fd = phys_mapping(phys, &offset);
+
+ ret = ops->mmap(ops->mm_idp, addr,
+ PAGE_SIZE, prot, fd,
+ offset);
+ }
} else
- ret = add_munmap(addr, PAGE_SIZE, hvc);
+ ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
} else if (pte_newprot(*pte))
- ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
+ ret = ops->mprotect(ops->mm_idp, addr, PAGE_SIZE, prot);
*pte = pte_mkuptodate(*pte);
} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
return ret;
@@ -251,7 +103,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end,
- struct host_vm_change *hvc)
+ struct vm_ops *ops)
{
pmd_t *pmd;
unsigned long next;
@@ -261,19 +113,20 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
if (!pmd_present(*pmd)) {
- if (hvc->force || pmd_newpage(*pmd)) {
- ret = add_munmap(addr, next - addr, hvc);
+ if (pmd_newpage(*pmd)) {
+ ret = ops->unmap(ops->mm_idp, addr,
+ next - addr);
pmd_mkuptodate(*pmd);
}
}
- else ret = update_pte_range(pmd, addr, next, hvc);
+ else ret = update_pte_range(pmd, addr, next, ops);
} while (pmd++, addr = next, ((addr < end) && !ret));
return ret;
}
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end,
- struct host_vm_change *hvc)
+ struct vm_ops *ops)
{
pud_t *pud;
unsigned long next;
@@ -283,19 +136,20 @@ static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
do {
next = pud_addr_end(addr, end);
if (!pud_present(*pud)) {
- if (hvc->force || pud_newpage(*pud)) {
- ret = add_munmap(addr, next - addr, hvc);
+ if (pud_newpage(*pud)) {
+ ret = ops->unmap(ops->mm_idp, addr,
+ next - addr);
pud_mkuptodate(*pud);
}
}
- else ret = update_pmd_range(pud, addr, next, hvc);
+ else ret = update_pmd_range(pud, addr, next, ops);
} while (pud++, addr = next, ((addr < end) && !ret));
return ret;
}
static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
unsigned long end,
- struct host_vm_change *hvc)
+ struct vm_ops *ops)
{
p4d_t *p4d;
unsigned long next;
@@ -305,227 +159,59 @@ static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
do {
next = p4d_addr_end(addr, end);
if (!p4d_present(*p4d)) {
- if (hvc->force || p4d_newpage(*p4d)) {
- ret = add_munmap(addr, next - addr, hvc);
+ if (p4d_newpage(*p4d)) {
+ ret = ops->unmap(ops->mm_idp, addr,
+ next - addr);
p4d_mkuptodate(*p4d);
}
} else
- ret = update_pud_range(p4d, addr, next, hvc);
+ ret = update_pud_range(p4d, addr, next, ops);
} while (p4d++, addr = next, ((addr < end) && !ret));
return ret;
}
-static void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
- unsigned long end_addr, int force)
+int um_tlb_sync(struct mm_struct *mm)
{
pgd_t *pgd;
- struct host_vm_change hvc;
- unsigned long addr = start_addr, next;
- int ret = 0, userspace = 1;
+ struct vm_ops ops;
+ unsigned long addr = mm->context.sync_tlb_range_from, next;
+ int ret = 0;
+
+ if (mm->context.sync_tlb_range_to == 0)
+ return 0;
+
+ ops.mm_idp = &mm->context.id;
+ if (mm == &init_mm) {
+ ops.mmap = kern_map;
+ ops.unmap = kern_unmap;
+ ops.mprotect = kern_mprotect;
+ } else {
+ ops.mmap = map;
+ ops.unmap = unmap;
+ ops.mprotect = protect;
+ }
- hvc = INIT_HVC(mm, force, userspace);
pgd = pgd_offset(mm, addr);
do {
- next = pgd_addr_end(addr, end_addr);
+ next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
if (!pgd_present(*pgd)) {
- if (force || pgd_newpage(*pgd)) {
- ret = add_munmap(addr, next - addr, &hvc);
+ if (pgd_newpage(*pgd)) {
+ ret = ops.unmap(ops.mm_idp, addr,
+ next - addr);
pgd_mkuptodate(*pgd);
}
} else
- ret = update_p4d_range(pgd, addr, next, &hvc);
- } while (pgd++, addr = next, ((addr < end_addr) && !ret));
+ ret = update_p4d_range(pgd, addr, next, &ops);
+ } while (pgd++, addr = next,
+ ((addr < mm->context.sync_tlb_range_to) && !ret));
- if (!ret)
- ret = do_ops(&hvc, hvc.index, 1);
-
- /* This is not an else because ret is modified above */
- if (ret) {
- struct mm_id *mm_idp = &current->mm->context.id;
-
- printk(KERN_ERR "fix_range_common: failed, killing current "
- "process: %d\n", task_tgid_vnr(current));
- mm_idp->kill = 1;
- }
-}
-
-static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
-{
- struct mm_struct *mm;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long addr, last;
- int updated = 0, err = 0, force = 0, userspace = 0;
- struct host_vm_change hvc;
-
- mm = &init_mm;
- hvc = INIT_HVC(mm, force, userspace);
- for (addr = start; addr < end;) {
- pgd = pgd_offset(mm, addr);
- if (!pgd_present(*pgd)) {
- last = ADD_ROUND(addr, PGDIR_SIZE);
- if (last > end)
- last = end;
- if (pgd_newpage(*pgd)) {
- updated = 1;
- err = add_munmap(addr, last - addr, &hvc);
- if (err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- }
- addr = last;
- continue;
- }
-
- p4d = p4d_offset(pgd, addr);
- if (!p4d_present(*p4d)) {
- last = ADD_ROUND(addr, P4D_SIZE);
- if (last > end)
- last = end;
- if (p4d_newpage(*p4d)) {
- updated = 1;
- err = add_munmap(addr, last - addr, &hvc);
- if (err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- }
- addr = last;
- continue;
- }
-
- pud = pud_offset(p4d, addr);
- if (!pud_present(*pud)) {
- last = ADD_ROUND(addr, PUD_SIZE);
- if (last > end)
- last = end;
- if (pud_newpage(*pud)) {
- updated = 1;
- err = add_munmap(addr, last - addr, &hvc);
- if (err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- }
- addr = last;
- continue;
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- last = ADD_ROUND(addr, PMD_SIZE);
- if (last > end)
- last = end;
- if (pmd_newpage(*pmd)) {
- updated = 1;
- err = add_munmap(addr, last - addr, &hvc);
- if (err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- }
- addr = last;
- continue;
- }
-
- pte = pte_offset_kernel(pmd, addr);
- if (!pte_present(*pte) || pte_newpage(*pte)) {
- updated = 1;
- err = add_munmap(addr, PAGE_SIZE, &hvc);
- if (err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- if (pte_present(*pte))
- err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
- PAGE_SIZE, 0, &hvc);
- }
- else if (pte_newprot(*pte)) {
- updated = 1;
- err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
- }
- addr += PAGE_SIZE;
- }
- if (!err)
- err = do_ops(&hvc, hvc.index, 1);
-
- if (err < 0)
- panic("flush_tlb_kernel failed, errno = %d\n", err);
- return updated;
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- struct mm_struct *mm = vma->vm_mm;
- void *flush = NULL;
- int r, w, x, prot, err = 0;
- struct mm_id *mm_id;
-
- address &= PAGE_MASK;
-
- pgd = pgd_offset(mm, address);
- if (!pgd_present(*pgd))
- goto kill;
-
- p4d = p4d_offset(pgd, address);
- if (!p4d_present(*p4d))
- goto kill;
-
- pud = pud_offset(p4d, address);
- if (!pud_present(*pud))
- goto kill;
-
- pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd))
- goto kill;
-
- pte = pte_offset_kernel(pmd, address);
-
- r = pte_read(*pte);
- w = pte_write(*pte);
- x = pte_exec(*pte);
- if (!pte_young(*pte)) {
- r = 0;
- w = 0;
- } else if (!pte_dirty(*pte)) {
- w = 0;
- }
-
- mm_id = &mm->context.id;
- prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
- (x ? UM_PROT_EXEC : 0));
- if (pte_newpage(*pte)) {
- if (pte_present(*pte)) {
- unsigned long long offset;
- int fd;
-
- fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
- err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
- 1, &flush);
- }
- else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
- }
- else if (pte_newprot(*pte))
- err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
-
- if (err) {
- if (err == -ENOMEM)
- report_enomem();
-
- goto kill;
- }
-
- *pte = pte_mkuptodate(*pte);
+ if (ret == -ENOMEM)
+ report_enomem();
- return;
+ mm->context.sync_tlb_range_from = 0;
+ mm->context.sync_tlb_range_to = 0;
-kill:
- printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
- force_sig(SIGKILL);
+ return ret;
}
void flush_tlb_all(void)
@@ -540,60 +226,11 @@ void flush_tlb_all(void)
flush_tlb_mm(current->mm);
}
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- flush_tlb_kernel_range_common(start, end);
-}
-
-void flush_tlb_kernel_vm(void)
-{
- flush_tlb_kernel_range_common(start_vm, end_vm);
-}
-
-void __flush_tlb_one(unsigned long addr)
-{
- flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
-}
-
-static void fix_range(struct mm_struct *mm, unsigned long start_addr,
- unsigned long end_addr, int force)
-{
- /*
- * Don't bother flushing if this address space is about to be
- * destroyed.
- */
- if (atomic_read(&mm->mm_users) == 0)
- return;
-
- fix_range_common(mm, start_addr, end_addr, force);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- if (vma->vm_mm == NULL)
- flush_tlb_kernel_range_common(start, end);
- else fix_range(vma->vm_mm, start, end, 0);
-}
-EXPORT_SYMBOL(flush_tlb_range);
-
void flush_tlb_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma)
- fix_range(mm, vma->vm_start, vma->vm_end, 0);
-}
-
-void force_flush_all(void)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma)
- fix_range(mm, vma->vm_start, vma->vm_end, 1);
- mmap_read_unlock(mm);
+ um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}
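
The new flush path is deferred: flush_tlb_mm() above only records the dirty range, and um_tlb_sync() later walks exactly that range and issues the host calls through the selected vm_ops. A minimal sketch of what the marking helper is assumed to do — it lives in a header outside this diff, so the body below is illustrative rather than the patch's code — is simply widening the per-mm sync_tlb_range_from/to window:

	static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
					    unsigned long end)
	{
		/* Grow the window that um_tlb_sync() will walk later. */
		if (mm->context.sync_tlb_range_to == 0) {
			mm->context.sync_tlb_range_from = start;
			mm->context.sync_tlb_range_to = end;
		} else {
			if (start < mm->context.sync_tlb_range_from)
				mm->context.sync_tlb_range_from = start;
			if (end > mm->context.sync_tlb_range_to)
				mm->context.sync_tlb_range_to = end;
		}
	}

um_tlb_sync() then resets both fields to zero once the range has been pushed to the host.
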
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 6d8ae86ae978..97c8df9c4401 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -113,7 +113,7 @@ good_area:
#if 0
WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
- flush_tlb_page(vma, address);
+
out:
mmap_read_unlock(mm);
out_nosemaphore:
@@ -210,8 +210,17 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
if (!is_user && regs)
current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
- if (!is_user && (address >= start_vm) && (address < end_vm)) {
- flush_tlb_kernel_vm();
+ if (!is_user && init_mm.context.sync_tlb_range_to) {
+ /*
+ * Kernel has pending updates from set_ptes that were not
+ * flushed yet. Syncing them should fix the pagefault (if not
+ * we'll get here again and panic).
+ */
+ err = um_tlb_sync(&init_mm);
+ if (err == -ENOMEM)
+ report_enomem();
+ if (err)
+ panic("Failed to sync kernel TLBs: %d", err);
goto out;
}
else if (current->mm == NULL) {
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index e95f805e5004..8e594cda6d77 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -126,9 +126,6 @@ unsigned long uml_reserved; /* Also modified in mem_init */
unsigned long start_vm;
unsigned long end_vm;
-/* Set in uml_ncpus_setup */
-int ncpus = 1;
-
/* Set in early boot */
static int have_root __initdata;
static int have_console __initdata;
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index fc4450db59bd..5adf8f630049 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -17,6 +17,7 @@
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/un.h>
+#include <sys/mman.h>
#include <sys/types.h>
#include <sys/eventfd.h>
#include <poll.h>
@@ -240,6 +241,16 @@ out:
return err;
}
+int os_dup_file(int fd)
+{
+ int new_fd = dup(fd);
+
+ if (new_fd < 0)
+ return -errno;
+
+ return new_fd;
+}
+
void os_close_file(int fd)
{
close(fd);
@@ -502,44 +513,47 @@ int os_shutdown_socket(int fd, int r, int w)
return 0;
}
-int os_rcv_fd(int fd, int *helper_pid_out)
+/**
+ * os_rcv_fd_msg - receive message with (optional) FDs
+ * @fd: the FD to receive from
+ * @fds: the array for FDs to write to
+ * @n_fds: number of FDs to receive (@fds array size)
+ * @data: the message buffer
+ * @data_len: the size of the message to receive
+ *
+ * Receive a message with FDs.
+ *
+ * Returns: the size of the received message, or an error code
+ */
+ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
+ void *data, size_t data_len)
{
- int new, n;
- char buf[CMSG_SPACE(sizeof(new))];
- struct msghdr msg;
+ char buf[CMSG_SPACE(sizeof(*fds) * n_fds)];
struct cmsghdr *cmsg;
- struct iovec iov;
-
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- iov = ((struct iovec) { .iov_base = helper_pid_out,
- .iov_len = sizeof(*helper_pid_out) });
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = buf;
- msg.msg_controllen = sizeof(buf);
- msg.msg_flags = 0;
+ struct iovec iov = {
+ .iov_base = data,
+ .iov_len = data_len,
+ };
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = buf,
+ .msg_controllen = sizeof(buf),
+ };
+ int n;
n = recvmsg(fd, &msg, 0);
if (n < 0)
return -errno;
- else if (n != iov.iov_len)
- *helper_pid_out = -1;
cmsg = CMSG_FIRSTHDR(&msg);
- if (cmsg == NULL) {
- printk(UM_KERN_ERR "rcv_fd didn't receive anything, "
- "error = %d\n", errno);
- return -1;
- }
- if ((cmsg->cmsg_level != SOL_SOCKET) ||
- (cmsg->cmsg_type != SCM_RIGHTS)) {
- printk(UM_KERN_ERR "rcv_fd didn't receive a descriptor\n");
- return -1;
- }
+ if (!cmsg ||
+ cmsg->cmsg_level != SOL_SOCKET ||
+ cmsg->cmsg_type != SCM_RIGHTS)
+ return n;
- new = ((int *) CMSG_DATA(cmsg))[0];
- return new;
+ memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len);
+ return n;
}
int os_create_unix_socket(const char *file, int len, int close_on_exec)
@@ -705,3 +719,25 @@ int os_poll(unsigned int n, const int *fds)
return -EIO;
}
+
+void *os_mmap_rw_shared(int fd, size_t size)
+{
+ void *res = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+ if (res == MAP_FAILED)
+ return NULL;
+
+ return res;
+}
+
+void *os_mremap_rw_shared(void *old_addr, size_t old_size, size_t new_size)
+{
+ void *res;
+
+ res = mremap(old_addr, old_size, new_size, MREMAP_MAYMOVE, NULL);
+
+ if (res == MAP_FAILED)
+ return NULL;
+
+ return res;
+}
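
os_rcv_fd_msg() above receives a payload plus any SCM_RIGHTS file descriptors in a single recvmsg() call. For orientation, here is a hedged sketch of the matching sender side using only standard socket calls — send_fd_msg() and its arguments are illustrative names, not part of this patch:

	#include <string.h>
	#include <sys/types.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	/* Illustrative sender: ship one fd alongside a small message over a unix socket. */
	static ssize_t send_fd_msg(int sock, int fd_to_send, const void *data, size_t data_len)
	{
		char buf[CMSG_SPACE(sizeof(int))] = { 0 };
		struct iovec iov = { .iov_base = (void *)data, .iov_len = data_len };
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = buf,
			.msg_controllen = sizeof(buf),
		};
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

		return sendmsg(sock, &msg, 0);
	}

The receiver would then call os_rcv_fd_msg(sock, &fd, 1, data, data_len) and pick the descriptor out of the fds array if one was attached.
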
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 787cfb9a0308..b11ed66c8bb0 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -8,6 +8,7 @@
#include <stdlib.h>
#include <stdarg.h>
+#include <stdbool.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
@@ -65,9 +66,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
int signals_enabled;
#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
-static int signals_blocked;
-#else
-#define signals_blocked 0
+static int signals_blocked, signals_blocked_pending;
#endif
static unsigned int signals_pending;
static unsigned int signals_active = 0;
@@ -76,14 +75,27 @@ static void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
int enabled = signals_enabled;
- if ((signals_blocked || !enabled) && (sig == SIGIO)) {
+#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
+ if ((signals_blocked ||
+ __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
+ (sig == SIGIO)) {
+ /* increment so unblock will do another round */
+ __atomic_add_fetch(&signals_blocked_pending, 1,
+ __ATOMIC_SEQ_CST);
+ return;
+ }
+#endif
+
+ if (!enabled && (sig == SIGIO)) {
/*
* In TT_MODE_EXTERNAL, need to still call time-travel
- * handlers unless signals are also blocked for the
- * external time message processing. This will mark
- * signals_pending by itself (only if necessary.)
+ * handlers. This will mark signals_pending by itself
+ * (only if necessary.)
+ * Note we won't get here if signals are hard-blocked
+ * (which is handled above), in that case the hard-
+ * unblock will handle things.
*/
- if (!signals_blocked && time_travel_mode == TT_MODE_EXTERNAL)
+ if (time_travel_mode == TT_MODE_EXTERNAL)
sigio_run_timetravel_handlers();
else
signals_pending |= SIGIO_MASK;
@@ -380,33 +392,99 @@ int um_set_signals_trace(int enable)
#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
void mark_sigio_pending(void)
{
+ /*
+ * It would seem that this should be atomic so
+ * it isn't a read-modify-write with a signal
+ * that could happen in the middle, losing the
+ * value set by the signal.
+ *
+ * However, this function is only called when in
+ * time-travel=ext simulation mode, in which case
+ * the only signal ever pending is SIGIO, which
+ * is blocked while this can be called, and the
+ * timer signal (SIGALRM) cannot happen.
+ */
signals_pending |= SIGIO_MASK;
}
void block_signals_hard(void)
{
- if (signals_blocked)
- return;
- signals_blocked = 1;
+ signals_blocked++;
barrier();
}
void unblock_signals_hard(void)
{
+ static bool unblocking;
+
if (!signals_blocked)
+ panic("unblocking signals while not blocked");
+
+ if (--signals_blocked)
return;
- /* Must be set to 0 before we check the pending bits etc. */
- signals_blocked = 0;
+ /*
+ * Must be set to 0 before we check pending so the
+ * SIGIO handler will run as normal unless we're still
+ * going to process signals_blocked_pending.
+ */
barrier();
- if (signals_pending && signals_enabled) {
- /* this is a bit inefficient, but that's not really important */
- block_signals();
- unblock_signals();
- } else if (signals_pending & SIGIO_MASK) {
- /* we need to run time-travel handlers even if not enabled */
- sigio_run_timetravel_handlers();
+ /*
+ * Note that block_signals_hard()/unblock_signals_hard() can be called
+ * within the unblock_signals()/sigio_run_timetravel_handlers() below.
+ * This would still be prone to race conditions since it's actually a
+ * call _within_ e.g. vu_req_read_message(), where we observed this
+ * issue, which loops. Thus, if the inner call handles the recorded
+ * pending signals, we can get out of the inner call with the real
+ * signal handler no longer blocked, and still have a race. Thus don't
+ * handle unblocking in the inner call, if it happens, but only in
+ * the outermost call - 'unblocking' serves as an ownership for the
+ * signals_blocked_pending decrement.
+ */
+ if (unblocking)
+ return;
+ unblocking = true;
+
+ while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
+ if (signals_enabled) {
+ /* signals are enabled so we can touch this */
+ signals_pending |= SIGIO_MASK;
+ /*
+ * this is a bit inefficient, but that's
+ * not really important
+ */
+ block_signals();
+ unblock_signals();
+ } else {
+ /*
+ * we need to run time-travel handlers even
+ * if not enabled
+ */
+ sigio_run_timetravel_handlers();
+ }
+
+ /*
+ * The decrement of signals_blocked_pending must be atomic so
+ * that the signal handler will either happen before or after
+ * the decrement, not during a read-modify-write:
+ * - If it happens before, it can increment it and we'll
+ * decrement it and do another round in the loop.
+ * - If it happens after it'll see 0 for both signals_blocked
+ * and signals_blocked_pending and thus run the handler as
+ * usual (subject to signals_enabled, but that's unrelated.)
+ *
+ * Note that a call to unblock_signals_hard() within the calls
+ * to unblock_signals() or sigio_run_timetravel_handlers() above
+ * will do nothing due to the 'unblocking' state, so this cannot
+ * underflow as the only one decrementing will be the outermost
+ * one.
+ */
+ if (__atomic_sub_fetch(&signals_blocked_pending, 1,
+ __ATOMIC_SEQ_CST) < 0)
+ panic("signals_blocked_pending underflow");
}
+
+ unblocking = false;
}
#endif
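
Hard blocking is now a nesting counter rather than a boolean, so block_signals_hard()/unblock_signals_hard() pairs may nest and only the outermost unblock replays deferred SIGIOs. A short illustrative caller (the function name is made up for the example):

	/* Illustrative nesting: only the outermost unblock drains pending SIGIO. */
	static void guarded_section(void)
	{
		block_signals_hard();	/* depth 1: SIGIO is deferred            */
		block_signals_hard();	/* depth 2: just bumps the counter       */
		/* ... touch state that must not race with the SIGIO handler ... */
		unblock_signals_hard();	/* back to depth 1: nothing drained yet  */
		unblock_signals_hard();	/* depth 0: signals_blocked_pending runs */
	}
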
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index 1f9c1bffc3a6..c55430775efd 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
+ * Copyright (C) 2021 Benjamin Berg <benjamin@sipsolutions.net>
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
@@ -19,7 +20,30 @@
#include <sysdep/stub.h>
#include "../internal.h"
-extern char batch_syscall_stub[], __syscall_stub_start[];
+extern char __syscall_stub_start[];
+
+void syscall_stub_dump_error(struct mm_id *mm_idp)
+{
+ struct stub_data *proc_data = (void *)mm_idp->stack;
+ struct stub_syscall *sc;
+
+ if (proc_data->syscall_data_len < 0 ||
+ proc_data->syscall_data_len >= ARRAY_SIZE(proc_data->syscall_data))
+ panic("Syscall data was corrupted by stub (len is: %d, expected maximum: %d)!",
+ proc_data->syscall_data_len,
+ mm_idp->syscall_data_len);
+
+ sc = &proc_data->syscall_data[proc_data->syscall_data_len];
+
+ printk(UM_KERN_ERR "%s : length = %d, last offset = %d",
+ __func__, mm_idp->syscall_data_len,
+ proc_data->syscall_data_len);
+ printk(UM_KERN_ERR "%s : stub syscall type %d failed, return value = 0x%lx\n",
+ __func__, sc->syscall, proc_data->err);
+
+ print_hex_dump(UM_KERN_ERR, " syscall data: ", 0,
+ 16, 4, sc, sizeof(*sc), 0);
+}
static inline unsigned long *check_init_stack(struct mm_id * mm_idp,
unsigned long *stack)
@@ -36,22 +60,24 @@ static unsigned long syscall_regs[MAX_REG_NR];
static int __init init_syscall_regs(void)
{
get_safe_registers(syscall_regs, NULL);
+
syscall_regs[REGS_IP_INDEX] = STUB_CODE +
- ((unsigned long) batch_syscall_stub -
+ ((unsigned long) stub_syscall_handler -
(unsigned long) __syscall_stub_start);
- syscall_regs[REGS_SP_INDEX] = STUB_DATA;
+ syscall_regs[REGS_SP_INDEX] = STUB_DATA +
+ offsetof(struct stub_data, sigstack) +
+ sizeof(((struct stub_data *) 0)->sigstack) -
+ sizeof(void *);
return 0;
}
__initcall(init_syscall_regs);
-static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
+static inline long do_syscall_stub(struct mm_id *mm_idp)
{
+ struct stub_data *proc_data = (void *)mm_idp->stack;
int n, i;
- long ret, offset;
- unsigned long * data;
- unsigned long * syscall;
int err, pid = mm_idp->u.pid;
n = ptrace_setregs(pid, syscall_regs);
@@ -63,6 +89,9 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
__func__, -n);
}
+ /* Inform process how much we have filled in. */
+ proc_data->syscall_data_len = mm_idp->syscall_data_len;
+
err = ptrace(PTRACE_CONT, pid, 0, 0);
if (err)
panic("Failed to continue stub, pid = %d, errno = %d\n", pid,
@@ -71,135 +100,141 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
wait_stub_done(pid);
/*
- * When the stub stops, we find the following values on the
- * beginning of the stack:
- * (long )return_value
- * (long )offset to failed sycall-data (0, if no error)
+ * proc_data->err will be non-zero if there was an (unexpected) error.
+ * In that case, syscall_data_len points to the last executed syscall,
+ * otherwise it will be zero (but we do not need to rely on that).
*/
- ret = *((unsigned long *) mm_idp->stack);
- offset = *((unsigned long *) mm_idp->stack + 1);
- if (offset) {
- data = (unsigned long *)(mm_idp->stack + offset - STUB_DATA);
- printk(UM_KERN_ERR "%s : ret = %ld, offset = %ld, data = %p\n",
- __func__, ret, offset, data);
- syscall = (unsigned long *)((unsigned long)data + data[0]);
- printk(UM_KERN_ERR "%s: syscall %ld failed, return value = 0x%lx, expected return value = 0x%lx\n",
- __func__, syscall[0], ret, syscall[7]);
- printk(UM_KERN_ERR " syscall parameters: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
- syscall[1], syscall[2], syscall[3],
- syscall[4], syscall[5], syscall[6]);
- for (n = 1; n < data[0]/sizeof(long); n++) {
- if (n == 1)
- printk(UM_KERN_ERR " additional syscall data:");
- if (n % 4 == 1)
- printk("\n" UM_KERN_ERR " ");
- printk(" 0x%lx", data[n]);
- }
- if (n > 1)
- printk("\n");
- }
- else ret = 0;
+ if (proc_data->err < 0) {
+ syscall_stub_dump_error(mm_idp);
- *addr = check_init_stack(mm_idp, NULL);
+ /* Store error code in case someone tries to add more syscalls */
+ mm_idp->syscall_data_len = proc_data->err;
+ } else {
+ mm_idp->syscall_data_len = 0;
+ }
- return ret;
+ return mm_idp->syscall_data_len;
}
-long run_syscall_stub(struct mm_id * mm_idp, int syscall,
- unsigned long *args, long expected, void **addr,
- int done)
+int syscall_stub_flush(struct mm_id *mm_idp)
{
- unsigned long *stack = check_init_stack(mm_idp, *addr);
-
- *stack += sizeof(long);
- stack += *stack / sizeof(long);
-
- *stack++ = syscall;
- *stack++ = args[0];
- *stack++ = args[1];
- *stack++ = args[2];
- *stack++ = args[3];
- *stack++ = args[4];
- *stack++ = args[5];
- *stack++ = expected;
- *stack = 0;
-
- if (!done && ((((unsigned long) stack) & ~UM_KERN_PAGE_MASK) <
- UM_KERN_PAGE_SIZE - 10 * sizeof(long))) {
- *addr = stack;
+ int res;
+
+ if (mm_idp->syscall_data_len == 0)
return 0;
+
+ /* If an error happened already, report it and reset the state. */
+ if (mm_idp->syscall_data_len < 0) {
+ res = mm_idp->syscall_data_len;
+ mm_idp->syscall_data_len = 0;
+ return res;
}
- return do_syscall_stub(mm_idp, addr);
+ res = do_syscall_stub(mm_idp);
+ mm_idp->syscall_data_len = 0;
+
+ return res;
}
-long syscall_stub_data(struct mm_id * mm_idp,
- unsigned long *data, int data_count,
- void **addr, void **stub_addr)
+struct stub_syscall *syscall_stub_alloc(struct mm_id *mm_idp)
{
- unsigned long *stack;
- int ret = 0;
-
- /*
- * If *addr still is uninitialized, it *must* contain NULL.
- * Thus in this case do_syscall_stub correctly won't be called.
- */
- if ((((unsigned long) *addr) & ~UM_KERN_PAGE_MASK) >=
- UM_KERN_PAGE_SIZE - (10 + data_count) * sizeof(long)) {
- ret = do_syscall_stub(mm_idp, addr);
- /* in case of error, don't overwrite data on stack */
- if (ret)
- return ret;
+ struct stub_syscall *sc;
+ struct stub_data *proc_data = (struct stub_data *) mm_idp->stack;
+
+ if (mm_idp->syscall_data_len > 0 &&
+ mm_idp->syscall_data_len == ARRAY_SIZE(proc_data->syscall_data))
+ do_syscall_stub(mm_idp);
+
+ if (mm_idp->syscall_data_len < 0) {
+ /* Return dummy to retain error state. */
+ sc = &proc_data->syscall_data[0];
+ } else {
+ sc = &proc_data->syscall_data[mm_idp->syscall_data_len];
+ mm_idp->syscall_data_len += 1;
}
+ memset(sc, 0, sizeof(*sc));
- stack = check_init_stack(mm_idp, *addr);
- *addr = stack;
+ return sc;
+}
- *stack = data_count * sizeof(long);
+static struct stub_syscall *syscall_stub_get_previous(struct mm_id *mm_idp,
+ int syscall_type,
+ unsigned long virt)
+{
+ if (mm_idp->syscall_data_len > 0) {
+ struct stub_data *proc_data = (void *) mm_idp->stack;
+ struct stub_syscall *sc;
- memcpy(stack + 1, data, data_count * sizeof(long));
+ sc = &proc_data->syscall_data[mm_idp->syscall_data_len - 1];
- *stub_addr = (void *)(((unsigned long)(stack + 1) &
- ~UM_KERN_PAGE_MASK) + STUB_DATA);
+ if (sc->syscall == syscall_type &&
+ sc->mem.addr + sc->mem.length == virt)
+ return sc;
+ }
- return 0;
+ return NULL;
}
-int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, int prot,
- int phys_fd, unsigned long long offset, int done, void **data)
+int map(struct mm_id *mm_idp, unsigned long virt, unsigned long len, int prot,
+ int phys_fd, unsigned long long offset)
{
- int ret;
- unsigned long args[] = { virt, len, prot,
- MAP_SHARED | MAP_FIXED, phys_fd,
- MMAP_OFFSET(offset) };
+ struct stub_syscall *sc;
- ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
- data, done);
+ /* Compress with previous syscall if that is possible */
+ sc = syscall_stub_get_previous(mm_idp, STUB_SYSCALL_MMAP, virt);
+ if (sc && sc->mem.prot == prot && sc->mem.fd == phys_fd &&
+ sc->mem.offset == MMAP_OFFSET(offset - sc->mem.length)) {
+ sc->mem.length += len;
+ return 0;
+ }
- return ret;
+ sc = syscall_stub_alloc(mm_idp);
+ sc->syscall = STUB_SYSCALL_MMAP;
+ sc->mem.addr = virt;
+ sc->mem.length = len;
+ sc->mem.prot = prot;
+ sc->mem.fd = phys_fd;
+ sc->mem.offset = MMAP_OFFSET(offset);
+
+ return 0;
}
-int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
- int done, void **data)
+int unmap(struct mm_id *mm_idp, unsigned long addr, unsigned long len)
{
- int ret;
- unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
- 0 };
+ struct stub_syscall *sc;
- ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
- data, done);
+ /* Compress with previous syscall if that is possible */
+ sc = syscall_stub_get_previous(mm_idp, STUB_SYSCALL_MUNMAP, addr);
+ if (sc) {
+ sc->mem.length += len;
+ return 0;
+ }
- return ret;
+ sc = syscall_stub_alloc(mm_idp);
+ sc->syscall = STUB_SYSCALL_MUNMAP;
+ sc->mem.addr = addr;
+ sc->mem.length = len;
+
+ return 0;
}
-int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
- unsigned int prot, int done, void **data)
+int protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len,
+ unsigned int prot)
{
- int ret;
- unsigned long args[] = { addr, len, prot, 0, 0, 0 };
+ struct stub_syscall *sc;
- ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
- data, done);
+ /* Compress with previous syscall if that is possible */
+ sc = syscall_stub_get_previous(mm_idp, STUB_SYSCALL_MPROTECT, addr);
+ if (sc && sc->mem.prot == prot) {
+ sc->mem.length += len;
+ return 0;
+ }
- return ret;
+ sc = syscall_stub_alloc(mm_idp);
+ sc->syscall = STUB_SYSCALL_MPROTECT;
+ sc->mem.addr = addr;
+ sc->mem.length = len;
+ sc->mem.prot = prot;
+
+ return 0;
}
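
With the queueing rewrite, map()/unmap()/protect() only append struct stub_syscall entries — merging with the previous entry when the ranges are adjacent and compatible — and nothing reaches the host until syscall_stub_flush() runs. A hedged sketch of the resulting call pattern (the helper name is illustrative):

	/* Illustrative batching: three adjacent unmaps collapse into one queued munmap. */
	static int drop_three_pages(struct mm_id *mm_idp, unsigned long virt)
	{
		unmap(mm_idp, virt, PAGE_SIZE);			/* allocates a stub_syscall entry */
		unmap(mm_idp, virt + PAGE_SIZE, PAGE_SIZE);	/* merged into the same entry     */
		unmap(mm_idp, virt + 2 * PAGE_SIZE, PAGE_SIZE);	/* merged again                   */

		/* Nothing has reached the host yet; this runs the stub and reports any error. */
		return syscall_stub_flush(mm_idp);
	}

As the process.c hunk below shows, the real flush happens in userspace() right before the process is resumed, so a whole TLB sync can travel to the host as one batch.
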
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 41a288dcfc34..f7088345b3fc 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -23,6 +23,7 @@
#include <skas.h>
#include <sysdep/stub.h>
#include <linux/threads.h>
+#include <timetravel.h>
#include "../internal.h"
int is_skas_winch(int pid, int fd, void *data)
@@ -253,7 +254,6 @@ static int userspace_tramp(void *stack)
}
int userspace_pid[NR_CPUS];
-int kill_userspace_mm[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
@@ -345,8 +345,20 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
interrupt_end();
while (1) {
- if (kill_userspace_mm[0])
+ time_travel_print_bc_msg();
+
+ current_mm_sync();
+
+ /* Flush out any pending syscalls */
+ err = syscall_stub_flush(current_mm_id());
+ if (err) {
+ if (err == -ENOMEM)
+ report_enomem();
+
+ printk(UM_KERN_ERR "%s - Error flushing stub syscalls: %d",
+ __func__, -err);
fatal_sigsegv();
+ }
/*
* This can legitimately fail if the process loads a
@@ -461,113 +473,6 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
}
}
-static unsigned long thread_regs[MAX_REG_NR];
-static unsigned long thread_fp_regs[FP_SIZE];
-
-static int __init init_thread_regs(void)
-{
- get_safe_registers(thread_regs, thread_fp_regs);
- /* Set parent's instruction pointer to start of clone-stub */
- thread_regs[REGS_IP_INDEX] = STUB_CODE +
- (unsigned long) stub_clone_handler -
- (unsigned long) __syscall_stub_start;
- thread_regs[REGS_SP_INDEX] = STUB_DATA + STUB_DATA_PAGES * UM_KERN_PAGE_SIZE -
- sizeof(void *);
-#ifdef __SIGNAL_FRAMESIZE
- thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
-#endif
- return 0;
-}
-
-__initcall(init_thread_regs);
-
-int copy_context_skas0(unsigned long new_stack, int pid)
-{
- int err;
- unsigned long current_stack = current_stub_stack();
- struct stub_data *data = (struct stub_data *) current_stack;
- struct stub_data *child_data = (struct stub_data *) new_stack;
- unsigned long long new_offset;
- int new_fd = phys_mapping(uml_to_phys((void *)new_stack), &new_offset);
-
- /*
- * prepare offset and fd of child's stack as argument for parent's
- * and child's mmap2 calls
- */
- *data = ((struct stub_data) {
- .offset = MMAP_OFFSET(new_offset),
- .fd = new_fd,
- .parent_err = -ESRCH,
- .child_err = 0,
- });
-
- *child_data = ((struct stub_data) {
- .child_err = -ESRCH,
- });
-
- err = ptrace_setregs(pid, thread_regs);
- if (err < 0) {
- err = -errno;
- printk(UM_KERN_ERR "%s : PTRACE_SETREGS failed, pid = %d, errno = %d\n",
- __func__, pid, -err);
- return err;
- }
-
- err = put_fp_registers(pid, thread_fp_regs);
- if (err < 0) {
- printk(UM_KERN_ERR "%s : put_fp_registers failed, pid = %d, err = %d\n",
- __func__, pid, err);
- return err;
- }
-
- /*
- * Wait, until parent has finished its work: read child's pid from
- * parent's stack, and check, if bad result.
- */
- err = ptrace(PTRACE_CONT, pid, 0, 0);
- if (err) {
- err = -errno;
- printk(UM_KERN_ERR "Failed to continue new process, pid = %d, errno = %d\n",
- pid, errno);
- return err;
- }
-
- wait_stub_done(pid);
-
- pid = data->parent_err;
- if (pid < 0) {
- printk(UM_KERN_ERR "%s - stub-parent reports error %d\n",
- __func__, -pid);
- return pid;
- }
-
- /*
- * Wait, until child has finished too: read child's result from
- * child's stack and check it.
- */
- wait_stub_done(pid);
- if (child_data->child_err != STUB_DATA) {
- printk(UM_KERN_ERR "%s - stub-child %d reports error %ld\n",
- __func__, pid, data->child_err);
- err = data->child_err;
- goto out_kill;
- }
-
- if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
- (void *)PTRACE_O_TRACESYSGOOD) < 0) {
- err = -errno;
- printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",
- __func__, errno);
- goto out_kill;
- }
-
- return pid;
-
- out_kill:
- os_kill_ptraced_process(pid, 1);
- return err;
-}
-
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
(*buf)[0].JB_IP = (unsigned long) handler;
@@ -684,5 +589,4 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
userspace_pid[0] = mm_idp->u.pid;
- kill_userspace_mm[0] = mm_idp->kill;
}
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 89ad9f4f865c..93fc82c01aba 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -17,6 +17,7 @@
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
+#include <asm/ldt.h>
#include <asm/unistd.h>
#include <init.h>
#include <os.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cbe5fac4b9dd..007bab9f2a0e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -287,6 +287,7 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
select HAVE_GENERIC_VDSO
+ select VDSO_GETRANDOM if X86_64
select HOTPLUG_PARALLEL if SMP && X86_64
select HOTPLUG_SMT if SMP
select HOTPLUG_SPLIT_STARTUP if SMP && X86_32
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 2106a2bd152b..a46b1397ad01 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -9,6 +9,7 @@ core-y += arch/x86/crypto/
#
ifeq ($(CONFIG_CC_IS_CLANG),y)
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
endif
diff --git a/arch/x86/boot/install.sh b/arch/x86/boot/install.sh
index 0849f4b42745..93784abcd66d 100755
--- a/arch/x86/boot/install.sh
+++ b/arch/x86/boot/install.sh
@@ -16,6 +16,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ -f $4/vmlinuz ]; then
mv $4/vmlinuz $4/vmlinuz.old
fi
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 215a1b202a91..c9216ac4fb1e 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -7,7 +7,7 @@
include $(srctree)/lib/vdso/Makefile
# Files to link into the vDSO:
-vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
+vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vgetrandom.o vgetrandom-chacha.o
vobjs32-y := vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
vobjs32-y += vdso32/vclock_gettime.o vdso32/vgetcpu.o
vobjs-$(CONFIG_X86_SGX) += vsgx.o
@@ -73,6 +73,7 @@ CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
CFLAGS_REMOVE_vgetcpu.o = -pg
CFLAGS_REMOVE_vdso32/vgetcpu.o = -pg
CFLAGS_REMOVE_vsgx.o = -pg
+CFLAGS_REMOVE_vgetrandom.o = -pg
#
# X32 processes use x32 vDSO to access 64bit kernel data.
diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S
index e8c60ae7a7c8..0bab5f4af6d1 100644
--- a/arch/x86/entry/vdso/vdso.lds.S
+++ b/arch/x86/entry/vdso/vdso.lds.S
@@ -30,6 +30,8 @@ VERSION {
#ifdef CONFIG_X86_SGX
__vdso_sgx_enter_enclave;
#endif
+ getrandom;
+ __vdso_getrandom;
local: *;
};
}
diff --git a/arch/x86/entry/vdso/vgetrandom-chacha.S b/arch/x86/entry/vdso/vgetrandom-chacha.S
new file mode 100644
index 000000000000..bcba5639b8ee
--- /dev/null
+++ b/arch/x86/entry/vdso/vgetrandom-chacha.S
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+.section .rodata, "a"
+.align 16
+CONSTANTS: .octa 0x6b20657479622d323320646e61707865
+.text
+
+/*
+ * Very basic SSE2 implementation of ChaCha20. Produces a given positive number
+ * of blocks of output with a nonce of 0, taking an input key and 8-byte
+ * counter. Importantly does not spill to the stack. Its arguments are:
+ *
+ * rdi: output bytes
+ * rsi: 32-byte key input
+ * rdx: 8-byte counter input/output
+ * rcx: number of 64-byte blocks to write to output
+ */
+SYM_FUNC_START(__arch_chacha20_blocks_nostack)
+
+.set output, %rdi
+.set key, %rsi
+.set counter, %rdx
+.set nblocks, %rcx
+.set i, %al
+/* xmm registers are *not* callee-save. */
+.set temp, %xmm0
+.set state0, %xmm1
+.set state1, %xmm2
+.set state2, %xmm3
+.set state3, %xmm4
+.set copy0, %xmm5
+.set copy1, %xmm6
+.set copy2, %xmm7
+.set copy3, %xmm8
+.set one, %xmm9
+
+ /* copy0 = "expand 32-byte k" */
+ movaps CONSTANTS(%rip),copy0
+ /* copy1,copy2 = key */
+ movups 0x00(key),copy1
+ movups 0x10(key),copy2
+ /* copy3 = counter || zero nonce */
+ movq 0x00(counter),copy3
+ /* one = 1 || 0 */
+ movq $1,%rax
+ movq %rax,one
+
+.Lblock:
+ /* state0,state1,state2,state3 = copy0,copy1,copy2,copy3 */
+ movdqa copy0,state0
+ movdqa copy1,state1
+ movdqa copy2,state2
+ movdqa copy3,state3
+
+ movb $10,i
+.Lpermute:
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */
+ paddd state1,state0
+ pxor state0,state3
+ movdqa state3,temp
+ pslld $16,temp
+ psrld $16,state3
+ por temp,state3
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */
+ paddd state3,state2
+ pxor state2,state1
+ movdqa state1,temp
+ pslld $12,temp
+ psrld $20,state1
+ por temp,state1
+
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */
+ paddd state1,state0
+ pxor state0,state3
+ movdqa state3,temp
+ pslld $8,temp
+ psrld $24,state3
+ por temp,state3
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */
+ paddd state3,state2
+ pxor state2,state1
+ movdqa state1,temp
+ pslld $7,temp
+ psrld $25,state1
+ por temp,state1
+
+ /* state1[0,1,2,3] = state1[1,2,3,0] */
+ pshufd $0x39,state1,state1
+ /* state2[0,1,2,3] = state2[2,3,0,1] */
+ pshufd $0x4e,state2,state2
+ /* state3[0,1,2,3] = state3[3,0,1,2] */
+ pshufd $0x93,state3,state3
+
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */
+ paddd state1,state0
+ pxor state0,state3
+ movdqa state3,temp
+ pslld $16,temp
+ psrld $16,state3
+ por temp,state3
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */
+ paddd state3,state2
+ pxor state2,state1
+ movdqa state1,temp
+ pslld $12,temp
+ psrld $20,state1
+ por temp,state1
+
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */
+ paddd state1,state0
+ pxor state0,state3
+ movdqa state3,temp
+ pslld $8,temp
+ psrld $24,state3
+ por temp,state3
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */
+ paddd state3,state2
+ pxor state2,state1
+ movdqa state1,temp
+ pslld $7,temp
+ psrld $25,state1
+ por temp,state1
+
+ /* state1[0,1,2,3] = state1[3,0,1,2] */
+ pshufd $0x93,state1,state1
+ /* state2[0,1,2,3] = state2[2,3,0,1] */
+ pshufd $0x4e,state2,state2
+ /* state3[0,1,2,3] = state3[1,2,3,0] */
+ pshufd $0x39,state3,state3
+
+ decb i
+ jnz .Lpermute
+
+ /* output0 = state0 + copy0 */
+ paddd copy0,state0
+ movups state0,0x00(output)
+ /* output1 = state1 + copy1 */
+ paddd copy1,state1
+ movups state1,0x10(output)
+ /* output2 = state2 + copy2 */
+ paddd copy2,state2
+ movups state2,0x20(output)
+ /* output3 = state3 + copy3 */
+ paddd copy3,state3
+ movups state3,0x30(output)
+
+ /* ++copy3.counter */
+ paddq one,copy3
+
+ /* output += 64, --nblocks */
+ addq $64,output
+ decq nblocks
+ jnz .Lblock
+
+ /* counter = copy3.counter */
+ movq copy3,0x00(counter)
+
+ /* Zero out the potentially sensitive regs, in case nothing uses these again. */
+ pxor state0,state0
+ pxor state1,state1
+ pxor state2,state2
+ pxor state3,state3
+ pxor copy1,copy1
+ pxor copy2,copy2
+ pxor temp,temp
+
+ ret
+SYM_FUNC_END(__arch_chacha20_blocks_nostack)
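
The assembly above is a direct transcription of the ChaCha20 block function: ten double rounds built from the quarter round, then the initial state is added back in and the 64-bit counter advances by one per block. For comparison with the paddd/pxor/shift sequences, a plain C reference of the quarter round (reference only, not part of the patch):

	#include <stdint.h>

	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

	/* One ChaCha20 quarter round, mirroring the 16/12/8/7-bit rotations above. */
	static inline void chacha_quarter_round(uint32_t x[16], int a, int b, int c, int d)
	{
		x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 16);
		x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 12);
		x[a] += x[b]; x[d] = ROTL32(x[d] ^ x[a], 8);
		x[c] += x[d]; x[b] = ROTL32(x[b] ^ x[c], 7);
	}

The vectorized version performs the same four steps on whole rows of the state at once, using the pshufd shuffles to realign the columns between the column and diagonal rounds.
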
diff --git a/arch/x86/entry/vdso/vgetrandom.c b/arch/x86/entry/vdso/vgetrandom.c
new file mode 100644
index 000000000000..52d3c7faae2e
--- /dev/null
+++ b/arch/x86/entry/vdso/vgetrandom.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+#include <linux/types.h>
+
+#include "../../../../lib/vdso/getrandom.c"
+
+ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len);
+
+ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
+}
+
+ssize_t getrandom(void *, size_t, unsigned int, void *, size_t)
+ __attribute__((weak, alias("__vdso_getrandom")));
diff --git a/arch/x86/include/asm/vdso/getrandom.h b/arch/x86/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..b96e674cafde
--- /dev/null
+++ b/arch/x86/include/asm/vdso/getrandom.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vvar.h>
+
+/**
+ * getrandom_syscall - Invoke the getrandom() syscall.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags)
+{
+ long ret;
+
+ asm ("syscall" : "=a" (ret) :
+ "0" (__NR_getrandom), "D" (buffer), "S" (len), "d" (flags) :
+ "rcx", "r11", "memory");
+
+ return ret;
+}
+
+#define __vdso_rng_data (VVAR(_vdso_rng_data))
+
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ if (IS_ENABLED(CONFIG_TIME_NS) && __vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return (void *)&__vdso_rng_data + ((void *)&__timens_vdso_data - (void *)&__vdso_data);
+ return &__vdso_rng_data;
+}
+
+/**
+ * __arch_chacha20_blocks_nostack - Generate ChaCha20 stream without using the stack.
+ * @dst_bytes: Destination buffer to hold @nblocks * 64 bytes of output.
+ * @key: 32-byte input key.
+ * @counter: 8-byte counter, read on input and updated on return.
+ * @nblocks: Number of blocks to generate.
+ *
+ * Generates a given positive number of blocks of ChaCha20 output with nonce=0, and does not write
+ * to any stack or memory outside of the parameters passed to it, in order to mitigate stack data
+ * leaking into forked child processes.
+ */
+extern void __arch_chacha20_blocks_nostack(u8 *dst_bytes, const u32 *key, u32 *counter, size_t nblocks);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
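
__arch_chacha20_blocks_nostack() is the primitive the generic vDSO getrandom code (included from lib/vdso/getrandom.c) drives: a 32-byte key in, an 8-byte counter in/out, and nblocks * 64 bytes of keystream out. A hedged usage sketch, with illustrative buffer names and shown outside its real call site:

	/* Illustrative call: produce 128 bytes of keystream and advance the counter. */
	static void fill_two_blocks(u8 out[128], const u32 key[8])
	{
		u32 counter[2] = { 0, 0 };	/* 8-byte block counter, starting at 0 */

		__arch_chacha20_blocks_nostack(out, key, counter, 2);
		/* counter[0] is now 2: one increment per 64-byte block produced. */
	}
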
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index 93226281b450..972415a8be31 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -10,6 +10,8 @@
#include <asm/vvar.h>
DEFINE_VVAR(struct vdso_data, _vdso_data);
+DEFINE_VVAR_SINGLE(struct vdso_rng_data, _vdso_rng_data);
+
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index 183e98e49ab9..9d9af37f7cab 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -26,6 +26,8 @@
*/
#define DECLARE_VVAR(offset, type, name) \
EMIT_VVAR(name, offset)
+#define DECLARE_VVAR_SINGLE(offset, type, name) \
+ EMIT_VVAR(name, offset)
#else
@@ -37,6 +39,10 @@ extern char __vvar_page;
extern type timens_ ## name[CS_BASES] \
__attribute__((visibility("hidden"))); \
+#define DECLARE_VVAR_SINGLE(offset, type, name) \
+ extern type vvar_ ## name \
+ __attribute__((visibility("hidden"))); \
+
#define VVAR(name) (vvar_ ## name)
#define TIMENS(name) (timens_ ## name)
@@ -44,12 +50,22 @@ extern char __vvar_page;
type name[CS_BASES] \
__attribute__((section(".vvar_" #name), aligned(16))) __visible
+#define DEFINE_VVAR_SINGLE(type, name) \
+ type name \
+ __attribute__((section(".vvar_" #name), aligned(16))) __visible
+
#endif
/* DECLARE_VVAR(offset, type, name) */
DECLARE_VVAR(128, struct vdso_data, _vdso_data)
+#if !defined(_SINGLE_DATA)
+#define _SINGLE_DATA
+DECLARE_VVAR_SINGLE(640, struct vdso_rng_data, _vdso_rng_data)
+#endif
+
#undef DECLARE_VVAR
+#undef DECLARE_VVAR_SINGLE
#endif
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 64fbd2dbc5b7..a9088250770f 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -62,11 +62,6 @@ void xen_arch_unregister_cpu(int num);
#ifdef CONFIG_PVH
void __init xen_pvh_init(struct boot_params *boot_params);
void __init mem_map_via_hcall(struct boot_params *boot_params_p);
-#ifdef CONFIG_XEN_PVH
-void __init xen_reserve_extra_memory(struct boot_params *bootp);
-#else
-static inline void xen_reserve_extra_memory(struct boot_params *bootp) { }
-#endif
#endif
/* Lazy mode for batching updates / context switch */
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index 9a7c03d47861..51b805c727fc 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -38,7 +38,7 @@ static bool __read_mostly sched_itmt_capable;
*/
unsigned int __read_mostly sysctl_sched_itmt_enabled;
-static int sched_itmt_update_handler(struct ctl_table *table, int write,
+static int sched_itmt_update_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int old_sysctl;
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
index 8c2d4b8de25d..944e0290f2c0 100644
--- a/arch/x86/platform/pvh/enlighten.c
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -75,9 +75,6 @@ static void __init init_pvh_bootparams(bool xen_guest)
} else
xen_raw_printk("Warning: Can fit ISA range into e820\n");
- if (xen_guest)
- xen_reserve_extra_memory(&pvh_bootparams);
-
pvh_bootparams.hdr.cmd_line_ptr =
pvh_start_info.cmdline_paddr;
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 8bc72a51b257..36e67fc97c22 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -9,9 +9,9 @@ else
BITS := 64
endif
-obj-y = bugs_$(BITS).o delay.o fault.o ldt.o \
+obj-y = bugs_$(BITS).o delay.o fault.o \
ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
- stub_$(BITS).o stub_segv.o \
+ stub_segv.o \
sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
mem_$(BITS).o subarch.o os-Linux/
@@ -31,7 +31,6 @@ obj-y += syscalls_64.o vdso/
subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o \
../lib/memmove_64.o ../lib/memset_64.o
-subarch-$(CONFIG_PREEMPTION) += ../entry/thunk_64.o
endif
diff --git a/arch/x86/um/asm/mm_context.h b/arch/x86/um/asm/mm_context.h
deleted file mode 100644
index dc32dc023c2f..000000000000
--- a/arch/x86/um/asm/mm_context.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
- * Licensed under the GPL
- *
- * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
- */
-
-#ifndef __ASM_LDT_H
-#define __ASM_LDT_H
-
-#include <linux/mutex.h>
-#include <asm/ldt.h>
-
-#define LDT_PAGES_MAX \
- ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
-#define LDT_ENTRIES_PER_PAGE \
- (PAGE_SIZE/LDT_ENTRY_SIZE)
-#define LDT_DIRECT_ENTRIES \
- ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
-
-struct ldt_entry {
- __u32 a;
- __u32 b;
-};
-
-typedef struct uml_ldt {
- int entry_count;
- struct mutex lock;
- union {
- struct ldt_entry * pages[LDT_PAGES_MAX];
- struct ldt_entry entries[LDT_DIRECT_ENTRIES];
- } u;
-} uml_ldt_t;
-
-#define LDT_entry_a(info) \
- ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-
-#define LDT_entry_b(info) \
- (((info)->base_addr & 0xff000000) | \
- (((info)->base_addr & 0x00ff0000) >> 16) | \
- ((info)->limit & 0xf0000) | \
- (((info)->read_exec_only ^ 1) << 9) | \
- ((info)->contents << 10) | \
- (((info)->seg_not_present ^ 1) << 15) | \
- ((info)->seg_32bit << 22) | \
- ((info)->limit_in_pages << 23) | \
- ((info)->useable << 20) | \
- 0x7000)
-
-#define _LDT_empty(info) (\
- (info)->base_addr == 0 && \
- (info)->limit == 0 && \
- (info)->contents == 0 && \
- (info)->read_exec_only == 1 && \
- (info)->seg_32bit == 0 && \
- (info)->limit_in_pages == 0 && \
- (info)->seg_not_present == 1 && \
- (info)->useable == 0 )
-
-#ifdef CONFIG_X86_64
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
-#else
-#define LDT_empty(info) (_LDT_empty(info))
-#endif
-
-struct uml_arch_mm_context {
- uml_ldt_t ldt;
-};
-
-#endif
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
deleted file mode 100644
index 255a44dd415a..000000000000
--- a/arch/x86/um/ldt.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <asm/unistd.h>
-#include <os.h>
-#include <skas.h>
-#include <sysdep/tls.h>
-
-static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
-{
- return syscall(__NR_modify_ldt, func, ptr, bytecount);
-}
-
-static long write_ldt_entry(struct mm_id *mm_idp, int func,
- struct user_desc *desc, void **addr, int done)
-{
- long res;
- void *stub_addr;
-
- BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
-
- res = syscall_stub_data(mm_idp, (unsigned long *)desc,
- sizeof(*desc) / sizeof(long),
- addr, &stub_addr);
- if (!res) {
- unsigned long args[] = { func,
- (unsigned long)stub_addr,
- sizeof(*desc),
- 0, 0, 0 };
- res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
- 0, addr, done);
- }
-
- return res;
-}
-
-/*
- * In skas mode, we hold our own ldt data in UML.
- * Thus, the code implementing sys_modify_ldt_skas
- * is very similar to (and mostly stolen from) sys_modify_ldt
- * for arch/i386/kernel/ldt.c
- * The routines copied and modified in part are:
- * - read_ldt
- * - read_default_ldt
- * - write_ldt
- * - sys_modify_ldt_skas
- */
-
-static int read_ldt(void __user * ptr, unsigned long bytecount)
-{
- int i, err = 0;
- unsigned long size;
- uml_ldt_t *ldt = &current->mm->context.arch.ldt;
-
- if (!ldt->entry_count)
- goto out;
- if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
- bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
- err = bytecount;
-
- mutex_lock(&ldt->lock);
- if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
- size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
- if (size > bytecount)
- size = bytecount;
- if (copy_to_user(ptr, ldt->u.entries, size))
- err = -EFAULT;
- bytecount -= size;
- ptr += size;
- }
- else {
- for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
- i++) {
- size = PAGE_SIZE;
- if (size > bytecount)
- size = bytecount;
- if (copy_to_user(ptr, ldt->u.pages[i], size)) {
- err = -EFAULT;
- break;
- }
- bytecount -= size;
- ptr += size;
- }
- }
- mutex_unlock(&ldt->lock);
-
- if (bytecount == 0 || err == -EFAULT)
- goto out;
-
- if (clear_user(ptr, bytecount))
- err = -EFAULT;
-
-out:
- return err;
-}
-
-static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-{
- int err;
-
- if (bytecount > 5*LDT_ENTRY_SIZE)
- bytecount = 5*LDT_ENTRY_SIZE;
-
- err = bytecount;
- /*
- * UML doesn't support lcall7 and lcall27.
- * So, we don't really have a default ldt, but emulate
- * an empty ldt of common host default ldt size.
- */
- if (clear_user(ptr, bytecount))
- err = -EFAULT;
-
- return err;
-}
-
-static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
-{
- uml_ldt_t *ldt = &current->mm->context.arch.ldt;
- struct mm_id * mm_idp = &current->mm->context.id;
- int i, err;
- struct user_desc ldt_info;
- struct ldt_entry entry0, *ldt_p;
- void *addr = NULL;
-
- err = -EINVAL;
- if (bytecount != sizeof(ldt_info))
- goto out;
- err = -EFAULT;
- if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
- goto out;
-
- err = -EINVAL;
- if (ldt_info.entry_number >= LDT_ENTRIES)
- goto out;
- if (ldt_info.contents == 3) {
- if (func == 1)
- goto out;
- if (ldt_info.seg_not_present == 0)
- goto out;
- }
-
- mutex_lock(&ldt->lock);
-
- err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
- if (err)
- goto out_unlock;
-
- if (ldt_info.entry_number >= ldt->entry_count &&
- ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
- for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
- i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
- i++) {
- if (i == 0)
- memcpy(&entry0, ldt->u.entries,
- sizeof(entry0));
- ldt->u.pages[i] = (struct ldt_entry *)
- __get_free_page(GFP_KERNEL|__GFP_ZERO);
- if (!ldt->u.pages[i]) {
- err = -ENOMEM;
- /* Undo the change in host */
- memset(&ldt_info, 0, sizeof(ldt_info));
- write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
- goto out_unlock;
- }
- if (i == 0) {
- memcpy(ldt->u.pages[0], &entry0,
- sizeof(entry0));
- memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
- sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
- }
- ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
- }
- }
- if (ldt->entry_count <= ldt_info.entry_number)
- ldt->entry_count = ldt_info.entry_number + 1;
-
- if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
- ldt_p = ldt->u.entries + ldt_info.entry_number;
- else
- ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
- ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
-
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
- (func == 1 || LDT_empty(&ldt_info))) {
- ldt_p->a = 0;
- ldt_p->b = 0;
- }
- else{
- if (func == 1)
- ldt_info.useable = 0;
- ldt_p->a = LDT_entry_a(&ldt_info);
- ldt_p->b = LDT_entry_b(&ldt_info);
- }
- err = 0;
-
-out_unlock:
- mutex_unlock(&ldt->lock);
-out:
- return err;
-}
-
-static long do_modify_ldt_skas(int func, void __user *ptr,
- unsigned long bytecount)
-{
- int ret = -ENOSYS;
-
- switch (func) {
- case 0:
- ret = read_ldt(ptr, bytecount);
- break;
- case 1:
- case 0x11:
- ret = write_ldt(ptr, bytecount, func);
- break;
- case 2:
- ret = read_default_ldt(ptr, bytecount);
- break;
- }
- return ret;
-}
-
-static DEFINE_SPINLOCK(host_ldt_lock);
-static short dummy_list[9] = {0, -1};
-static short * host_ldt_entries = NULL;
-
-static void ldt_get_host_info(void)
-{
- long ret;
- struct ldt_entry * ldt;
- short *tmp;
- int i, size, k, order;
-
- spin_lock(&host_ldt_lock);
-
- if (host_ldt_entries != NULL) {
- spin_unlock(&host_ldt_lock);
- return;
- }
- host_ldt_entries = dummy_list+1;
-
- spin_unlock(&host_ldt_lock);
-
- for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
- ;
-
- ldt = (struct ldt_entry *)
- __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
- if (ldt == NULL) {
- printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
- "for host ldt\n");
- return;
- }
-
- ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
- if (ret < 0) {
- printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
- goto out_free;
- }
- if (ret == 0) {
- /* default_ldt is active, simply write an empty entry 0 */
- host_ldt_entries = dummy_list;
- goto out_free;
- }
-
- for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
- if (ldt[i].a != 0 || ldt[i].b != 0)
- size++;
- }
-
- if (size < ARRAY_SIZE(dummy_list))
- host_ldt_entries = dummy_list;
- else {
- size = (size + 1) * sizeof(dummy_list[0]);
- tmp = kmalloc(size, GFP_KERNEL);
- if (tmp == NULL) {
- printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
- "host ldt list\n");
- goto out_free;
- }
- host_ldt_entries = tmp;
- }
-
- for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
- if (ldt[i].a != 0 || ldt[i].b != 0)
- host_ldt_entries[k++] = i;
- }
- host_ldt_entries[k] = -1;
-
-out_free:
- free_pages((unsigned long)ldt, order);
-}
-
-long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
-{
- struct user_desc desc;
- short * num_p;
- int i;
- long page, err=0;
- void *addr = NULL;
-
-
- mutex_init(&new_mm->arch.ldt.lock);
-
- if (!from_mm) {
- memset(&desc, 0, sizeof(desc));
- /*
- * Now we try to retrieve info about the ldt, we
- * inherited from the host. All ldt-entries found
- * will be reset in the following loop
- */
- ldt_get_host_info();
- for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
- desc.entry_number = *num_p;
- err = write_ldt_entry(&new_mm->id, 1, &desc,
- &addr, *(num_p + 1) == -1);
- if (err)
- break;
- }
- new_mm->arch.ldt.entry_count = 0;
-
- goto out;
- }
-
- /*
- * Our local LDT is used to supply the data for
- * modify_ldt(READLDT), if PTRACE_LDT isn't available,
- * i.e., we have to use the stub for modify_ldt, which
- * can't handle the big read buffer of up to 64kB.
- */
- mutex_lock(&from_mm->arch.ldt.lock);
- if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
- memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
- sizeof(new_mm->arch.ldt.u.entries));
- else {
- i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
- while (i-->0) {
- page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- if (!page) {
- err = -ENOMEM;
- break;
- }
- new_mm->arch.ldt.u.pages[i] =
- (struct ldt_entry *) page;
- memcpy(new_mm->arch.ldt.u.pages[i],
- from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
- }
- }
- new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
- mutex_unlock(&from_mm->arch.ldt.lock);
-
- out:
- return err;
-}
-
-
-void free_ldt(struct mm_context *mm)
-{
- int i;
-
- if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
- i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
- while (i-- > 0)
- free_page((long) mm->arch.ldt.u.pages[i]);
- }
- mm->arch.ldt.entry_count = 0;
-}
-
-SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
- unsigned long , bytecount)
-{
- /* See non-um modify_ldt() for why we do this cast */
- return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
-}
diff --git a/arch/x86/um/shared/sysdep/stub.h b/arch/x86/um/shared/sysdep/stub.h
index ce0ca46ad383..dc89f4423454 100644
--- a/arch/x86/um/shared/sysdep/stub.h
+++ b/arch/x86/um/shared/sysdep/stub.h
@@ -12,4 +12,4 @@
#endif
extern void stub_segv_handler(int, siginfo_t *, void *);
-extern void stub_clone_handler(void);
+extern void stub_syscall_handler(void);
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index ea8b5a2d67af..0b44a86dd346 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -6,6 +6,7 @@
#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H
+#include <stddef.h>
#include <asm/ptrace.h>
#include <generated/asm-offsets.h>
@@ -79,33 +80,31 @@ static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
return ret;
}
-static __always_inline void trap_myself(void)
+static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
+ long arg3, long arg4, long arg5,
+ long arg6)
{
- __asm("int3");
+ struct syscall_args {
+ int ebx, ebp;
+ } args = { arg1, arg6 };
+ long ret;
+
+ __asm__ volatile ("pushl %%ebp;"
+ "movl 0x4(%%ebx),%%ebp;"
+ "movl (%%ebx),%%ebx;"
+ "int $0x80;"
+ "popl %%ebp"
+ : "=a" (ret)
+ : "0" (syscall), "b" (&args),
+ "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
+ : "memory");
+
+ return ret;
}
-static __always_inline void remap_stack_and_trap(void)
+static __always_inline void trap_myself(void)
{
- __asm__ volatile (
- "movl %%esp,%%ebx ;"
- "andl %0,%%ebx ;"
- "movl %1,%%eax ;"
- "movl %%ebx,%%edi ; addl %2,%%edi ; movl (%%edi),%%edi ;"
- "movl %%ebx,%%ebp ; addl %3,%%ebp ; movl (%%ebp),%%ebp ;"
- "int $0x80 ;"
- "addl %4,%%ebx ; movl %%eax, (%%ebx) ;"
- "int $3"
- : :
- "g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)),
- "g" (STUB_MMAP_NR),
- "g" (UML_STUB_FIELD_FD),
- "g" (UML_STUB_FIELD_OFFSET),
- "g" (UML_STUB_FIELD_CHILD_ERR),
- "c" (STUB_DATA_PAGES * UM_KERN_PAGE_SIZE),
- "d" (PROT_READ | PROT_WRITE),
- "S" (MAP_FIXED | MAP_SHARED)
- :
- "memory");
+ __asm("int3");
}
static __always_inline void *get_stub_data(void)
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index b24168ef0ac4..67f44284f1aa 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -6,6 +6,7 @@
#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H
+#include <stddef.h>
#include <sysdep/ptrace_user.h>
#include <generated/asm-offsets.h>
#include <linux/stddef.h>
@@ -79,35 +80,25 @@ static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
return ret;
}
-static __always_inline void trap_myself(void)
+static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
+ long arg3, long arg4, long arg5,
+ long arg6)
{
- __asm("int3");
+ long ret;
+
+ __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; "
+ __syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
+ "g" (arg4), "g" (arg5), "g" (arg6)
+ : __syscall_clobber, "r10", "r8", "r9");
+
+ return ret;
}
-static __always_inline void remap_stack_and_trap(void)
+static __always_inline void trap_myself(void)
{
- __asm__ volatile (
- "movq %0,%%rax ;"
- "movq %%rsp,%%rdi ;"
- "andq %1,%%rdi ;"
- "movq %2,%%r10 ;"
- "movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"
- "movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"
- __syscall ";"
- "movq %%rsp,%%rdi ; andq %1,%%rdi ;"
- "addq %5,%%rdi ; movq %%rax, (%%rdi) ;"
- "int3"
- : :
- "g" (STUB_MMAP_NR),
- "g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)),
- "g" (MAP_FIXED | MAP_SHARED),
- "g" (UML_STUB_FIELD_FD),
- "g" (UML_STUB_FIELD_OFFSET),
- "g" (UML_STUB_FIELD_CHILD_ERR),
- "S" (STUB_DATA_PAGES * UM_KERN_PAGE_SIZE),
- "d" (PROT_READ | PROT_WRITE)
- :
- __syscall_clobber, "r10", "r8", "r9");
+ __asm("int3");
}
static __always_inline void *get_stub_data(void)
diff --git a/arch/x86/um/stub_32.S b/arch/x86/um/stub_32.S
deleted file mode 100644
index 8291899e6aaf..000000000000
--- a/arch/x86/um/stub_32.S
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <as-layout.h>
-
-.section .__syscall_stub, "ax"
-
- .globl batch_syscall_stub
-batch_syscall_stub:
- /* %esp comes in as "top of page" */
- mov %esp, %ecx
- /* %esp has pointer to first operation */
- add $8, %esp
-again:
- /* load length of additional data */
- mov 0x0(%esp), %eax
-
- /* if(length == 0) : end of list */
- /* write possible 0 to header */
- mov %eax, 0x4(%ecx)
- cmpl $0, %eax
- jz done
-
- /* save current pointer */
- mov %esp, 0x4(%ecx)
-
- /* skip additional data */
- add %eax, %esp
-
- /* load syscall-# */
- pop %eax
-
- /* load syscall params */
- pop %ebx
- pop %ecx
- pop %edx
- pop %esi
- pop %edi
- pop %ebp
-
- /* execute syscall */
- int $0x80
-
- /* restore top of page pointer in %ecx */
- mov %esp, %ecx
- andl $(~UM_KERN_PAGE_SIZE) + 1, %ecx
-
- /* check return value */
- pop %ebx
- cmp %ebx, %eax
- je again
-
-done:
- /* save return value */
- mov %eax, (%ecx)
-
- /* stop */
- int3
diff --git a/arch/x86/um/stub_64.S b/arch/x86/um/stub_64.S
deleted file mode 100644
index f3404640197a..000000000000
--- a/arch/x86/um/stub_64.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <as-layout.h>
-
-.section .__syscall_stub, "ax"
- .globl batch_syscall_stub
-batch_syscall_stub:
- /* %rsp has the pointer to first operation */
- mov %rsp, %rbx
- add $0x10, %rsp
-again:
- /* load length of additional data */
- mov 0x0(%rsp), %rax
-
- /* if(length == 0) : end of list */
- /* write possible 0 to header */
- mov %rax, 8(%rbx)
- cmp $0, %rax
- jz done
-
- /* save current pointer */
- mov %rsp, 8(%rbx)
-
- /* skip additional data */
- add %rax, %rsp
-
- /* load syscall-# */
- pop %rax
-
- /* load syscall params */
- pop %rdi
- pop %rsi
- pop %rdx
- pop %r10
- pop %r8
- pop %r9
-
- /* execute syscall */
- syscall
-
- /* check return value */
- pop %rcx
- cmp %rcx, %rax
- je again
-
-done:
- /* save return value */
- mov %rax, (%rbx)
-
- /* stop */
- int3
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
index d301deee041f..fbb129023080 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
@@ -11,6 +11,7 @@
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>
+#include <asm/desc.h>
/*
* If needed we can detect when it's uninitialized.
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 27a2a02ef8fb..728a4366ca85 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -9,6 +9,7 @@
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
+#include <asm/setup.h>
#include <xen/xen.h>
#include <asm/xen/interface.h>
@@ -27,54 +28,6 @@
bool __ro_after_init xen_pvh;
EXPORT_SYMBOL_GPL(xen_pvh);
-void __init xen_pvh_init(struct boot_params *boot_params)
-{
- u32 msr;
- u64 pfn;
-
- xen_pvh = 1;
- xen_domain_type = XEN_HVM_DOMAIN;
- xen_start_flags = pvh_start_info.flags;
-
- msr = cpuid_ebx(xen_cpuid_base() + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
- if (xen_initial_domain())
- x86_init.oem.arch_setup = xen_add_preferred_consoles;
- x86_init.oem.banner = xen_banner;
-
- xen_efi_init(boot_params);
-
- if (xen_initial_domain()) {
- struct xen_platform_op op = {
- .cmd = XENPF_get_dom0_console,
- };
- int ret = HYPERVISOR_platform_op(&op);
-
- if (ret > 0)
- xen_init_vga(&op.u.dom0_console,
- min(ret * sizeof(char),
- sizeof(op.u.dom0_console)),
- &boot_params->screen_info);
- }
-}
-
-void __init mem_map_via_hcall(struct boot_params *boot_params_p)
-{
- struct xen_memory_map memmap;
- int rc;
-
- memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
- set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
- rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
- if (rc) {
- xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
- BUG();
- }
- boot_params_p->e820_entries = memmap.nr_entries;
-}
-
/*
* Reserve e820 UNUSABLE regions to inflate the memory balloon.
*
@@ -89,8 +42,9 @@ void __init mem_map_via_hcall(struct boot_params *boot_params_p)
* hypervisor should notify us which memory ranges are suitable for creating
* foreign mappings, but that's not yet implemented.
*/
-void __init xen_reserve_extra_memory(struct boot_params *bootp)
+static void __init pvh_reserve_extra_memory(void)
{
+ struct boot_params *bootp = &boot_params;
unsigned int i, ram_pages = 0, extra_pages;
for (i = 0; i < bootp->e820_entries; i++) {
@@ -141,3 +95,58 @@ void __init xen_reserve_extra_memory(struct boot_params *bootp)
xen_add_extra_mem(PFN_UP(e->addr), pages);
}
}
+
+static void __init pvh_arch_setup(void)
+{
+ pvh_reserve_extra_memory();
+
+ if (xen_initial_domain())
+ xen_add_preferred_consoles();
+}
+
+void __init xen_pvh_init(struct boot_params *boot_params)
+{
+ u32 msr;
+ u64 pfn;
+
+ xen_pvh = 1;
+ xen_domain_type = XEN_HVM_DOMAIN;
+ xen_start_flags = pvh_start_info.flags;
+
+ msr = cpuid_ebx(xen_cpuid_base() + 2);
+ pfn = __pa(hypercall_page);
+ wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+
+ x86_init.oem.arch_setup = pvh_arch_setup;
+ x86_init.oem.banner = xen_banner;
+
+ xen_efi_init(boot_params);
+
+ if (xen_initial_domain()) {
+ struct xen_platform_op op = {
+ .cmd = XENPF_get_dom0_console,
+ };
+ int ret = HYPERVISOR_platform_op(&op);
+
+ if (ret > 0)
+ xen_init_vga(&op.u.dom0_console,
+ min(ret * sizeof(char),
+ sizeof(op.u.dom0_console)),
+ &boot_params->screen_info);
+ }
+}
+
+void __init mem_map_via_hcall(struct boot_params *boot_params_p)
+{
+ struct xen_memory_map memmap;
+ int rc;
+
+ memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
+ set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
+ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+ if (rc) {
+ xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
+ BUG();
+ }
+ boot_params_p->e820_entries = memmap.nr_entries;
+}
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index d4cefd8a9af4..10c660fae8b3 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -54,8 +54,9 @@ struct mc_debug_data {
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
static struct mc_debug_data mc_debug_data_early __initdata;
-static struct mc_debug_data __percpu *mc_debug_data __refdata =
+static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
&mc_debug_data_early;
+static struct mc_debug_data __percpu *mc_debug_data_ptr;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
static struct static_key mc_debug __ro_after_init;
@@ -70,16 +71,20 @@ static int __init xen_parse_mc_debug(char *arg)
}
early_param("xen_mc_debug", xen_parse_mc_debug);
+void mc_percpu_init(unsigned int cpu)
+{
+ per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
+}
+
static int __init mc_debug_enable(void)
{
- struct mc_debug_data __percpu *mcdb;
unsigned long flags;
if (!mc_debug_enabled)
return 0;
- mcdb = alloc_percpu(struct mc_debug_data);
- if (!mcdb) {
+ mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
+ if (!mc_debug_data_ptr) {
pr_err("xen_mc_debug inactive\n");
static_key_slow_dec(&mc_debug);
return -ENOMEM;
@@ -88,7 +93,7 @@ static int __init mc_debug_enable(void)
/* Be careful when switching to percpu debug data. */
local_irq_save(flags);
xen_mc_flush();
- mc_debug_data = mcdb;
+ mc_percpu_init(0);
local_irq_restore(flags);
pr_info("xen_mc_debug active\n");
@@ -150,7 +155,7 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
if (static_key_false(&mc_debug)) {
- mcdb = this_cpu_ptr(mc_debug_data);
+ mcdb = __this_cpu_read(mc_debug_data);
memcpy(mcdb->entries, b->entries,
b->mcidx * sizeof(struct multicall_entry));
}
@@ -230,7 +235,7 @@ struct multicall_space __xen_mc_entry(size_t args)
ret.mc = &b->entries[b->mcidx];
if (static_key_false(&mc_debug)) {
- struct mc_debug_data *mcdb = this_cpu_ptr(mc_debug_data);
+ struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
mcdb->caller[b->mcidx] = __builtin_return_address(0);
mcdb->argsz[b->mcidx] = args;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index a0c3e77e3d5b..806ddb2391d9 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -690,6 +690,7 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
+ unsigned long maxmem_pages;
int i;
int op;
@@ -761,8 +762,8 @@ char * __init xen_memory_setup(void)
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*/
- extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
- extra_pages, max_pages - max_pfn);
+ maxmem_pages = EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM));
+ extra_pages = min3(maxmem_pages, extra_pages, max_pages - max_pfn);
i = 0;
addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size;
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 7ea57f728b89..6863d3da7dec 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -305,6 +305,7 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
return rc;
xen_pmu_init(cpu);
+ mc_percpu_init(cpu);
/*
* Why is this a BUG? If the hypercall fails then everything can be
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index e7775dff9452..0cf16fc79e0b 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -257,6 +257,9 @@ void xen_mc_callback(void (*fn)(void *), void *data);
*/
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
+/* Do percpu data initialization for multicalls. */
+void mc_percpu_init(unsigned int cpu);
+
extern bool is_xen_pmu;
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bdbd60ae8897..284bc2e03580 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1047,10 +1047,10 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
ACPI Bus operations
-------------------------------------------------------------------------- */
-static int acpi_bus_match(struct device *dev, struct device_driver *drv)
+static int acpi_bus_match(struct device *dev, const struct device_driver *drv)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = to_acpi_driver(drv);
+ const struct acpi_driver *acpi_drv = to_acpi_driver(drv);
return acpi_dev->flags.match_driver
&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
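The hunk above (and several similar ones below) moves bus ->match callbacks to a const struct device_driver * parameter. A minimal sketch of the pattern with a hypothetical "foo" bus; the struct, macro, and name-based match are illustrative, not a real kernel bus:

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/string.h>

struct foo_driver {
	const char *name_match;		/* hypothetical match key */
	struct device_driver drv;
};

/* container_of_const() preserves the const qualifier of @d */
#define to_foo_driver(d) container_of_const(d, struct foo_driver, drv)

static int foo_bus_match(struct device *dev, const struct device_driver *drv)
{
	const struct foo_driver *fdrv = to_foo_driver(drv);

	/* real buses compare ID tables; a name compare keeps the sketch short */
	return !strcmp(dev_name(dev), fdrv->name_match);
}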
diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig
index 849c2bd820b9..f33194d1e43f 100644
--- a/drivers/acpi/numa/Kconfig
+++ b/drivers/acpi/numa/Kconfig
@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config ACPI_NUMA
- bool "NUMA support"
- depends on NUMA
- depends on (X86 || ARM64 || LOONGARCH)
- default y if ARM64
+ def_bool NUMA && !X86
config ACPI_HMAT
bool "ACPI Heterogeneous Memory Attribute Table Support"
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index e3f26e71637a..44f91f2c6c5d 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -167,6 +167,19 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
}
}
break;
+
+ case ACPI_SRAT_TYPE_RINTC_AFFINITY:
+ {
+ struct acpi_srat_rintc_affinity *p =
+ (struct acpi_srat_rintc_affinity *)header;
+ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
+ p->acpi_processor_uid,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_RINTC_ENABLED) ?
+ "enabled" : "disabled");
+ }
+ break;
+
default:
pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
header->type);
@@ -450,6 +463,21 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
+static int __init
+acpi_parse_rintc_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_rintc_affinity *rintc_affinity;
+
+ rintc_affinity = (struct acpi_srat_rintc_affinity *)header;
+ acpi_table_print_srat_entry(&header->common);
+
+ /* let the architecture-dependent part do it */
+ acpi_numa_rintc_affinity_init(rintc_affinity);
+
+ return 0;
+}
+
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
@@ -485,7 +513,7 @@ int __init acpi_numa_init(void)
/* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- struct acpi_subtable_proc srat_proc[4];
+ struct acpi_subtable_proc srat_proc[5];
memset(srat_proc, 0, sizeof(srat_proc));
srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
@@ -496,6 +524,8 @@ int __init acpi_numa_init(void)
srat_proc[2].handler = acpi_parse_gicc_affinity;
srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
srat_proc[3].handler = acpi_parse_gi_affinity;
+ srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY;
+ srat_proc[4].handler = acpi_parse_rintc_affinity;
acpi_table_parse_entries_array(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index aba3aa95b224..34bc880ca20b 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -26,7 +26,7 @@
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
-#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
+#define to_amba_driver(d) container_of_const(d, struct amba_driver, drv)
/* called on periphid match and class 0x9 coresight device. */
static int
@@ -205,10 +205,10 @@ err_out:
return ret;
}
-static int amba_match(struct device *dev, struct device_driver *drv)
+static int amba_match(struct device *dev, const struct device_driver *drv)
{
struct amba_device *pcdev = to_amba_device(dev);
- struct amba_driver *pcdrv = to_amba_driver(drv);
+ const struct amba_driver *pcdrv = to_amba_driver(drv);
mutex_lock(&pcdev->periphid_lock);
if (!pcdev->periphid) {
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 69d2138d7efb..21545ffba065 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -316,7 +316,7 @@ endif # PARPORT_PANEL
config PANEL_CHANGE_MESSAGE
bool "Change LCD initialization message ?"
- depends on CHARLCD
+ depends on CHARLCD || LINEDISP
help
This allows you to replace the boot message indicating the kernel version
and the driver version with a custom message. This is useful on appliances
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index 0b1c99cca733..a7eae99a48f7 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -270,7 +270,7 @@ static int __init charlcd_probe(struct platform_device *pdev)
struct charlcd *lcd;
struct resource *res;
- lcd = kzalloc(sizeof(struct charlcd), GFP_KERNEL);
+ lcd = kzalloc(sizeof(*lcd), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
index eed80063a6d2..4d4287209d04 100644
--- a/drivers/auxdisplay/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -36,6 +36,8 @@ enum charlcd_lines {
CHARLCD_LINES_2,
};
+struct charlcd_ops;
+
struct charlcd {
const struct charlcd_ops *ops;
const unsigned char *char_conv; /* Optional */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 7ac0b1b1d548..025dc6855cb2 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -230,7 +230,7 @@ static int hd44780_probe(struct platform_device *pdev)
if (!lcd)
goto fail1;
- hd = kzalloc(sizeof(struct hd44780), GFP_KERNEL);
+ hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
goto fail2;
diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
index 7cbf375b0fa5..4ef87c3118c0 100644
--- a/drivers/auxdisplay/hd44780_common.c
+++ b/drivers/auxdisplay/hd44780_common.c
@@ -366,4 +366,5 @@ struct hd44780_common *hd44780_common_alloc(void)
}
EXPORT_SYMBOL_GPL(hd44780_common_alloc);
+MODULE_DESCRIPTION("Common functions for HD44780 (and compatibles) LCD displays");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index ce987944662c..8a7034b41d50 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -483,6 +483,7 @@ static int ht16k33_led_probe(struct device *dev, struct led_classdev *led,
led->max_brightness = MAX_BRIGHTNESS;
err = devm_led_classdev_register_ext(dev, led, &init_data);
+ fwnode_handle_put(init_data.fwnode);
if (err)
dev_err(dev, "Failed to register LED\n");
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index e2b546210f8d..731ffdfafc4e 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -8,7 +8,9 @@
* Copyright (C) 2021 Glider bv
*/
+#ifndef CONFIG_PANEL_BOOT_MESSAGE
#include <generated/utsrelease.h>
+#endif
#include <linux/container_of.h>
#include <linux/device.h>
@@ -312,6 +314,12 @@ static int linedisp_init_map(struct linedisp *linedisp)
return 0;
}
+#ifdef CONFIG_PANEL_BOOT_MESSAGE
+#define LINEDISP_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
+#else
+#define LINEDISP_INIT_TEXT "Linux " UTS_RELEASE " "
+#endif
+
/**
* linedisp_register - register a character line display
* @linedisp: pointer to character line display structure
@@ -359,7 +367,7 @@ int linedisp_register(struct linedisp *linedisp, struct device *parent,
goto out_del_timer;
/* display a default message */
- err = linedisp_display(linedisp, "Linux " UTS_RELEASE " ", -1);
+ err = linedisp_display(linedisp, LINEDISP_INIT_TEXT, -1);
if (err)
goto out_del_dev;
@@ -388,4 +396,5 @@ void linedisp_unregister(struct linedisp *linedisp)
}
EXPORT_SYMBOL_NS_GPL(linedisp_unregister, LINEDISP);
+MODULE_DESCRIPTION("Character line display core support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 5b59d133b6af..555aee3ee8e7 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -445,7 +445,7 @@ static int __init arch_acpi_numa_init(void)
ret = acpi_numa_init();
if (ret) {
- pr_info("Failed to initialise from firmware\n");
+ pr_debug("Failed to initialise from firmware\n");
return ret;
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index c66d070207a0..75fcb75d5515 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/cacheinfo.h>
+#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
@@ -513,10 +514,10 @@ core_initcall(free_raw_capacity);
*/
static int __init get_cpu_for_node(struct device_node *node)
{
- struct device_node *cpu_node;
int cpu;
+ struct device_node *cpu_node __free(device_node) =
+ of_parse_phandle(node, "cpu", 0);
- cpu_node = of_parse_phandle(node, "cpu", 0);
if (!cpu_node)
return -1;
@@ -527,7 +528,6 @@ static int __init get_cpu_for_node(struct device_node *node)
pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
cpu_node, cpumask_pr_args(cpu_possible_mask));
- of_node_put(cpu_node);
return cpu;
}
@@ -538,28 +538,28 @@ static int __init parse_core(struct device_node *core, int package_id,
bool leaf = true;
int i = 0;
int cpu;
- struct device_node *t;
do {
snprintf(name, sizeof(name), "thread%d", i);
- t = of_get_child_by_name(core, name);
- if (t) {
- leaf = false;
- cpu = get_cpu_for_node(t);
- if (cpu >= 0) {
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].cluster_id = cluster_id;
- cpu_topology[cpu].core_id = core_id;
- cpu_topology[cpu].thread_id = i;
- } else if (cpu != -ENODEV) {
- pr_err("%pOF: Can't get CPU for thread\n", t);
- of_node_put(t);
- return -EINVAL;
- }
- of_node_put(t);
+ struct device_node *t __free(device_node) =
+ of_get_child_by_name(core, name);
+
+ if (!t)
+ break;
+
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else if (cpu != -ENODEV) {
+ pr_err("%pOF: Can't get CPU for thread\n", t);
+ return -EINVAL;
}
i++;
- } while (t);
+ } while (1);
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
@@ -586,7 +586,6 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
char name[20];
bool leaf = true;
bool has_cores = false;
- struct device_node *c;
int core_id = 0;
int i, ret;
@@ -598,49 +597,50 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
i = 0;
do {
snprintf(name, sizeof(name), "cluster%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- leaf = false;
- ret = parse_cluster(c, package_id, i, depth + 1);
- if (depth > 0)
- pr_warn("Topology for clusters of clusters not yet supported\n");
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(cluster, name);
+
+ if (!c)
+ break;
+
+ leaf = false;
+ ret = parse_cluster(c, package_id, i, depth + 1);
+ if (depth > 0)
+ pr_warn("Topology for clusters of clusters not yet supported\n");
+ if (ret != 0)
+ return ret;
i++;
- } while (c);
+ } while (1);
/* Now check for cores */
i = 0;
do {
snprintf(name, sizeof(name), "core%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- has_cores = true;
-
- if (depth == 0) {
- pr_err("%pOF: cpu-map children should be clusters\n",
- c);
- of_node_put(c);
- return -EINVAL;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(cluster, name);
- if (leaf) {
- ret = parse_core(c, package_id, cluster_id,
- core_id++);
- } else {
- pr_err("%pOF: Non-leaf cluster with core %s\n",
- cluster, name);
- ret = -EINVAL;
- }
+ if (!c)
+ break;
+
+ has_cores = true;
- of_node_put(c);
+ if (depth == 0) {
+ pr_err("%pOF: cpu-map children should be clusters\n", c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, package_id, cluster_id, core_id++);
if (ret != 0)
return ret;
+ } else {
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
+ return -EINVAL;
}
+
i++;
- } while (c);
+ } while (1);
if (leaf && !has_cores)
pr_warn("%pOF: empty cluster\n", cluster);
@@ -651,22 +651,24 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
static int __init parse_socket(struct device_node *socket)
{
char name[20];
- struct device_node *c;
bool has_socket = false;
int package_id = 0, ret;
do {
snprintf(name, sizeof(name), "socket%d", package_id);
- c = of_get_child_by_name(socket, name);
- if (c) {
- has_socket = true;
- ret = parse_cluster(c, package_id, -1, 0);
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(socket, name);
+
+ if (!c)
+ break;
+
+ has_socket = true;
+ ret = parse_cluster(c, package_id, -1, 0);
+ if (ret != 0)
+ return ret;
+
package_id++;
- } while (c);
+ } while (1);
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
@@ -676,11 +678,11 @@ static int __init parse_socket(struct device_node *socket)
static int __init parse_dt_topology(void)
{
- struct device_node *cn, *map;
int ret = 0;
int cpu;
+ struct device_node *cn __free(device_node) =
+ of_find_node_by_path("/cpus");
- cn = of_find_node_by_path("/cpus");
if (!cn) {
pr_err("No CPU information found in DT\n");
return 0;
@@ -690,13 +692,15 @@ static int __init parse_dt_topology(void)
* When topology is provided cpu-map is essentially a root
* cluster with restricted subnodes.
*/
- map = of_get_child_by_name(cn, "cpu-map");
+ struct device_node *map __free(device_node) =
+ of_get_child_by_name(cn, "cpu-map");
+
if (!map)
- goto out;
+ return ret;
ret = parse_socket(map);
if (ret != 0)
- goto out_map;
+ return ret;
topology_normalize_cpu_scale();
@@ -706,14 +710,9 @@ static int __init parse_dt_topology(void)
*/
for_each_possible_cpu(cpu)
if (cpu_topology[cpu].package_id < 0) {
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out_map:
- of_node_put(map);
-out:
- of_node_put(cn);
return ret;
}
#endif
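The arch_topology.c conversions above rely on the scoped-cleanup helpers from <linux/cleanup.h>. A minimal standalone sketch of the idiom, assuming a kernel where of.h provides the device_node cleanup class; the function name and error codes are illustrative:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/of.h>

static int example_count_cpu_map_children(void)
{
	struct device_node *cn __free(device_node) =
		of_find_node_by_path("/cpus");
	if (!cn)
		return -ENOENT;

	struct device_node *map __free(device_node) =
		of_get_child_by_name(cn, "cpu-map");
	if (!map)
		return 0;	/* cn is put automatically on return */

	/* both references are dropped via of_node_put() when they leave scope */
	return of_get_child_count(map);
}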
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 3f01f4ec69e5..54b92839e05c 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -177,10 +177,10 @@ static const struct auxiliary_device_id *auxiliary_match_id(const struct auxilia
return NULL;
}
-static int auxiliary_match(struct device *dev, struct device_driver *drv)
+static int auxiliary_match(struct device *dev, const struct device_driver *drv)
{
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
return !!auxiliary_match_id(auxdrv->id_table, auxdev);
}
@@ -203,7 +203,7 @@ static const struct dev_pm_ops auxiliary_dev_pm_ops = {
static int auxiliary_bus_probe(struct device *dev)
{
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
@@ -222,7 +222,7 @@ static int auxiliary_bus_probe(struct device *dev)
static void auxiliary_bus_remove(struct device *dev)
{
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
if (auxdrv->remove)
@@ -232,7 +232,7 @@ static void auxiliary_bus_remove(struct device *dev)
static void auxiliary_bus_shutdown(struct device *dev)
{
- struct auxiliary_driver *auxdrv = NULL;
+ const struct auxiliary_driver *auxdrv = NULL;
struct auxiliary_device *auxdev;
if (dev->driver) {
diff --git a/drivers/base/base.h b/drivers/base/base.h
index db4f910e8e36..0b53593372d7 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -112,7 +112,7 @@ struct device_private {
struct klist_node knode_bus;
struct klist_node knode_class;
struct list_head deferred_probe;
- struct device_driver *async_driver;
+ const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
u8 dead:1;
@@ -155,13 +155,13 @@ bool bus_is_registered(const struct bus_type *bus);
int bus_add_driver(struct device_driver *drv);
void bus_remove_driver(struct device_driver *drv);
-void device_release_driver_internal(struct device *dev, struct device_driver *drv,
+void device_release_driver_internal(struct device *dev, const struct device_driver *drv,
struct device *parent);
-void driver_detach(struct device_driver *drv);
+void driver_detach(const struct device_driver *drv);
void driver_deferred_probe_del(struct device *dev);
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf);
-static inline int driver_match_device(struct device_driver *drv,
+static inline int driver_match_device(const struct device_driver *drv,
struct device *dev)
{
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
@@ -175,8 +175,8 @@ static inline void dev_sync_state(struct device *dev)
dev->driver->sync_state(dev);
}
-int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups);
-void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups);
+int driver_add_groups(const struct device_driver *drv, const struct attribute_group **groups);
+void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
int devres_release_all(struct device *dev);
@@ -192,8 +192,8 @@ extern struct kset *devices_kset;
void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-int module_add_driver(struct module *mod, struct device_driver *drv);
-void module_remove_driver(struct device_driver *drv);
+int module_add_driver(struct module *mod, const struct device_driver *drv);
+void module_remove_driver(const struct device_driver *drv);
#else
static inline int module_add_driver(struct module *mod,
struct device_driver *drv)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2b4c0624b704..730cae66607c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -5021,11 +5021,22 @@ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- if (err != -EPROBE_DEFER) {
- dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- } else {
+ switch (err) {
+ case -EPROBE_DEFER:
device_set_deferred_probe_reason(dev, &vaf);
dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+
+ case -ENOMEM:
+ /*
+ * We don't print anything on -ENOMEM, there is already enough
+ * output.
+ */
+ break;
+
+ default:
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
}
va_end(args);
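For reference, the calling convention affected by the switch above: ordinary errors are logged, -EPROBE_DEFER is recorded as the deferral reason, and -ENOMEM is now silent. A minimal sketch with a hypothetical clock consumer:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* logs real errors, records -EPROBE_DEFER, stays quiet on -ENOMEM */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}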
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index b57326fd48d4..fdaa24bb641a 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -26,7 +26,7 @@
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
-static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+static int cpu_subsys_match(struct device *dev, const struct device_driver *drv)
{
/* ACPI style match is the only one that may succeed. */
if (acpi_driver_match_device(dev, drv))
@@ -316,7 +316,7 @@ static ssize_t crash_hotplug_show(struct device *dev,
{
return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
-static DEVICE_ATTR_ADMIN_RO(crash_hotplug);
+static DEVICE_ATTR_RO(crash_hotplug);
#endif
static void cpu_device_release(struct device *dev)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 83d352394fdf..9b745ba54de1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -568,7 +568,7 @@ static void device_remove(struct device *dev)
dev->driver->remove(dev);
}
-static int call_driver_probe(struct device *dev, struct device_driver *drv)
+static int call_driver_probe(struct device *dev, const struct device_driver *drv)
{
int ret = 0;
@@ -599,7 +599,7 @@ static int call_driver_probe(struct device *dev, struct device_driver *drv)
return ret;
}
-static int really_probe(struct device *dev, struct device_driver *drv)
+static int really_probe(struct device *dev, const struct device_driver *drv)
{
bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
!drv->suppress_bind_attrs;
@@ -628,7 +628,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
}
re_probe:
- dev->driver = drv;
+ // FIXME - this cast should not be needed "soon"
+ dev->driver = (struct device_driver *)drv;
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -727,7 +728,7 @@ done:
/*
* For initcall_debug, show the driver probe time.
*/
-static int really_probe_debug(struct device *dev, struct device_driver *drv)
+static int really_probe_debug(struct device *dev, const struct device_driver *drv)
{
ktime_t calltime, rettime;
int ret;
@@ -774,7 +775,7 @@ void wait_for_device_probe(void)
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
-static int __driver_probe_device(struct device_driver *drv, struct device *dev)
+static int __driver_probe_device(const struct device_driver *drv, struct device *dev)
{
int ret = 0;
@@ -819,7 +820,7 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev)
*
* If the device has a parent, runtime-resume the parent before driver probing.
*/
-static int driver_probe_device(struct device_driver *drv, struct device *dev)
+static int driver_probe_device(const struct device_driver *drv, struct device *dev)
{
int trigger_count = atomic_read(&deferred_trigger_count);
int ret;
@@ -863,7 +864,7 @@ static int __init save_async_options(char *buf)
}
__setup("driver_async_probe=", save_async_options);
-static bool driver_allows_async_probing(struct device_driver *drv)
+static bool driver_allows_async_probing(const struct device_driver *drv)
{
switch (drv->probe_type) {
case PROBE_PREFER_ASYNCHRONOUS:
@@ -1117,7 +1118,7 @@ static void __device_driver_unlock(struct device *dev, struct device *parent)
* Manually attach driver to a device. Will acquire both @dev lock and
* @dev->parent lock if needed. Returns 0 on success, -ERR on failure.
*/
-int device_driver_attach(struct device_driver *drv, struct device *dev)
+int device_driver_attach(const struct device_driver *drv, struct device *dev)
{
int ret;
@@ -1137,7 +1138,7 @@ EXPORT_SYMBOL_GPL(device_driver_attach);
static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
{
struct device *dev = _dev;
- struct device_driver *drv;
+ const struct device_driver *drv;
int ret;
__device_driver_lock(dev, dev->parent);
@@ -1153,7 +1154,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
static int __driver_attach(struct device *dev, void *data)
{
- struct device_driver *drv = data;
+ const struct device_driver *drv = data;
bool async = false;
int ret;
@@ -1226,9 +1227,10 @@ static int __driver_attach(struct device *dev, void *data)
* returns 0 and the @dev->driver is set, we've found a
* compatible pair.
*/
-int driver_attach(struct device_driver *drv)
+int driver_attach(const struct device_driver *drv)
{
- return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
+ /* The (void *) will be put back to const * in __driver_attach() */
+ return bus_for_each_dev(drv->bus, NULL, (void *)drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
@@ -1284,7 +1286,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
}
void device_release_driver_internal(struct device *dev,
- struct device_driver *drv,
+ const struct device_driver *drv,
struct device *parent)
{
__device_driver_lock(dev, parent);
@@ -1333,7 +1335,7 @@ void device_driver_detach(struct device *dev)
* driver_detach - detach driver from all devices it controls.
* @drv: driver.
*/
-void driver_detach(struct device_driver *drv)
+void driver_detach(const struct device_driver *drv)
{
struct device_private *dev_prv;
struct device *dev;
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 3df0025d12aa..a2ce0ead06a6 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -85,7 +85,7 @@ static void group_close_release(struct device *dev, void *res)
/* noop */
}
-static struct devres_group * node_to_group(struct devres_node *node)
+static struct devres_group *node_to_group(struct devres_node *node)
{
if (node->release == &group_open_release)
return container_of(node, struct devres_group, node[0]);
@@ -107,8 +107,8 @@ static bool check_dr_size(size_t size, size_t *tot_size)
return true;
}
-static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp, int nid)
+static __always_inline struct devres *alloc_dr(dr_release_t release,
+ size_t size, gfp_t gfp, int nid)
{
size_t tot_size;
struct devres *dr;
@@ -283,8 +283,8 @@ static struct devres *find_dr(struct device *dev, dr_release_t release,
* RETURNS:
* Pointer to found devres, NULL if not found.
*/
-void * devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -313,8 +313,8 @@ EXPORT_SYMBOL_GPL(devres_find);
* RETURNS:
* Pointer to found or added devres.
*/
-void * devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data)
+void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data)
{
struct devres *new_dr = container_of(new_res, struct devres, data);
struct devres *dr;
@@ -349,8 +349,8 @@ EXPORT_SYMBOL_GPL(devres_get);
* RETURNS:
* Pointer to removed devres on success, NULL if not found.
*/
-void * devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -549,7 +549,7 @@ int devres_release_all(struct device *dev)
* RETURNS:
* ID of the new group, NULL on failure.
*/
-void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
struct devres_group *grp;
unsigned long flags;
@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
grp->id = grp;
if (id)
grp->id = id;
+ grp->color = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
@@ -576,7 +577,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
EXPORT_SYMBOL_GPL(devres_open_group);
/* Find devres group with ID @id. If @id is NULL, look for the latest. */
-static struct devres_group * find_group(struct device *dev, void *id)
+static struct devres_group *find_group(struct device *dev, void *id)
{
struct devres_node *node;
@@ -896,9 +897,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
/*
* Otherwise: allocate new, larger chunk. We need to allocate before
* taking the lock as most probably the caller uses GFP_KERNEL.
+ * alloc_dr() will call check_dr_size() to reserve extra memory
+ * for struct devres automatically, so the user-requested size
+ * @new_size is passed to it directly, just as devm_kmalloc() does.
*/
new_dr = alloc_dr(devm_kmalloc_release,
- total_new_size, gfp, dev_to_node(dev));
+ new_size, gfp, dev_to_node(dev));
if (!new_dr)
return NULL;
@@ -1222,7 +1226,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
*/
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
- WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+ /*
+ * Use devres_release() to prevent memory leakage as
+ * devm_free_pages() does.
+ */
+ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
(__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
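A minimal sketch of the devres group API whose initialization is touched above; the 64-byte allocation and function name are illustrative:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int example_setup(struct device *dev)
{
	void *grp = devres_open_group(dev, NULL, GFP_KERNEL);

	if (!grp)
		return -ENOMEM;

	if (!devm_kzalloc(dev, 64, GFP_KERNEL)) {
		/* releases everything allocated since devres_open_group() */
		devres_release_group(dev, grp);
		return -ENOMEM;
	}

	devres_close_group(dev, grp);
	return 0;
}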
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index c8436c26ed6a..88c6fd1f1992 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -148,7 +148,7 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
-struct device *driver_find_device(struct device_driver *drv,
+struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data))
{
@@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(driver_find_device);
* @drv: driver.
* @attr: driver attribute descriptor.
*/
-int driver_create_file(struct device_driver *drv,
+int driver_create_file(const struct device_driver *drv,
const struct driver_attribute *attr)
{
int error;
@@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(driver_create_file);
* @drv: driver.
* @attr: driver attribute descriptor.
*/
-void driver_remove_file(struct device_driver *drv,
+void driver_remove_file(const struct device_driver *drv,
const struct driver_attribute *attr)
{
if (drv)
@@ -199,13 +199,13 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-int driver_add_groups(struct device_driver *drv,
+int driver_add_groups(const struct device_driver *drv,
const struct attribute_group **groups)
{
return sysfs_create_groups(&drv->p->kobj, groups);
}
-void driver_remove_groups(struct device_driver *drv,
+void driver_remove_groups(const struct device_driver *drv,
const struct attribute_group **groups)
{
sysfs_remove_groups(&drv->p->kobj, groups);
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 5ca00e02fe82..a03701674265 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -37,6 +37,13 @@ config FW_LOADER_DEBUG
SHA256 checksums to the kernel log for each firmware file that is
loaded.
+config RUST_FW_LOADER_ABSTRACTIONS
+ bool "Rust Firmware Loader abstractions"
+ depends on RUST
+ depends on FW_LOADER=y
+ help
+ This enables the Rust abstractions for the firmware loader API.
+
if FW_LOADER
config FW_LOADER_PAGED_BUF
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index da8ca01d011c..a03ee4b11134 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -1172,34 +1172,11 @@ static void request_firmware_work_func(struct work_struct *work)
kfree(fw_work);
}
-/**
- * request_firmware_nowait() - asynchronous version of request_firmware
- * @module: module requesting the firmware
- * @uevent: sends uevent to copy the firmware image if this flag
- * is non-zero else the firmware copy must be done manually.
- * @name: name of firmware file
- * @device: device for which firmware is being loaded
- * @gfp: allocation flags
- * @context: will be passed over to @cont, and
- * @fw may be %NULL if firmware request fails.
- * @cont: function will be called asynchronously when the firmware
- * request is over.
- *
- * Caller must hold the reference count of @device.
- *
- * Asynchronous variant of request_firmware() for user contexts:
- * - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
- *
- * - can't sleep at all if @gfp is GFP_ATOMIC.
- **/
-int
-request_firmware_nowait(
+
+static int _request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
- void (*cont)(const struct firmware *fw, void *context))
+ void (*cont)(const struct firmware *fw, void *context), bool nowarn)
{
struct firmware_work *fw_work;
@@ -1217,7 +1194,8 @@ request_firmware_nowait(
fw_work->context = context;
fw_work->cont = cont;
fw_work->opt_flags = FW_OPT_NOWAIT |
- (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
+ (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER) |
+ (nowarn ? FW_OPT_NO_WARN : 0);
if (!uevent && fw_cache_is_setup(device, name)) {
kfree_const(fw_work->name);
@@ -1236,8 +1214,66 @@ request_firmware_nowait(
schedule_work(&fw_work->work);
return 0;
}
+
+/**
+ * request_firmware_nowait() - asynchronous version of request_firmware
+ * @module: module requesting the firmware
+ * @uevent: sends uevent to copy the firmware image if this flag
+ * is non-zero else the firmware copy must be done manually.
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ * @fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ * request is over.
+ *
+ * Caller must hold the reference count of @device.
+ *
+ * Asynchronous variant of request_firmware() for user contexts:
+ * - sleep for as small periods as possible since it may
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
+ *
+ * - can't sleep at all if @gfp is GFP_ATOMIC.
+ **/
+int request_firmware_nowait(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return _request_firmware_nowait(module, uevent, name, device, gfp,
+ context, cont, false);
+
+}
EXPORT_SYMBOL(request_firmware_nowait);
+/**
+ * firmware_request_nowait_nowarn() - async version of firmware_request_nowarn()
+ * @module: module requesting the firmware
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ * @fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ * request is over.
+ *
+ * Similar in function to request_firmware_nowait(), but doesn't print a warning
+ * when the firmware file could not be found and always sends a uevent to copy
+ * the firmware image.
+ */
+int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return _request_firmware_nowait(module, FW_ACTION_UEVENT, name, device,
+ gfp, context, cont, true);
+}
+EXPORT_SYMBOL_GPL(firmware_request_nowait_nowarn);
+
#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
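A minimal sketch of how a driver might use the firmware_request_nowait_nowarn() helper exported above; the firmware path and callback are hypothetical:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static void foo_fw_done(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_dbg(dev, "optional firmware not found\n");
		return;
	}
	/* ... program fw->data / fw->size into the hardware ... */
	release_firmware(fw);
}

static int foo_load_optional_fw(struct device *dev)
{
	/* no "firmware not found" warning is printed if the file is absent */
	return firmware_request_nowait_nowarn(THIS_MODULE, "foo/optional.bin",
					      dev, GFP_KERNEL, dev, foo_fw_done);
}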
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index e23d0b49a793..bfd9215c9070 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -23,7 +23,7 @@ struct isa_dev {
#define to_isa_dev(x) container_of((x), struct isa_dev, dev)
-static int isa_bus_match(struct device *dev, struct device_driver *driver)
+static int isa_bus_match(struct device *dev, const struct device_driver *driver)
{
struct isa_driver *isa_driver = to_isa_driver(driver);
diff --git a/drivers/base/module.c b/drivers/base/module.c
index a1b55da07127..7af224e6914a 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -9,7 +9,7 @@
#include <linux/string.h>
#include "base.h"
-static char *make_driver_name(struct device_driver *drv)
+static char *make_driver_name(const struct device_driver *drv)
{
char *driver_name;
@@ -30,7 +30,7 @@ static void module_create_drivers_dir(struct module_kobject *mk)
mutex_unlock(&drivers_dir_mutex);
}
-int module_add_driver(struct module *mod, struct device_driver *drv)
+int module_add_driver(struct module *mod, const struct device_driver *drv)
{
char *driver_name;
struct module_kobject *mk = NULL;
@@ -89,7 +89,7 @@ out:
return ret;
}
-void module_remove_driver(struct device_driver *drv)
+void module_remove_driver(const struct device_driver *drv)
{
struct module_kobject *mk = NULL;
char *driver_name;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 10c577963418..4c3ee6521ba5 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -608,7 +608,7 @@ int platform_device_add_resources(struct platform_device *pdev,
struct resource *r = NULL;
if (res) {
- r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
+ r = kmemdup_array(res, num, sizeof(*r), GFP_KERNEL);
if (!r)
return -ENOMEM;
}
@@ -1122,7 +1122,7 @@ static int platform_legacy_resume(struct device *dev)
int platform_pm_suspend(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1140,7 +1140,7 @@ int platform_pm_suspend(struct device *dev)
int platform_pm_resume(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1162,7 +1162,7 @@ int platform_pm_resume(struct device *dev)
int platform_pm_freeze(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1180,7 +1180,7 @@ int platform_pm_freeze(struct device *dev)
int platform_pm_thaw(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1198,7 +1198,7 @@ int platform_pm_thaw(struct device *dev)
int platform_pm_poweroff(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1216,7 +1216,7 @@ int platform_pm_poweroff(struct device *dev)
int platform_pm_restore(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1332,7 +1332,7 @@ __ATTRIBUTE_GROUPS(platform_dev);
* and compare it against the name of the driver. Return whether they match
* or not.
*/
-static int platform_match(struct device *dev, struct device_driver *drv)
+static int platform_match(struct device *dev, const struct device_driver *drv)
{
struct platform_device *pdev = to_platform_device(dev);
struct platform_driver *pdrv = to_platform_driver(drv);
@@ -1420,14 +1420,8 @@ static void platform_remove(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- if (drv->remove_new) {
- drv->remove_new(dev);
- } else if (drv->remove) {
- int ret = drv->remove(dev);
-
- if (ret)
- dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
- }
+ if (drv->remove)
+ drv->remove(dev);
dev_pm_domain_detach(_dev, true);
}
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index f0df2da6d522..2dea9d259c49 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -110,7 +110,8 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, min, max);
unsigned long *entry, *lower, *upper;
- unsigned long lower_index, lower_last;
+ /* initialized to work around false-positive -Wuninitialized warning */
+ unsigned long lower_index = 0, lower_last = 0;
unsigned long upper_index, upper_last;
int ret = 0;
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 6b5d34919c72..6ecfc821cf83 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -26,7 +26,7 @@ static unsigned int bcma_bus_next_num;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
-static int bcma_bus_match(struct device *dev, struct device_driver *drv);
+static int bcma_bus_match(struct device *dev, const struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static void bcma_device_remove(struct device *dev);
static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
@@ -584,10 +584,10 @@ void bcma_driver_unregister(struct bcma_driver *drv)
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
-static int bcma_bus_match(struct device *dev, struct device_driver *drv)
+static int bcma_bus_match(struct device *dev, const struct device_driver *drv)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
- struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ const struct bcma_driver *adrv = container_of_const(drv, struct bcma_driver, drv);
const struct bcma_device_id *cid = &core->id;
const struct bcma_device_id *did;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 008e850555f4..9c8b19a22c2a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -362,7 +362,7 @@ enum rbd_watch_state {
enum rbd_lock_state {
RBD_LOCK_STATE_UNLOCKED,
RBD_LOCK_STATE_LOCKED,
- RBD_LOCK_STATE_RELEASING,
+ RBD_LOCK_STATE_QUIESCING,
};
/* WatchNotify::ClientId */
@@ -422,7 +422,7 @@ struct rbd_device {
struct list_head running_list;
struct completion acquire_wait;
int acquire_err;
- struct completion releasing_wait;
+ struct completion quiescing_wait;
spinlock_t object_map_lock;
u8 *object_map;
@@ -525,7 +525,7 @@ static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
lockdep_assert_held(&rbd_dev->lock_rwsem);
return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
- rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
+ rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING;
}
static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
@@ -3457,13 +3457,14 @@ static void rbd_lock_del_request(struct rbd_img_request *img_req)
lockdep_assert_held(&rbd_dev->lock_rwsem);
spin_lock(&rbd_dev->lock_lists_lock);
if (!list_empty(&img_req->lock_item)) {
+ rbd_assert(!list_empty(&rbd_dev->running_list));
list_del_init(&img_req->lock_item);
- need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+ need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
list_empty(&rbd_dev->running_list));
}
spin_unlock(&rbd_dev->lock_lists_lock);
if (need_wakeup)
- complete(&rbd_dev->releasing_wait);
+ complete(&rbd_dev->quiescing_wait);
}
static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
@@ -3476,11 +3477,6 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
if (rbd_lock_add_request(img_req))
return 1;
- if (rbd_dev->opts->exclusive) {
- WARN_ON(1); /* lock got released? */
- return -EROFS;
- }
-
/*
* Note the use of mod_delayed_work() in rbd_acquire_lock()
* and cancel_delayed_work() in wake_lock_waiters().
@@ -4181,16 +4177,16 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
/*
* Ensure that all in-flight IO is flushed.
*/
- rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
- rbd_assert(!completion_done(&rbd_dev->releasing_wait));
+ rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING;
+ rbd_assert(!completion_done(&rbd_dev->quiescing_wait));
if (list_empty(&rbd_dev->running_list))
return true;
up_write(&rbd_dev->lock_rwsem);
- wait_for_completion(&rbd_dev->releasing_wait);
+ wait_for_completion(&rbd_dev->quiescing_wait);
down_write(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING)
return false;
rbd_assert(list_empty(&rbd_dev->running_list));
@@ -4601,6 +4597,10 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
rbd_warn(rbd_dev, "failed to update lock cookie: %d",
ret);
+ if (rbd_dev->opts->exclusive)
+ rbd_warn(rbd_dev,
+ "temporarily releasing lock on exclusive mapping");
+
/*
* Lock cookie cannot be updated on older OSDs, so do
* a manual release and queue an acquire.
@@ -5376,7 +5376,7 @@ static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
INIT_LIST_HEAD(&rbd_dev->acquiring_list);
INIT_LIST_HEAD(&rbd_dev->running_list);
init_completion(&rbd_dev->acquire_wait);
- init_completion(&rbd_dev->releasing_wait);
+ init_completion(&rbd_dev->quiescing_wait);
spin_lock_init(&rbd_dev->object_map_lock);
@@ -6582,11 +6582,6 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
if (ret)
return ret;
- /*
- * The lock may have been released by now, unless automatic lock
- * transitions are disabled.
- */
- rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
return 0;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 78b96cd63de9..dd68b8191a0a 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -80,11 +80,11 @@ static phys_addr_t mc_portal_base_phys_addr;
*
* Returns 1 on success, 0 otherwise.
*/
-static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+static int fsl_mc_bus_match(struct device *dev, const struct device_driver *drv)
{
const struct fsl_mc_device_id *id;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ const struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
/* When driver_override is set, only bind to the matching driver */
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 4acfac73ca9a..b3eafcf2a2c5 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1694,10 +1694,10 @@ static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
mhi_dev->name);
}
-static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+static int mhi_ep_match(struct device *dev, const struct device_driver *drv)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
- struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
const struct mhi_device_id *id;
/*
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 173f79918741..ce7d2e62c2f1 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -1442,10 +1442,10 @@ static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
mhi_dev->name);
}
-static int mhi_match(struct device *dev, struct device_driver *drv)
+static int mhi_match(struct device *dev, const struct device_driver *drv)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
- struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_driver *mhi_drv = to_mhi_driver(drv);
const struct mhi_device_id *id;
/*
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 8baf14bd5eff..12dd32fd0b62 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -37,7 +37,7 @@
/* Each block of device registers is 64 bytes */
#define CDMM_DRB_SIZE 64
-#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+#define to_mips_cdmm_driver(d) container_of_const(d, struct mips_cdmm_driver, drv)
/* Default physical base address */
static phys_addr_t mips_cdmm_default_base;
@@ -59,10 +59,10 @@ mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
return ret ? table : NULL;
}
-static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+static int mips_cdmm_match(struct device *dev, const struct device_driver *drv)
{
struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
- struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+ const struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
}
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 641c1a6adc8a..8412406c4f1d 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -83,10 +83,10 @@ static const struct attribute_group *moxtet_dev_groups[] = {
NULL,
};
-static int moxtet_match(struct device *dev, struct device_driver *drv)
+static int moxtet_match(struct device *dev, const struct device_driver *drv)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
- struct moxtet_driver *tdrv = to_moxtet_driver(drv);
+ const struct moxtet_driver *tdrv = to_moxtet_driver(drv);
const enum turris_mox_module_id *t;
if (of_driver_match_device(dev, drv))
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index ac6c7e4900f4..eee41fb798a1 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -130,7 +130,7 @@ struct sunxi_rsb {
/* bus / slave device related functions */
static const struct bus_type sunxi_rsb_bus;
-static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
+static int sunxi_rsb_device_match(struct device *dev, const struct device_driver *drv)
{
return of_driver_match_device(dev, drv);
}
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 8767e04d6c89..2b59ef61dda2 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -2291,11 +2291,9 @@ static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
const char *name)
{
struct device_node *np = ddata->dev->of_node;
- struct property *prop;
- const __be32 *p;
u32 val;
- of_property_for_each_u32(np, name, prop, p, val) {
+ of_property_for_each_u32(np, name, val) {
if (val >= SYSC_NR_IDLEMODES) {
dev_err(ddata->dev, "invalid idlemode: %i\n", val);
return -EINVAL;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 49e4829b7264..9b0f37d4b9d4 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3473,7 +3473,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
return 0;
}
-static int cdrom_sysctl_info(struct ctl_table *ctl, int write,
+static int cdrom_sysctl_info(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
@@ -3586,7 +3586,7 @@ static void cdrom_update_settings(void)
mutex_unlock(&cdrom_mutex);
}
-static int cdrom_sysctl_handler(struct ctl_table *ctl, int write,
+static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 236d381dc5f7..07371cb653d3 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -262,10 +262,10 @@ EXPORT_SYMBOL_GPL(cdx_clear_master);
*
* Return: true on success, false otherwise.
*/
-static int cdx_bus_match(struct device *dev, struct device_driver *drv)
+static int cdx_bus_match(struct device *dev, const struct device_driver *drv)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
- struct cdx_driver *cdx_drv = to_cdx_driver(drv);
+ const struct cdx_driver *cdx_drv = to_cdx_driver(drv);
const struct cdx_device_id *found_id = NULL;
const struct cdx_device_id *ids;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2597cb43f438..87fe61295ea1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
* Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
*
@@ -56,6 +56,10 @@
#include <linux/sched/isolation.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
+#ifdef CONFIG_VDSO_GETRANDOM
+#include <vdso/getrandom.h>
+#include <vdso/datapage.h>
+#endif
#include <asm/archrandom.h>
#include <asm/processor.h>
#include <asm/irq.h>
@@ -271,6 +275,15 @@ static void crng_reseed(struct work_struct *work)
if (next_gen == ULONG_MAX)
++next_gen;
WRITE_ONCE(base_crng.generation, next_gen);
+#ifdef CONFIG_VDSO_GETRANDOM
+ /* base_crng.generation's invalid value is ULONG_MAX, while
+ * _vdso_rng_data.generation's invalid value is 0, so add one to the
+ * former to arrive at the latter. Use smp_store_release so that this
+ * is ordered with the write above to base_crng.generation. Pairs with
+ * the smp_rmb() before the syscall in the vDSO code.
+ */
+ smp_store_release(&_vdso_rng_data.generation, next_gen + 1);
+#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -721,6 +734,9 @@ static void __cold _credit_init_bits(size_t bits)
if (static_key_initialized && system_unbound_wq)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
+#ifdef CONFIG_VDSO_GETRANDOM
+ WRITE_ONCE(_vdso_rng_data.is_ready, true);
+#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
@@ -1604,7 +1620,7 @@ static u8 sysctl_bootid[UUID_SIZE];
* UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
*/
-static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
size_t *lenp, loff_t *ppos)
{
u8 tmp_uuid[UUID_SIZE], *uuid;
@@ -1635,7 +1651,7 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
}
/* The same as proc_dointvec, but writes don't change anything. */
-static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
size_t *lenp, loff_t *ppos)
{
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
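The comment added to crng_reseed() above describes a release-ordered handoff on a generation counter, offset by one so that 0 keeps meaning "nothing published yet" on the vDSO side. A stripped-down sketch of that handshake, with made-up names and not taken from the vDSO code (which pairs the release store with an smp_rmb() instead of the acquire load used here):

/*
 * Illustrative sketch: the producer writes its state, then release-stores a
 * bumped generation; a reader acquire-loads the generation and treats 0 as
 * "never published". Names and types are invented for this example.
 */
#include <asm/barrier.h>
#include <linux/types.h>

static unsigned long example_generation;	/* 0 == never published */
static unsigned long example_state;		/* stand-in for reseeded state */

static void example_publish(unsigned long new_state)
{
	example_state = new_state;			/* write the state first */
	smp_store_release(&example_generation,
			  example_generation + 1);	/* then make it visible */
}

static bool example_read(unsigned long *out)
{
	unsigned long gen = smp_load_acquire(&example_generation);

	if (!gen)
		return false;
	*out = example_state;	/* ordered after the acquire above */
	return true;
}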
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 1a4e6340f95c..058420562020 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -81,13 +81,11 @@ err:
static int __set_clk_rates(struct device_node *node, bool clk_supplier)
{
struct of_phandle_args clkspec;
- struct property *prop;
- const __be32 *cur;
int rc, index = 0;
struct clk *clk;
u32 rate;
- of_property_for_each_u32(node, "assigned-clock-rates", prop, cur, rate) {
+ of_property_for_each_u32(node, "assigned-clock-rates", rate) {
if (rate) {
rc = of_parse_phandle_with_args(node, "assigned-clocks",
"#clock-cells", index, &clkspec);
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 4ce83c5265b8..a4c92c5ef3ff 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1175,8 +1175,8 @@ static int si5351_dt_parse(struct i2c_client *client,
{
struct device_node *child, *np = client->dev.of_node;
struct si5351_platform_data *pdata;
- struct property *prop;
- const __be32 *p;
+ u32 array[4];
+ int sz, i;
int num = 0;
u32 val;
@@ -1191,20 +1191,24 @@ static int si5351_dt_parse(struct i2c_client *client,
* property silabs,pll-source : <num src>, [<..>]
* allow to selectively set pll source
*/
- of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
+ sz = of_property_read_variable_u32_array(np, "silabs,pll-source", array, 2, 4);
+ sz = (sz == -EINVAL) ? 0 : sz; /* Missing property is OK */
+ if (sz < 0)
+ return dev_err_probe(&client->dev, sz, "invalid pll-source\n");
+ if (sz % 2)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "missing pll-source for pll %d\n", array[sz - 1]);
+
+ for (i = 0; i < sz; i += 2) {
+ num = array[i];
+ val = array[i + 1];
+
if (num >= 2) {
dev_err(&client->dev,
"invalid pll %d on pll-source prop\n", num);
return -EINVAL;
}
- p = of_prop_next_u32(prop, p, &val);
- if (!p) {
- dev_err(&client->dev,
- "missing pll-source for pll %d\n", num);
- return -EINVAL;
- }
-
switch (val) {
case 0:
pdata->pll_src[num] = SI5351_PLL_SRC_XTAL;
@@ -1232,19 +1236,24 @@ static int si5351_dt_parse(struct i2c_client *client,
pdata->pll_reset[0] = true;
pdata->pll_reset[1] = true;
- of_property_for_each_u32(np, "silabs,pll-reset-mode", prop, p, num) {
+ sz = of_property_read_variable_u32_array(np, "silabs,pll-reset-mode", array, 2, 4);
+ sz = (sz == -EINVAL) ? 0 : sz; /* Missing property is OK */
+ if (sz < 0)
+ return dev_err_probe(&client->dev, sz, "invalid pll-reset-mode\n");
+ if (sz % 2)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "missing pll-reset-mode for pll %d\n", array[sz - 1]);
+
+ for (i = 0; i < sz; i += 2) {
+ num = array[i];
+ val = array[i + 1];
+
if (num >= 2) {
dev_err(&client->dev,
"invalid pll %d on pll-reset-mode prop\n", num);
return -EINVAL;
}
- p = of_prop_next_u32(prop, p, &val);
- if (!p) {
- dev_err(&client->dev,
- "missing pll-reset-mode for pll %d\n", num);
- return -EINVAL;
- }
switch (val) {
case 0:
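The si5351 change above replaces the open-coded cursor walk with of_property_read_variable_u32_array() and then consumes the values as <index value> pairs, treating a missing property (-EINVAL) as acceptable. A condensed sketch of that parse, with a hypothetical property name and limits:

/*
 * Sketch only: of_property_read_variable_u32_array() returns the number of
 * elements read, -EINVAL if the property is absent, or another negative
 * error; the result is then walked two entries at a time.
 */
#include <linux/of.h>
#include <linux/printk.h>

static int example_parse_pairs(struct device_node *np)
{
	u32 array[4];
	int sz, i;

	sz = of_property_read_variable_u32_array(np, "example,pairs", array, 2, 4);
	if (sz == -EINVAL)
		return 0;		/* property is optional */
	if (sz < 0)
		return sz;		/* malformed property */
	if (sz % 2)
		return -EINVAL;		/* index without a matching value */

	for (i = 0; i < sz; i += 2)
		pr_info("index %u -> value %u\n", array[i], array[i + 1]);

	return 0;
}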
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 8cca52be993f..285ed1ad8a37 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -5364,9 +5364,8 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
struct of_phandle_args clkspec;
- struct property *prop;
const char *clk_name;
- const __be32 *vp;
+ bool found = false;
u32 pv;
int rc;
int count;
@@ -5383,15 +5382,16 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
/* if there is an indices property, use it to transfer the index
* specified into an array offset for the clock-output-names property.
*/
- of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
+ of_property_for_each_u32(clkspec.np, "clock-indices", pv) {
if (index == pv) {
index = count;
+ found = true;
break;
}
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (prop && !vp)
+ if (of_property_present(clkspec.np, "clock-indices") && !found)
return NULL;
if (of_property_read_string_index(clkspec.np, "clock-output-names",
@@ -5504,14 +5504,12 @@ static int parent_ready(struct device_node *np)
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
- struct property *prop;
- const __be32 *cur;
uint32_t idx;
if (!np || !flags)
return -EINVAL;
- of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
+ of_property_for_each_u32(np, "clock-critical", idx)
if (index == idx)
*flags |= CLK_IS_CRITICAL;
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index ad2d0df43dc6..ec60ecb517f1 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -508,7 +508,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
struct clk *fck_clk;
struct da8xx_usb0_clk48 *usb0;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
fck_clk = devm_clk_get(dev, "fck");
@@ -583,7 +583,7 @@ da8xx_cfgchip_register_usb1_clk48(struct device *dev,
{
const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
struct da8xx_usb1_clk48 *usb1;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index ea3788ba46f7..33cc1f73c69d 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -227,11 +227,9 @@ EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
static void qcom_cc_drop_protected(struct device *dev, struct qcom_cc *cc)
{
struct device_node *np = dev->of_node;
- struct property *prop;
- const __be32 *p;
u32 i;
- of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
+ of_property_for_each_u32(np, "protected-clocks", i) {
if (i >= cc->num_rclks)
continue;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index a026ccca7315..28945b6b0ee1 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1040,19 +1040,20 @@ static unsigned long __init exynos4_get_xom(void)
static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx)
{
struct samsung_fixed_rate_clock fclk;
- struct clk *clk;
- unsigned long finpll_f = 24000000;
+ unsigned long finpll_f;
+ unsigned int parent;
char *parent_name;
unsigned int xom = exynos4_get_xom();
parent_name = xom & 1 ? "xusbxti" : "xxti";
- clk = clk_get(NULL, parent_name);
- if (IS_ERR(clk)) {
+ parent = xom & 1 ? CLK_XUSBXTI : CLK_XXTI;
+
+ finpll_f = clk_hw_get_rate(ctx->clk_data.hws[parent]);
+ if (!finpll_f) {
pr_err("%s: failed to lookup parent clock %s, assuming "
"fin_pll clock frequency is 24MHz\n", __func__,
parent_name);
- } else {
- finpll_f = clk_get_rate(clk);
+ finpll_f = 24000000;
}
fclk.id = CLK_FIN_PLL;
diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
index 9695e64fc23b..ff9deeef509b 100644
--- a/drivers/clk/sophgo/clk-sg2042-pll.c
+++ b/drivers/clk/sophgo/clk-sg2042-pll.c
@@ -387,7 +387,7 @@ static int sg2042_clk_pll_set_rate(struct clk_hw *hw,
struct sg2042_pll_clock *pll = to_sg2042_pll_clk(hw);
struct sg2042_pll_ctrl pctrl_table;
unsigned long flags;
- u32 value;
+ u32 value = 0;
int ret;
spin_lock_irqsave(pll->lock, flags);
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index 0399627c226a..845efc1ec800 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -21,11 +21,9 @@ static void __init sunxi_simple_gates_setup(struct device_node *node,
{
struct clk_onecell_data *clk_data;
const char *clk_parent, *clk_name;
- struct property *prop;
struct resource res;
void __iomem *clk_reg;
void __iomem *reg;
- const __be32 *p;
int number, i = 0, j;
u8 clk_bit;
u32 index;
@@ -47,7 +45,7 @@ static void __init sunxi_simple_gates_setup(struct device_node *node,
if (!clk_data->clks)
goto err_free_data;
- of_property_for_each_u32(node, "clock-indices", prop, p, index) {
+ of_property_for_each_u32(node, "clock-indices", index) {
of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index b87f331f63c9..8482ac8e5898 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -24,11 +24,9 @@ static void __init sun8i_h3_bus_gates_init(struct device_node *node)
const char *parents[PARENT_MAX];
struct clk_onecell_data *clk_data;
const char *clk_name;
- struct property *prop;
struct resource res;
void __iomem *clk_reg;
void __iomem *reg;
- const __be32 *p;
int number, i;
u8 clk_bit;
int index;
@@ -58,7 +56,7 @@ static void __init sun8i_h3_bus_gates_init(struct device_node *node)
goto err_free_data;
i = 0;
- of_property_for_each_u32(node, "clock-indices", prop, p, index) {
+ of_property_for_each_u32(node, "clock-indices", index) {
of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/thead/Kconfig b/drivers/clk/thead/Kconfig
index 1710d50bf9d4..95e0d9eb965e 100644
--- a/drivers/clk/thead/Kconfig
+++ b/drivers/clk/thead/Kconfig
@@ -3,6 +3,7 @@
config CLK_THEAD_TH1520_AP
bool "T-HEAD TH1520 AP clock support"
depends on ARCH_THEAD || COMPILE_TEST
+ depends on 64BIT
default ARCH_THEAD
select REGMAP_MMIO
help
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 6e46781bc9ac..b9561e3f196c 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -418,8 +418,6 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
static int __init samsung_pwm_alloc(struct device_node *np,
const struct samsung_pwm_variant *variant)
{
- struct property *prop;
- const __be32 *cur;
u32 val;
int i, ret;
@@ -427,7 +425,7 @@ static int __init samsung_pwm_alloc(struct device_node *np,
for (i = 0; i < SAMSUNG_PWM_NUM; ++i)
pwm.irq[i] = irq_of_parse_and_map(np, i);
- of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
+ of_property_for_each_u32(np, "samsung,pwm-outputs", val) {
if (val >= SAMSUNG_PWM_NUM) {
pr_warn("%s: invalid channel index in samsung,pwm-outputs property\n", __func__);
continue;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 887ed6e358fb..cb730050d3d4 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -2082,7 +2082,7 @@ static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
cxl_device_id(dev));
}
-static int cxl_bus_match(struct device *dev, struct device_driver *drv)
+static int cxl_bus_match(struct device *dev, const struct device_driver *drv)
{
return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index a6613a6f8923..720aa07976b0 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -825,10 +825,7 @@ struct cxl_driver {
int id;
};
-static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
-{
- return container_of(drv, struct cxl_driver, drv);
-}
+#define to_cxl_drv(__drv) container_of_const(__drv, struct cxl_driver, drv)
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
const char *modname);
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 3ef9550bd2ca..fde29e0ad68b 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -39,12 +39,9 @@ static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}
-static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
-{
- return container_of(drv, struct dax_device_driver, drv);
-}
+#define to_dax_drv(__drv) container_of_const(__drv, struct dax_device_driver, drv)
-static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
+static struct dax_id *__dax_match_id(const struct dax_device_driver *dax_drv,
const char *dev_name)
{
struct dax_id *dax_id;
@@ -57,7 +54,7 @@ static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
return NULL;
}
-static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
+static int dax_match_id(const struct dax_device_driver *dax_drv, struct device *dev)
{
int match;
@@ -68,7 +65,7 @@ static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
return match;
}
-static int dax_match_type(struct dax_device_driver *dax_drv, struct device *dev)
+static int dax_match_type(const struct dax_device_driver *dax_drv, struct device *dev)
{
enum dax_driver_type type = DAXDRV_DEVICE_TYPE;
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -156,7 +153,7 @@ static struct attribute *dax_drv_attrs[] = {
};
ATTRIBUTE_GROUPS(dax_drv);
-static int dax_bus_match(struct device *dev, struct device_driver *drv);
+static int dax_bus_match(struct device *dev, const struct device_driver *drv);
/*
* Static dax regions are regions created by an external subsystem
@@ -250,9 +247,9 @@ static const struct bus_type dax_bus_type = {
.drv_groups = dax_drv_groups,
};
-static int dax_bus_match(struct device *dev, struct device_driver *drv)
+static int dax_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct dax_device_driver *dax_drv = to_dax_drv(drv);
+ const struct dax_device_driver *dax_drv = to_dax_drv(drv);
if (dax_match_id(dax_drv, dev))
return 1;
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index fcc83ede0909..b915c2b4601e 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -13,7 +13,9 @@
#include <linux/gfp.h>
#include <linux/export.h>
-static struct class *dca_class;
+static const struct class dca_class = {
+ .name = "dca",
+};
static struct idr dca_idr;
static spinlock_t dca_idr_lock;
@@ -22,14 +24,14 @@ int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
struct device *cd;
static int req_count;
- cd = device_create(dca_class, dca->cd, MKDEV(0, slot + 1), NULL,
+ cd = device_create(&dca_class, dca->cd, MKDEV(0, slot + 1), NULL,
"requester%d", req_count++);
return PTR_ERR_OR_ZERO(cd);
}
void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
{
- device_destroy(dca_class, MKDEV(0, slot + 1));
+ device_destroy(&dca_class, MKDEV(0, slot + 1));
}
int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
@@ -49,7 +51,7 @@ int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
if (ret < 0)
return ret;
- cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
+ cd = device_create(&dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
if (IS_ERR(cd)) {
spin_lock(&dca_idr_lock);
idr_remove(&dca_idr, dca->id);
@@ -71,20 +73,22 @@ void dca_sysfs_remove_provider(struct dca_provider *dca)
int __init dca_sysfs_init(void)
{
+ int err;
+
idr_init(&dca_idr);
spin_lock_init(&dca_idr_lock);
- dca_class = class_create("dca");
- if (IS_ERR(dca_class)) {
+ err = class_register(&dca_class);
+ if (err) {
idr_destroy(&dca_idr);
- return PTR_ERR(dca_class);
+ return err;
}
return 0;
}
void __exit dca_sysfs_exit(void)
{
- class_destroy(dca_class);
+ class_unregister(&dca_class);
idr_destroy(&dca_idr);
}
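The dca conversion above moves from a class_create()-allocated class to a statically defined const struct class registered with class_register() and torn down with class_unregister(). The same shape for a hypothetical "bar" class:

/*
 * Sketch of a statically defined class; "bar" and the init/exit names are
 * placeholders, not code from this series.
 */
#include <linux/device/class.h>
#include <linux/module.h>

static const struct class bar_class = {
	.name = "bar",
};

static int __init bar_init(void)
{
	return class_register(&bar_class);	/* 0 on success, -errno on failure */
}

static void __exit bar_exit(void)
{
	class_unregister(&bar_class);
}

module_init(bar_init);
module_exit(bar_exit);
MODULE_DESCRIPTION("Static class registration sketch");
MODULE_LICENSE("GPL");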
diff --git a/drivers/dio/dio-driver.c b/drivers/dio/dio-driver.c
index 2d9fa6011945..12fa2d209dab 100644
--- a/drivers/dio/dio-driver.c
+++ b/drivers/dio/dio-driver.c
@@ -110,10 +110,10 @@ void dio_unregister_driver(struct dio_driver *drv)
* and 0 if there is no match.
*/
-static int dio_bus_match(struct device *dev, struct device_driver *drv)
+static int dio_bus_match(struct device *dev, const struct device_driver *drv)
{
struct dio_dev *d = to_dio_dev(dev);
- struct dio_driver *dio_drv = to_dio_driver(drv);
+ const struct dio_driver *dio_drv = to_dio_driver(drv);
const struct dio_device_id *ids = dio_drv->id_table;
if (!ids)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9fc99cfbef08..cc0a62c34861 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -568,38 +568,6 @@ config ST_FDMA
Say Y here if you have such a chipset.
If unsure, say N.
-config STM32_DMA
- bool "STMicroelectronics STM32 DMA support"
- depends on ARCH_STM32 || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the on-chip DMA controller on STMicroelectronics
- STM32 MCUs.
- If you have a board based on such a MCU and wish to use DMA say Y
- here.
-
-config STM32_DMAMUX
- bool "STMicroelectronics STM32 dma multiplexer support"
- depends on STM32_DMA || COMPILE_TEST
- help
- Enable support for the on-chip DMA multiplexer on STMicroelectronics
- STM32 MCUs.
- If you have a board based on such a MCU and wish to use DMAMUX say Y
- here.
-
-config STM32_MDMA
- bool "STMicroelectronics STM32 master dma support"
- depends on ARCH_STM32 || COMPILE_TEST
- depends on OF
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the on-chip MDMA controller on STMicroelectronics
- STM32 platforms.
- If you have a board based on STM32 SoC and wish to use the master DMA
- say Y here.
-
config SPRD_DMA
tristate "Spreadtrum DMA support"
depends on ARCH_SPRD || COMPILE_TEST
@@ -772,6 +740,8 @@ source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
source "drivers/dma/lgm/Kconfig"
+source "drivers/dma/stm32/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 802ca916f05f..374ea98faf43 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -70,9 +70,6 @@ obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
-obj-$(CONFIG_STM32_DMA) += stm32-dma.o
-obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
-obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
@@ -88,5 +85,6 @@ obj-$(CONFIG_INTEL_LDMA) += lgm/
obj-y += mediatek/
obj-y += qcom/
+obj-y += stm32/
obj-y += ti/
obj-y += xilinx/
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index a8e3615235b8..0968176f323d 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -233,7 +233,7 @@ static void msgdma_free_descriptor(struct msgdma_device *mdev,
struct msgdma_sw_desc *child, *next;
mdev->desc_free_cnt++;
- list_add_tail(&desc->node, &mdev->free_list);
+ list_move_tail(&desc->node, &mdev->free_list);
list_for_each_entry_safe(child, next, &desc->tx_list, node) {
mdev->desc_free_cnt++;
list_move_tail(&child->node, &mdev->free_list);
@@ -583,22 +583,25 @@ static void msgdma_issue_pending(struct dma_chan *chan)
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
struct msgdma_sw_desc *desc, *next;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&mdev->lock, irqflags);
list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
struct dmaengine_desc_callback cb;
- list_del(&desc->node);
-
dmaengine_desc_get_callback(&desc->async_tx, &cb);
if (dmaengine_desc_callback_valid(&cb)) {
- spin_unlock(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
}
/* Run any dependencies, then free the descriptor */
msgdma_free_descriptor(mdev, desc);
}
+
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
}
/**
@@ -713,10 +716,11 @@ static void msgdma_tasklet(struct tasklet_struct *t)
}
msgdma_complete_descriptor(mdev);
- msgdma_chan_desc_cleanup(mdev);
}
spin_unlock_irqrestore(&mdev->lock, flags);
+
+ msgdma_chan_desc_cleanup(mdev);
}
/**
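The msgdma change above moves descriptor cleanup out of the tasklet's locked region, switches the cleanup path to the irqsave lock variants, and keeps dropping the lock around the completion callback. A generic sketch of that drop-the-lock-around-the-callback pattern, with hypothetical types (the real driver additionally moves finished descriptors onto a free list rather than deleting them in place):

/*
 * Sketch only: walk a done list under a spinlock, but release the lock
 * while the user callback runs so it never executes with the lock held,
 * then re-take it before continuing.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_desc {
	struct list_head node;
	void (*callback)(void *param);	/* hypothetical completion callback */
	void *param;
};

static void example_cleanup(spinlock_t *lock, struct list_head *done_list)
{
	struct example_desc *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(desc, next, done_list, node) {
		if (desc->callback) {
			spin_unlock_irqrestore(lock, flags);
			desc->callback(desc->param);
			spin_lock_irqsave(lock, flags);
		}
		list_del(&desc->node);
	}
	spin_unlock_irqrestore(lock, flags);
}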
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 491b22240221..c380a4dda77a 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1037,7 +1037,8 @@ static int get_dma_id(struct dma_device *device)
}
static int __dma_async_device_channel_register(struct dma_device *device,
- struct dma_chan *chan)
+ struct dma_chan *chan,
+ const char *name)
{
int rc;
@@ -1066,8 +1067,10 @@ static int __dma_async_device_channel_register(struct dma_device *device,
chan->dev->device.parent = device->dev;
chan->dev->chan = chan;
chan->dev->dev_id = device->dev_id;
- dev_set_name(&chan->dev->device, "dma%dchan%d",
- device->dev_id, chan->chan_id);
+ if (!name)
+ dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
+ else
+ dev_set_name(&chan->dev->device, name);
rc = device_register(&chan->dev->device);
if (rc)
goto err_out_ida;
@@ -1087,11 +1090,12 @@ static int __dma_async_device_channel_register(struct dma_device *device,
}
int dma_async_device_channel_register(struct dma_device *device,
- struct dma_chan *chan)
+ struct dma_chan *chan,
+ const char *name)
{
int rc;
- rc = __dma_async_device_channel_register(device, chan);
+ rc = __dma_async_device_channel_register(device, chan, name);
if (rc < 0)
return rc;
@@ -1203,7 +1207,7 @@ int dma_async_device_register(struct dma_device *device)
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
- rc = __dma_async_device_channel_register(device, chan);
+ rc = __dma_async_device_channel_register(device, chan, NULL);
if (rc < 0)
goto err_out;
}
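With the dmaengine change above, dma_async_device_channel_register() takes an extra name argument: NULL keeps the default dma<dev_id>chan<chan_id> naming, while a non-NULL string overrides it (the idxd caller later in this diff passes NULL). A minimal usage sketch with placeholder arguments:

/*
 * Sketch of the updated call, assuming the signature introduced in this
 * diff; "dma0chan-example" is an invented name for illustration.
 */
#include <linux/dmaengine.h>

static int example_register_chan(struct dma_device *dma, struct dma_chan *chan,
				 bool use_custom_name)
{
	if (use_custom_name)
		return dma_async_device_channel_register(dma, chan, "dma0chan-example");

	return dma_async_device_channel_register(dma, chan, NULL);
}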
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a4f608837849..1f201a542b37 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -1372,4 +1372,5 @@ static void __exit dmatest_exit(void)
module_exit(dmatest_exit);
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_DESCRIPTION("DMA Engine test module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index 36897b41ee7e..b4323d243d6d 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -367,4 +367,5 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
}
EXPORT_SYMBOL_GPL(dpdmai_get_tx_queue);
+MODULE_DESCRIPTION("NXP DPAA2 QDMA driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 3af430787315..b7f15ab96855 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -59,7 +59,6 @@ void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
fsl_chan->edesc = NULL;
fsl_chan->status = DMA_COMPLETE;
- fsl_chan->idle = true;
} else {
vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
}
@@ -239,7 +238,7 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
fsl_edma_disable_request(fsl_chan);
fsl_chan->edesc = NULL;
- fsl_chan->idle = true;
+ fsl_chan->status = DMA_COMPLETE;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
@@ -259,7 +258,6 @@ int fsl_edma_pause(struct dma_chan *chan)
if (fsl_chan->edesc) {
fsl_edma_disable_request(fsl_chan);
fsl_chan->status = DMA_PAUSED;
- fsl_chan->idle = true;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
@@ -274,7 +272,6 @@ int fsl_edma_resume(struct dma_chan *chan)
if (fsl_chan->edesc) {
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
@@ -758,6 +755,8 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
fsl_desc->iscyclic = false;
fsl_chan->is_sw = true;
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
+ fsl_chan->is_remote = true;
/* To match with copy_align and max_seg_size so 1 tcd is enough */
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
@@ -780,7 +779,6 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
}
void fsl_edma_issue_pending(struct dma_chan *chan)
@@ -805,6 +803,7 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ int ret;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_prepare_enable(fsl_chan->clk);
@@ -813,6 +812,17 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
+
+ if (fsl_chan->txirq) {
+ ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
+ fsl_chan->chan_name, fsl_chan);
+
+ if (ret) {
+ dma_pool_destroy(fsl_chan->tcd_pool);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -832,11 +842,15 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_edma_unprep_slave_dma(fsl_chan);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->txirq)
+ free_irq(fsl_chan->txirq, fsl_chan);
+
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
fsl_chan->srcid = 0;
+ fsl_chan->is_remote = false;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_disable_unprepare(fsl_chan->clk);
}
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index ac66222c1604..ce37e1ee9c46 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -150,7 +150,6 @@ struct fsl_edma_chan {
struct virt_dma_chan vchan;
enum dma_status status;
enum fsl_edma_pm_state pm_state;
- bool idle;
struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc;
struct dma_slave_config cfg;
@@ -172,6 +171,7 @@ struct fsl_edma_chan {
int priority;
int hw_chanid;
int txirq;
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
bool is_rxchan;
bool is_remote;
bool is_multi_fifo;
@@ -194,6 +194,7 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_HAS_PD BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
+#define FSL_EDMA_DRV_MEM_REMOTE BIT(8)
/* control and status register is in tcd address space, edma3 reg layout */
#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
@@ -455,7 +456,6 @@ static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
{
fsl_chan->status = DMA_ERROR;
- fsl_chan->idle = true;
}
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 391e4f13dfeb..c66185c5a199 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -65,6 +65,13 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t fsl_edma2_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+
+ return fsl_edma_tx_handler(irq, fsl_chan->edma);
+}
+
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -228,7 +235,6 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
- int ret;
int i;
for (i = 0; i < fsl_edma->n_chans; i++) {
@@ -243,13 +249,7 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
if (fsl_chan->txirq < 0)
return -EINVAL;
- ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
- fsl_edma3_tx_handler, IRQF_SHARED,
- fsl_chan->chan_name, fsl_chan);
- if (ret) {
- dev_err(&pdev->dev, "Can't register chan%d's IRQ.\n", i);
- return -EINVAL;
- }
+ fsl_chan->irq_handler = fsl_edma3_tx_handler;
}
return 0;
@@ -278,19 +278,20 @@ fsl_edma2_irq_init(struct platform_device *pdev,
*/
for (i = 0; i < count; i++) {
irq = platform_get_irq(pdev, i);
+ ret = 0;
if (irq < 0)
return -ENXIO;
/* The last IRQ is for eDMA err */
- if (i == count - 1)
+ if (i == count - 1) {
ret = devm_request_irq(&pdev->dev, irq,
fsl_edma_err_handler,
0, "eDMA2-ERR", fsl_edma);
- else
- ret = devm_request_irq(&pdev->dev, irq,
- fsl_edma_tx_handler, 0,
- fsl_edma->chans[i].chan_name,
- fsl_edma);
+ } else {
+ fsl_edma->chans[i].txirq = irq;
+ fsl_edma->chans[i].irq_handler = fsl_edma2_tx_handler;
+ }
+
if (ret)
return ret;
}
@@ -342,7 +343,7 @@ static struct fsl_edma_drvdata imx7ulp_data = {
};
static struct fsl_edma_drvdata imx8qm_data = {
- .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
@@ -543,7 +544,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
fsl_chan->srcid = 0;
- fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
@@ -668,7 +668,7 @@ static int fsl_edma_suspend_late(struct device *dev)
continue;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */
- if (unlikely(!fsl_chan->idle)) {
+ if (unlikely(fsl_chan->status == DMA_IN_PROGRESS)) {
dev_warn(dev, "WARN: There is non-idle channel.");
fsl_edma_disable_request(fsl_chan);
fsl_edma_chan_mux(fsl_chan, 0, false);
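The fsl-edma rework above stops requesting every per-channel interrupt at probe time: probe now only records the IRQ number and handler in the channel, and the IRQ is requested in fsl_edma_alloc_chan_resources() and released in fsl_edma_free_chan_resources(). A simplified sketch of that scheme with hypothetical types:

/*
 * Sketch only: defer request_irq() until a channel is actually allocated
 * and release it when the channel is freed.
 */
#include <linux/interrupt.h>

struct example_chan {
	int txirq;
	irqreturn_t (*irq_handler)(int irq, void *dev_id);
	const char *name;
};

static int example_alloc_chan(struct example_chan *chan)
{
	if (!chan->txirq)
		return 0;	/* channel has no dedicated interrupt */

	return request_irq(chan->txirq, chan->irq_handler, IRQF_SHARED,
			   chan->name, chan);
}

static void example_free_chan(struct example_chan *chan)
{
	if (chan->txirq)
		free_irq(chan->txirq, chan);
}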
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
index b83b27e04f2a..e647a684485d 100644
--- a/drivers/dma/idxd/bus.c
+++ b/drivers/dma/idxd/bus.c
@@ -33,10 +33,10 @@ void idxd_driver_unregister(struct idxd_device_driver *idxd_drv)
EXPORT_SYMBOL_GPL(idxd_driver_unregister);
static int idxd_config_bus_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
- struct idxd_device_driver *idxd_drv =
- container_of(drv, struct idxd_device_driver, drv);
+ const struct idxd_device_driver *idxd_drv =
+ container_of_const(drv, struct idxd_device_driver, drv);
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
int i = 0;
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
index 5fd38d1b9d28..a4adb0c17995 100644
--- a/drivers/dma/idxd/compat.c
+++ b/drivers/dma/idxd/compat.c
@@ -7,7 +7,6 @@
#include <linux/device/bus.h>
#include "idxd.h"
-extern int device_driver_attach(struct device_driver *drv, struct device *dev);
extern void device_driver_detach(struct device *dev);
#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index cd835eabd31b..dbecd699237e 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -269,7 +269,7 @@ static int idxd_register_dma_channel(struct idxd_wq *wq)
desc->txd.tx_submit = idxd_dma_tx_submit;
}
- rc = dma_async_device_channel_register(dma, chan);
+ rc = dma_async_device_channel_register(dma, chan, NULL);
if (rc < 0) {
kfree(idxd_chan);
return rc;
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 385c488c9cd1..21f6905b554d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -22,6 +22,7 @@
#include "perfmon.h"
MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 003e1580b902..72299a08af44 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2107,9 +2107,8 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
{
int ret;
- ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_UEVENT, fw_name, sdma->dev,
- GFP_KERNEL, sdma, sdma_load_firmware);
+ ret = firmware_request_nowait_nowarn(THIS_MODULE, fw_name, sdma->dev,
+ GFP_KERNEL, sdma, sdma_load_firmware);
return ret;
}
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index e8f45a7fded4..7b502b60b38b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -23,6 +23,7 @@
#include "../dmaengine.h"
MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_DESCRIPTION("Intel I/OAT DMA Linux driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index 78c606f6d002..0c5862bf26f8 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -64,7 +64,6 @@ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
fsl_edma_disable_request(&mcf_edma->chans[ch]);
iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
mcf_edma->chans[ch].status = DMA_ERROR;
- mcf_edma->chans[ch].idle = true;
}
}
@@ -196,7 +195,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_chan->edma = mcf_edma;
mcf_chan->srcid = i;
- mcf_chan->idle = true;
mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index c48d68cbff92..66dc6d31b603 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -148,11 +148,6 @@ struct moxart_dmadev {
unsigned int irq;
};
-struct moxart_filter_data {
- struct moxart_dmadev *mdc;
- struct of_phandle_args *dma_spec;
-};
-
static const unsigned int es_bytes[] = {
[MOXART_DMA_DATA_TYPE_S8] = 1,
[MOXART_DMA_DATA_TYPE_S16] = 2,
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 1c93864e0e4d..e6ebd688d746 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -476,12 +476,6 @@ struct gpi_dev {
struct gpii *gpiis;
};
-struct reg_info {
- char *name;
- u32 offset;
- u32 val;
-};
-
struct gchan {
struct virt_dma_chan vc;
u32 chid;
@@ -1197,7 +1191,6 @@ static int gpi_reset_chan(struct gchan *gchan, enum gpi_cmd gpi_cmd)
{
struct gpii *gpii = gchan->gpii;
struct gpi_ring *ch_ring = &gchan->ch_ring;
- unsigned long flags;
LIST_HEAD(list);
int ret;
@@ -1220,9 +1213,9 @@ static int gpi_reset_chan(struct gchan *gchan, enum gpi_cmd gpi_cmd)
gpi_mark_stale_events(gchan);
/* remove all async descriptors */
- spin_lock_irqsave(&gchan->vc.lock, flags);
+ spin_lock(&gchan->vc.lock);
vchan_get_all_descriptors(&gchan->vc, &list);
- spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ spin_unlock(&gchan->vc.lock);
write_unlock_irq(&gpii->pm_lock);
vchan_dma_desc_free_list(&gchan->vc, &list);
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 721b4ac0857a..4d2cd8d9ec74 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -957,4 +957,5 @@ static struct platform_driver hidma_driver = {
};
module_platform_driver(hidma_driver);
+MODULE_DESCRIPTION("Qualcomm Technologies HIDMA Channel support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index bb883e138ebf..4805ce390ffa 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -331,4 +331,5 @@ static struct platform_driver hidma_mgmt_driver = {
};
module_platform_driver(hidma_mgmt_driver);
+MODULE_DESCRIPTION("Qualcomm Technologies HIDMA DMA engine interface");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 1f1e86ba5c66..65a27c5a7bce 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -540,8 +540,8 @@ static int rz_dmac_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&channel->vc.lock, flags);
list_splice_tail_init(&channel->ld_active, &channel->ld_free);
list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
- spin_unlock_irqrestore(&channel->vc.lock, flags);
vchan_get_all_descriptors(&channel->vc, &head);
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
vchan_dma_desc_free_list(&channel->vc, &head);
return 0;
diff --git a/drivers/dma/stm32/Kconfig b/drivers/dma/stm32/Kconfig
new file mode 100644
index 000000000000..4d8d8063133b
--- /dev/null
+++ b/drivers/dma/stm32/Kconfig
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# STM32 DMA controllers drivers
+#
+if ARCH_STM32 || COMPILE_TEST
+
+config STM32_DMA
+ bool "STMicroelectronics STM32 DMA support"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip DMA controller on STMicroelectronics
+ STM32 platforms.
+ If you have a board based on STM32 SoC with such DMA controller
+ and want to use DMA say Y here.
+
+config STM32_DMAMUX
+ bool "STMicroelectronics STM32 DMA multiplexer support"
+ depends on STM32_DMA
+ help
+ Enable support for the on-chip DMA multiplexer on STMicroelectronics
+ STM32 platforms.
+ If you have a board based on STM32 SoC with such DMA multiplexer
+ and want to use DMAMUX say Y here.
+
+config STM32_MDMA
+ bool "STMicroelectronics STM32 master DMA support"
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip MDMA controller on STMicroelectronics
+ STM32 platforms.
+ If you have a board based on STM32 SoC with such DMA controller
+ and want to use MDMA say Y here.
+
+config STM32_DMA3
+ tristate "STMicroelectronics STM32 DMA3 support"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip DMA3 controller on STMicroelectronics
+ STM32 platforms.
+ If you have a board based on STM32 SoC with such DMA3 controller
+ and want to use DMA3, say Y here.
+
+endif
diff --git a/drivers/dma/stm32/Makefile b/drivers/dma/stm32/Makefile
new file mode 100644
index 000000000000..5082db4b4c1c
--- /dev/null
+++ b/drivers/dma/stm32/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_STM32_DMA) += stm32-dma.o
+obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
+obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
+obj-$(CONFIG_STM32_DMA3) += stm32-dma3.o
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
index 90857d08a1a7..917f8e922373 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32/stm32-dma.c
@@ -28,7 +28,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#include "virt-dma.h"
+#include "../virt-dma.h"
#define STM32_DMA_LISR 0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR 0x0004 /* DMA High Int Status Reg */
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
new file mode 100644
index 000000000000..4087e0263a48
--- /dev/null
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -0,0 +1,1847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STM32 DMA3 controller driver
+ *
+ * Copyright (C) STMicroelectronics 2024
+ * Author(s): Amelie Delaunay <amelie.delaunay@foss.st.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "../virt-dma.h"
+
+#define STM32_DMA3_SECCFGR 0x00
+#define STM32_DMA3_PRIVCFGR 0x04
+#define STM32_DMA3_RCFGLOCKR 0x08
+#define STM32_DMA3_MISR 0x0c
+#define STM32_DMA3_SMISR 0x10
+
+#define STM32_DMA3_CLBAR(x) (0x50 + 0x80 * (x))
+#define STM32_DMA3_CCIDCFGR(x) (0x54 + 0x80 * (x))
+#define STM32_DMA3_CSEMCR(x) (0x58 + 0x80 * (x))
+#define STM32_DMA3_CFCR(x) (0x5c + 0x80 * (x))
+#define STM32_DMA3_CSR(x) (0x60 + 0x80 * (x))
+#define STM32_DMA3_CCR(x) (0x64 + 0x80 * (x))
+#define STM32_DMA3_CTR1(x) (0x90 + 0x80 * (x))
+#define STM32_DMA3_CTR2(x) (0x94 + 0x80 * (x))
+#define STM32_DMA3_CBR1(x) (0x98 + 0x80 * (x))
+#define STM32_DMA3_CSAR(x) (0x9c + 0x80 * (x))
+#define STM32_DMA3_CDAR(x) (0xa0 + 0x80 * (x))
+#define STM32_DMA3_CLLR(x) (0xcc + 0x80 * (x))
+
+#define STM32_DMA3_HWCFGR13 0xfc0 /* G_PER_CTRL(X) x=8..15 */
+#define STM32_DMA3_HWCFGR12 0xfc4 /* G_PER_CTRL(X) x=0..7 */
+#define STM32_DMA3_HWCFGR4 0xfe4 /* G_FIFO_SIZE(X) x=8..15 */
+#define STM32_DMA3_HWCFGR3 0xfe8 /* G_FIFO_SIZE(X) x=0..7 */
+#define STM32_DMA3_HWCFGR2 0xfec /* G_MAX_REQ_ID */
+#define STM32_DMA3_HWCFGR1 0xff0 /* G_MASTER_PORTS, G_NUM_CHANNELS, G_Mx_DATA_WIDTH */
+#define STM32_DMA3_VERR 0xff4
+
+/* SECCFGR DMA secure configuration register */
+#define SECCFGR_SEC(x) BIT(x)
+
+/* MISR DMA non-secure/secure masked interrupt status register */
+#define MISR_MIS(x) BIT(x)
+
+/* CxLBAR DMA channel x linked_list base address register */
+#define CLBAR_LBA GENMASK(31, 16)
+
+/* CxCIDCFGR DMA channel x CID register */
+#define CCIDCFGR_CFEN BIT(0)
+#define CCIDCFGR_SEM_EN BIT(1)
+#define CCIDCFGR_SCID GENMASK(5, 4)
+#define CCIDCFGR_SEM_WLIST_CID0 BIT(16)
+#define CCIDCFGR_SEM_WLIST_CID1 BIT(17)
+#define CCIDCFGR_SEM_WLIST_CID2 BIT(18)
+
+enum ccidcfgr_cid {
+ CCIDCFGR_CID0,
+ CCIDCFGR_CID1,
+ CCIDCFGR_CID2,
+};
+
+/* CxSEMCR DMA channel x semaphore control register */
+#define CSEMCR_SEM_MUTEX BIT(0)
+#define CSEMCR_SEM_CCID GENMASK(5, 4)
+
+/* CxFCR DMA channel x flag clear register */
+#define CFCR_TCF BIT(8)
+#define CFCR_HTF BIT(9)
+#define CFCR_DTEF BIT(10)
+#define CFCR_ULEF BIT(11)
+#define CFCR_USEF BIT(12)
+#define CFCR_SUSPF BIT(13)
+
+/* CxSR DMA channel x status register */
+#define CSR_IDLEF BIT(0)
+#define CSR_TCF BIT(8)
+#define CSR_HTF BIT(9)
+#define CSR_DTEF BIT(10)
+#define CSR_ULEF BIT(11)
+#define CSR_USEF BIT(12)
+#define CSR_SUSPF BIT(13)
+#define CSR_ALL_F GENMASK(13, 8)
+#define CSR_FIFOL GENMASK(24, 16)
+
+/* CxCR DMA channel x control register */
+#define CCR_EN BIT(0)
+#define CCR_RESET BIT(1)
+#define CCR_SUSP BIT(2)
+#define CCR_TCIE BIT(8)
+#define CCR_HTIE BIT(9)
+#define CCR_DTEIE BIT(10)
+#define CCR_ULEIE BIT(11)
+#define CCR_USEIE BIT(12)
+#define CCR_SUSPIE BIT(13)
+#define CCR_ALLIE GENMASK(13, 8)
+#define CCR_LSM BIT(16)
+#define CCR_LAP BIT(17)
+#define CCR_PRIO GENMASK(23, 22)
+
+enum ccr_prio {
+ CCR_PRIO_LOW,
+ CCR_PRIO_MID,
+ CCR_PRIO_HIGH,
+ CCR_PRIO_VERY_HIGH,
+};
+
+/* CxTR1 DMA channel x transfer register 1 */
+#define CTR1_SINC BIT(3)
+#define CTR1_SBL_1 GENMASK(9, 4)
+#define CTR1_DINC BIT(19)
+#define CTR1_DBL_1 GENMASK(25, 20)
+#define CTR1_SDW_LOG2 GENMASK(1, 0)
+#define CTR1_PAM GENMASK(12, 11)
+#define CTR1_SAP BIT(14)
+#define CTR1_DDW_LOG2 GENMASK(17, 16)
+#define CTR1_DAP BIT(30)
+
+enum ctr1_dw {
+ CTR1_DW_BYTE,
+ CTR1_DW_HWORD,
+ CTR1_DW_WORD,
+ CTR1_DW_DWORD, /* Depends on HWCFGR1.G_M0_DATA_WIDTH_ENC and .G_M1_DATA_WIDTH_ENC */
+};
+
+enum ctr1_pam {
+ CTR1_PAM_0S_LT, /* if DDW > SDW, padded with 0s else left-truncated */
+ CTR1_PAM_SE_RT, /* if DDW > SDW, sign extended else right-truncated */
+ CTR1_PAM_PACK_UNPACK, /* FIFO queued */
+};
+
+/* CxTR2 DMA channel x transfer register 2 */
+#define CTR2_REQSEL GENMASK(7, 0)
+#define CTR2_SWREQ BIT(9)
+#define CTR2_DREQ BIT(10)
+#define CTR2_BREQ BIT(11)
+#define CTR2_PFREQ BIT(12)
+#define CTR2_TCEM GENMASK(31, 30)
+
+enum ctr2_tcem {
+ CTR2_TCEM_BLOCK,
+ CTR2_TCEM_REPEAT_BLOCK,
+ CTR2_TCEM_LLI,
+ CTR2_TCEM_CHANNEL,
+};
+
+/* CxBR1 DMA channel x block register 1 */
+#define CBR1_BNDT GENMASK(15, 0)
+
+/* CxLLR DMA channel x linked-list address register */
+#define CLLR_LA GENMASK(15, 2)
+#define CLLR_ULL BIT(16)
+#define CLLR_UDA BIT(27)
+#define CLLR_USA BIT(28)
+#define CLLR_UB1 BIT(29)
+#define CLLR_UT2 BIT(30)
+#define CLLR_UT1 BIT(31)
+
+/* HWCFGR13 DMA hardware configuration register 13 x=8..15 */
+/* HWCFGR12 DMA hardware configuration register 12 x=0..7 */
+#define G_PER_CTRL(x) (ULL(0x1) << (4 * (x)))
+
+/* HWCFGR4 DMA hardware configuration register 4 x=8..15 */
+/* HWCFGR3 DMA hardware configuration register 3 x=0..7 */
+#define G_FIFO_SIZE(x) (ULL(0x7) << (4 * (x)))
+
+#define get_chan_hwcfg(x, mask, reg) (((reg) & (mask)) >> (4 * (x)))
+
+/* HWCFGR2 DMA hardware configuration register 2 */
+#define G_MAX_REQ_ID GENMASK(7, 0)
+
+/* HWCFGR1 DMA hardware configuration register 1 */
+#define G_MASTER_PORTS GENMASK(2, 0)
+#define G_NUM_CHANNELS GENMASK(12, 8)
+#define G_M0_DATA_WIDTH_ENC GENMASK(25, 24)
+#define G_M1_DATA_WIDTH_ENC GENMASK(29, 28)
+
+enum stm32_dma3_master_ports {
+ AXI64, /* 1x AXI: 64-bit port 0 */
+ AHB32, /* 1x AHB: 32-bit port 0 */
+ AHB32_AHB32, /* 2x AHB: 32-bit port 0 and 32-bit port 1 */
+ AXI64_AHB32, /* 1x AXI 64-bit port 0 and 1x AHB 32-bit port 1 */
+ AXI64_AXI64, /* 2x AXI: 64-bit port 0 and 64-bit port 1 */
+ AXI128_AHB32, /* 1x AXI 128-bit port 0 and 1x AHB 32-bit port 1 */
+};
+
+enum stm32_dma3_port_data_width {
+ DW_32, /* 32-bit, for AHB */
+ DW_64, /* 64-bit, for AXI */
+ DW_128, /* 128-bit, for AXI */
+ DW_INVALID,
+};
+
+/* VERR DMA version register */
+#define VERR_MINREV GENMASK(3, 0)
+#define VERR_MAJREV GENMASK(7, 4)
+
+/* Device tree */
+/* struct stm32_dma3_dt_conf */
+/* .ch_conf */
+#define STM32_DMA3_DT_PRIO GENMASK(1, 0) /* CCR_PRIO */
+#define STM32_DMA3_DT_FIFO GENMASK(7, 4)
+/* .tr_conf */
+#define STM32_DMA3_DT_SINC BIT(0) /* CTR1_SINC */
+#define STM32_DMA3_DT_SAP BIT(1) /* CTR1_SAP */
+#define STM32_DMA3_DT_DINC BIT(4) /* CTR1_DINC */
+#define STM32_DMA3_DT_DAP BIT(5) /* CTR1_DAP */
+#define STM32_DMA3_DT_BREQ BIT(8) /* CTR2_BREQ */
+#define STM32_DMA3_DT_PFREQ BIT(9) /* CTR2_PFREQ */
+#define STM32_DMA3_DT_TCEM GENMASK(13, 12) /* CTR2_TCEM */
+
+/* struct stm32_dma3_chan .config_set bitfield */
+#define STM32_DMA3_CFG_SET_DT BIT(0)
+#define STM32_DMA3_CFG_SET_DMA BIT(1)
+#define STM32_DMA3_CFG_SET_BOTH (STM32_DMA3_CFG_SET_DT | STM32_DMA3_CFG_SET_DMA)
+
+#define STM32_DMA3_MAX_BLOCK_SIZE ALIGN_DOWN(CBR1_BNDT, 64)
+#define port_is_ahb(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
+ ((_maxdw) != DW_INVALID) && ((_maxdw) == DW_32); })
+#define port_is_axi(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
+ ((_maxdw) != DW_INVALID) && ((_maxdw) != DW_32); })
+#define get_chan_max_dw(maxdw, maxburst)((port_is_ahb(maxdw) || \
+ (maxburst) < DMA_SLAVE_BUSWIDTH_8_BYTES) ? \
+ DMA_SLAVE_BUSWIDTH_4_BYTES : DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+/* Static linked-list data structure (depends on update bits UT1/UT2/UB1/USA/UDA/ULL) */
+struct stm32_dma3_hwdesc {
+ u32 ctr1;
+ u32 ctr2;
+ u32 cbr1;
+ u32 csar;
+ u32 cdar;
+ u32 cllr;
+} __packed __aligned(32);
+
+/*
+ * CLLR_LA / sizeof(struct stm32_dma3_hwdesc) represents the number of hwdesc that can be addressed
+ * by the pointer to the next linked-list data structure. The __aligned forces the 32-byte
+ * alignment. So use hardcoded 32. Multiplied by the max block size of each item, it represents
+ * the sg size limitation.
+ */
+#define STM32_DMA3_MAX_SEG_SIZE ((CLLR_LA / 32) * STM32_DMA3_MAX_BLOCK_SIZE)
+
+/*
+ * Linked-list items
+ */
+struct stm32_dma3_lli {
+ struct stm32_dma3_hwdesc *hwdesc;
+ dma_addr_t hwdesc_addr;
+};
+
+struct stm32_dma3_swdesc {
+ struct virt_dma_desc vdesc;
+ u32 ccr;
+ bool cyclic;
+ u32 lli_size;
+ struct stm32_dma3_lli lli[] __counted_by(lli_size);
+};
+
+struct stm32_dma3_dt_conf {
+ u32 ch_id;
+ u32 req_line;
+ u32 ch_conf;
+ u32 tr_conf;
+};
+
+struct stm32_dma3_chan {
+ struct virt_dma_chan vchan;
+ u32 id;
+ int irq;
+ u32 fifo_size;
+ u32 max_burst;
+ bool semaphore_mode;
+ struct stm32_dma3_dt_conf dt_config;
+ struct dma_slave_config dma_config;
+ u8 config_set;
+ struct dma_pool *lli_pool;
+ struct stm32_dma3_swdesc *swdesc;
+ enum ctr2_tcem tcem;
+ u32 dma_status;
+};
+
+struct stm32_dma3_ddata {
+ struct dma_device dma_dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct stm32_dma3_chan *chans;
+ u32 dma_channels;
+ u32 dma_requests;
+ enum stm32_dma3_port_data_width ports_max_dw[2];
+};
+
+static inline struct stm32_dma3_ddata *to_stm32_dma3_ddata(struct stm32_dma3_chan *chan)
+{
+ return container_of(chan->vchan.chan.device, struct stm32_dma3_ddata, dma_dev);
+}
+
+static inline struct stm32_dma3_chan *to_stm32_dma3_chan(struct dma_chan *c)
+{
+ return container_of(c, struct stm32_dma3_chan, vchan.chan);
+}
+
+static inline struct stm32_dma3_swdesc *to_stm32_dma3_swdesc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct stm32_dma3_swdesc, vdesc);
+}
+
+static struct device *chan2dev(struct stm32_dma3_chan *chan)
+{
+ return &chan->vchan.chan.dev->device;
+}
+
+static void stm32_dma3_chan_dump_reg(struct stm32_dma3_chan *chan)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ struct device *dev = chan2dev(chan);
+ u32 id = chan->id, offset;
+
+ offset = STM32_DMA3_SECCFGR;
+ dev_dbg(dev, "SECCFGR(0x%03x): %08x\n", offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_PRIVCFGR;
+ dev_dbg(dev, "PRIVCFGR(0x%03x): %08x\n", offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CCIDCFGR(id);
+ dev_dbg(dev, "C%dCIDCFGR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CSEMCR(id);
+ dev_dbg(dev, "C%dSEMCR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CSR(id);
+ dev_dbg(dev, "C%dSR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CCR(id);
+ dev_dbg(dev, "C%dCR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CTR1(id);
+ dev_dbg(dev, "C%dTR1(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CTR2(id);
+ dev_dbg(dev, "C%dTR2(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CBR1(id);
+ dev_dbg(dev, "C%dBR1(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CSAR(id);
+ dev_dbg(dev, "C%dSAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CDAR(id);
+ dev_dbg(dev, "C%dDAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CLLR(id);
+ dev_dbg(dev, "C%dLLR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+ offset = STM32_DMA3_CLBAR(id);
+ dev_dbg(dev, "C%dLBAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
+}
+
+static void stm32_dma3_chan_dump_hwdesc(struct stm32_dma3_chan *chan,
+ struct stm32_dma3_swdesc *swdesc)
+{
+ struct stm32_dma3_hwdesc *hwdesc;
+ int i;
+
+ for (i = 0; i < swdesc->lli_size; i++) {
+ hwdesc = swdesc->lli[i].hwdesc;
+ if (i)
+ dev_dbg(chan2dev(chan), "V\n");
+ dev_dbg(chan2dev(chan), "[%d]@%pad\n", i, &swdesc->lli[i].hwdesc_addr);
+ dev_dbg(chan2dev(chan), "| C%dTR1: %08x\n", chan->id, hwdesc->ctr1);
+ dev_dbg(chan2dev(chan), "| C%dTR2: %08x\n", chan->id, hwdesc->ctr2);
+ dev_dbg(chan2dev(chan), "| C%dBR1: %08x\n", chan->id, hwdesc->cbr1);
+ dev_dbg(chan2dev(chan), "| C%dSAR: %08x\n", chan->id, hwdesc->csar);
+ dev_dbg(chan2dev(chan), "| C%dDAR: %08x\n", chan->id, hwdesc->cdar);
+ dev_dbg(chan2dev(chan), "| C%dLLR: %08x\n", chan->id, hwdesc->cllr);
+ }
+
+ if (swdesc->cyclic) {
+ dev_dbg(chan2dev(chan), "|\n");
+ dev_dbg(chan2dev(chan), "-->[0]@%pad\n", &swdesc->lli[0].hwdesc_addr);
+ } else {
+ dev_dbg(chan2dev(chan), "X\n");
+ }
+}
+
+static struct stm32_dma3_swdesc *stm32_dma3_chan_desc_alloc(struct stm32_dma3_chan *chan, u32 count)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ struct stm32_dma3_swdesc *swdesc;
+ int i;
+
+ /*
+ * If the memory needed for all the hwdesc (6 u32 members, but 32-byte aligned) exceeds the
+ * maximum address CLLR_LA can encode, the last items can't be addressed, so abort the
+ * allocation.
+ */
+ if ((count * 32) > CLLR_LA) {
+ dev_err(chan2dev(chan), "Transfer is too big (> %luB)\n", STM32_DMA3_MAX_SEG_SIZE);
+ return NULL;
+ }
+
+ swdesc = kzalloc(struct_size(swdesc, lli, count), GFP_NOWAIT);
+ if (!swdesc)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ swdesc->lli[i].hwdesc = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT,
+ &swdesc->lli[i].hwdesc_addr);
+ if (!swdesc->lli[i].hwdesc)
+ goto err_pool_free;
+ }
+ swdesc->lli_size = count;
+ swdesc->ccr = 0;
+
+ /* Set LL base address */
+ writel_relaxed(swdesc->lli[0].hwdesc_addr & CLBAR_LBA,
+ ddata->base + STM32_DMA3_CLBAR(chan->id));
+
+ /* Set LL allocated port */
+ swdesc->ccr &= ~CCR_LAP;
+
+ return swdesc;
+
+err_pool_free:
+ dev_err(chan2dev(chan), "Failed to alloc descriptors\n");
+ while (--i >= 0)
+ dma_pool_free(chan->lli_pool, swdesc->lli[i].hwdesc, swdesc->lli[i].hwdesc_addr);
+ kfree(swdesc);
+
+ return NULL;
+}
+
+static void stm32_dma3_chan_desc_free(struct stm32_dma3_chan *chan,
+ struct stm32_dma3_swdesc *swdesc)
+{
+ int i;
+
+ for (i = 0; i < swdesc->lli_size; i++)
+ dma_pool_free(chan->lli_pool, swdesc->lli[i].hwdesc, swdesc->lli[i].hwdesc_addr);
+
+ kfree(swdesc);
+}
+
+static void stm32_dma3_chan_vdesc_free(struct virt_dma_desc *vdesc)
+{
+ struct stm32_dma3_swdesc *swdesc = to_stm32_dma3_swdesc(vdesc);
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(vdesc->tx.chan);
+
+ stm32_dma3_chan_desc_free(chan, swdesc);
+}
+
+static void stm32_dma3_check_user_setting(struct stm32_dma3_chan *chan)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ struct device *dev = chan2dev(chan);
+ u32 ctr1 = readl_relaxed(ddata->base + STM32_DMA3_CTR1(chan->id));
+ u32 cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id));
+ u32 csar = readl_relaxed(ddata->base + STM32_DMA3_CSAR(chan->id));
+ u32 cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id));
+ u32 cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id));
+ u32 bndt = FIELD_GET(CBR1_BNDT, cbr1);
+ u32 sdw = 1 << FIELD_GET(CTR1_SDW_LOG2, ctr1);
+ u32 ddw = 1 << FIELD_GET(CTR1_DDW_LOG2, ctr1);
+ u32 sap = FIELD_GET(CTR1_SAP, ctr1);
+ u32 dap = FIELD_GET(CTR1_DAP, ctr1);
+
+ if (!bndt && !FIELD_GET(CLLR_UB1, cllr))
+ dev_err(dev, "null source block size and no update of this value\n");
+ if (bndt % sdw)
+ dev_err(dev, "source block size not multiple of src data width\n");
+ if (FIELD_GET(CTR1_PAM, ctr1) == CTR1_PAM_PACK_UNPACK && bndt % ddw)
+ dev_err(dev, "(un)packing mode w/ src block size not multiple of dst data width\n");
+ if (csar % sdw)
+ dev_err(dev, "unaligned source address not multiple of src data width\n");
+ if (cdar % ddw)
+ dev_err(dev, "unaligned destination address not multiple of dst data width\n");
+ if (sdw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[sap]))
+ dev_err(dev, "double-word source data width not supported on port %u\n", sap);
+ if (ddw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[dap]))
+ dev_err(dev, "double-word destination data width not supported on port %u\n", dap);
+}
+
+static void stm32_dma3_chan_prep_hwdesc(struct stm32_dma3_chan *chan,
+ struct stm32_dma3_swdesc *swdesc,
+ u32 curr, dma_addr_t src, dma_addr_t dst, u32 len,
+ u32 ctr1, u32 ctr2, bool is_last, bool is_cyclic)
+{
+ struct stm32_dma3_hwdesc *hwdesc;
+ dma_addr_t next_lli;
+ u32 next = curr + 1;
+
+ hwdesc = swdesc->lli[curr].hwdesc;
+ hwdesc->ctr1 = ctr1;
+ hwdesc->ctr2 = ctr2;
+ hwdesc->cbr1 = FIELD_PREP(CBR1_BNDT, len);
+ hwdesc->csar = src;
+ hwdesc->cdar = dst;
+
+ if (is_last) {
+ if (is_cyclic)
+ next_lli = swdesc->lli[0].hwdesc_addr;
+ else
+ next_lli = 0;
+ } else {
+ next_lli = swdesc->lli[next].hwdesc_addr;
+ }
+
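+ /* The CLLR update bits select which channel registers are reloaded from the next item */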
+ hwdesc->cllr = 0;
+ if (next_lli) {
+ hwdesc->cllr |= CLLR_UT1 | CLLR_UT2 | CLLR_UB1;
+ hwdesc->cllr |= CLLR_USA | CLLR_UDA | CLLR_ULL;
+ hwdesc->cllr |= (next_lli & CLLR_LA);
+ }
+
+ /*
+ * Make sure to flush the CPU's write buffers so that the descriptors are ready to be read
+ * by DMA3. By explicitly using a write memory barrier here, instead of doing it with writel
+ * to enable the channel, we avoid an unnecessary barrier in the case where the descriptors
+ * are reused (DMA_CTRL_REUSE).
+ */
+ if (is_last)
+ dma_wmb();
+}
+
+static enum dma_slave_buswidth stm32_dma3_get_max_dw(u32 chan_max_burst,
+ enum stm32_dma3_port_data_width port_max_dw,
+ u32 len, dma_addr_t addr)
+{
+ enum dma_slave_buswidth max_dw = get_chan_max_dw(port_max_dw, chan_max_burst);
+
+ /* len and addr must be a multiple of dw */
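+ /* e.g. len = 0x30 with an addr ending in 0x4 limits dw to 4 bytes even if the port allows 8 */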
+ return 1 << __ffs(len | addr | max_dw);
+}
+
+static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 chan_max_burst)
+{
+ u32 max_burst = chan_max_burst ? chan_max_burst / dw : 1;
+
+ /* len is a multiple of dw, so if len is < chan_max_burst, shorten burst */
+ if (len < chan_max_burst)
+ max_burst = len / dw;
+
+ /*
+ * HW doesn't modify the burst if burst size <= half of the fifo size.
+ * If len is not a multiple of burst size, last burst is shortened by HW.
+ */
+ return max_burst;
+}
+
+static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transfer_direction dir,
+ u32 *ccr, u32 *ctr1, u32 *ctr2,
+ dma_addr_t src_addr, dma_addr_t dst_addr, u32 len)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ struct dma_device dma_device = ddata->dma_dev;
+ u32 sdw, ddw, sbl_max, dbl_max, tcem, init_dw, init_bl_max;
+ u32 _ctr1 = 0, _ctr2 = 0;
+ u32 ch_conf = chan->dt_config.ch_conf;
+ u32 tr_conf = chan->dt_config.tr_conf;
+ u32 sap = FIELD_GET(STM32_DMA3_DT_SAP, tr_conf), sap_max_dw;
+ u32 dap = FIELD_GET(STM32_DMA3_DT_DAP, tr_conf), dap_max_dw;
+
+ dev_dbg(chan2dev(chan), "%s from %pad to %pad\n",
+ dmaengine_get_direction_text(dir), &src_addr, &dst_addr);
+
+ sdw = chan->dma_config.src_addr_width ? : get_chan_max_dw(sap, chan->max_burst);
+ ddw = chan->dma_config.dst_addr_width ? : get_chan_max_dw(dap, chan->max_burst);
+ sbl_max = chan->dma_config.src_maxburst ? : 1;
+ dbl_max = chan->dma_config.dst_maxburst ? : 1;
+
+ /* Following conditions would raise User Setting Error interrupt */
+ if (!(dma_device.src_addr_widths & BIT(sdw)) || !(dma_device.dst_addr_widths & BIT(ddw))) {
+ dev_err(chan2dev(chan), "Bus width (src=%u, dst=%u) not supported\n", sdw, ddw);
+ return -EINVAL;
+ }
+
+ if (ddata->ports_max_dw[1] == DW_INVALID && (sap || dap)) {
+ dev_err(chan2dev(chan), "Only one master port, port 1 is not supported\n");
+ return -EINVAL;
+ }
+
+ sap_max_dw = ddata->ports_max_dw[sap];
+ dap_max_dw = ddata->ports_max_dw[dap];
+ if ((port_is_ahb(sap_max_dw) && sdw == DMA_SLAVE_BUSWIDTH_8_BYTES) ||
+ (port_is_ahb(dap_max_dw) && ddw == DMA_SLAVE_BUSWIDTH_8_BYTES)) {
+ dev_err(chan2dev(chan),
+ "8 bytes buswidth (src=%u, dst=%u) not supported on port (sap=%u, dap=%u\n",
+ sdw, ddw, sap, dap);
+ return -EINVAL;
+ }
+
+ if (FIELD_GET(STM32_DMA3_DT_SINC, tr_conf))
+ _ctr1 |= CTR1_SINC;
+ if (sap)
+ _ctr1 |= CTR1_SAP;
+ if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf))
+ _ctr1 |= CTR1_DINC;
+ if (dap)
+ _ctr1 |= CTR1_DAP;
+
+ _ctr2 |= FIELD_PREP(CTR2_REQSEL, chan->dt_config.req_line) & ~CTR2_SWREQ;
+ if (FIELD_GET(STM32_DMA3_DT_BREQ, tr_conf))
+ _ctr2 |= CTR2_BREQ;
+ if (dir == DMA_DEV_TO_MEM && FIELD_GET(STM32_DMA3_DT_PFREQ, tr_conf))
+ _ctr2 |= CTR2_PFREQ;
+ tcem = FIELD_GET(STM32_DMA3_DT_TCEM, tr_conf);
+ _ctr2 |= FIELD_PREP(CTR2_TCEM, tcem);
+
+ /* Store TCEM to know on which event TC flag occurred */
+ chan->tcem = tcem;
+ /* Store direction for residue computation */
+ chan->dma_config.direction = dir;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ /* Set destination (device) data width and burst */
+ ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw,
+ len, dst_addr));
+ dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst));
+
+ /* Set source (memory) data width and burst */
+ sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
+ sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst);
+
+ _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
+ _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
+ _ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw));
+ _ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1);
+
+ if (ddw != sdw) {
+ _ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
+ /* Should never reach this case as ddw is clamped down */
+ if (len & (ddw - 1)) {
+ dev_err(chan2dev(chan),
+ "Packing mode is enabled and len is not multiple of ddw");
+ return -EINVAL;
+ }
+ }
+
+ /* dst = dev */
+ _ctr2 |= CTR2_DREQ;
+
+ break;
+
+ case DMA_DEV_TO_MEM:
+ /* Set source (device) data width and burst */
+ sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw,
+ len, src_addr));
+ sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst));
+
+ /* Set destination (memory) data width and burst */
+ ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
+ dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst);
+
+ _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
+ _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
+ _ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw));
+ _ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1);
+
+ if (ddw != sdw) {
+ _ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
+ /* Should never reach this case as ddw is clamped down */
+ if (len & (ddw - 1)) {
+ dev_err(chan2dev(chan),
+ "Packing mode is enabled and len is not multiple of ddw\n");
+ return -EINVAL;
+ }
+ }
+
+ /* dst = mem */
+ _ctr2 &= ~CTR2_DREQ;
+
+ break;
+
+ case DMA_MEM_TO_MEM:
+ /* Set source (memory) data width and burst */
+ init_dw = sdw;
+ init_bl_max = sbl_max;
+ sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
+ sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst);
+ if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
+ sdw = min_t(u32, init_dw, sdw);
+ sbl_max = min_t(u32, init_bl_max,
+ stm32_dma3_get_max_burst(len, sdw, chan->max_burst));
+ }
+
+ /* Set destination (memory) data width and burst */
+ init_dw = ddw;
+ init_bl_max = dbl_max;
+ ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
+ dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst);
+ if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
+ ddw = min_t(u32, init_dw, ddw);
+ dbl_max = min_t(u32, init_bl_max,
+ stm32_dma3_get_max_burst(len, ddw, chan->max_burst));
+ }
+
+ _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
+ _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
+ _ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw));
+ _ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1);
+
+ if (ddw != sdw) {
+ _ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
+ /* Should never reach this case as ddw is clamped down */
+ if (len & (ddw - 1)) {
+ dev_err(chan2dev(chan),
+ "Packing mode is enabled and len is not multiple of ddw");
+ return -EINVAL;
+ }
+ }
+
+ /* CTR2_REQSEL/DREQ/BREQ/PFREQ are ignored with CTR2_SWREQ=1 */
+ _ctr2 |= CTR2_SWREQ;
+
+ break;
+
+ default:
+ dev_err(chan2dev(chan), "Direction %s not supported\n",
+ dmaengine_get_direction_text(dir));
+ return -EINVAL;
+ }
+
+ *ccr |= FIELD_PREP(CCR_PRIO, FIELD_GET(STM32_DMA3_DT_PRIO, ch_conf));
+ *ctr1 = _ctr1;
+ *ctr2 = _ctr2;
+
+ dev_dbg(chan2dev(chan), "%s: sdw=%u bytes sbl=%u beats ddw=%u bytes dbl=%u beats\n",
+ __func__, sdw, sbl_max, ddw, dbl_max);
+
+ return 0;
+}
+
+static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ struct virt_dma_desc *vdesc;
+ struct stm32_dma3_hwdesc *hwdesc;
+ u32 id = chan->id;
+ u32 csr, ccr;
+
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc) {
+ chan->swdesc = NULL;
+ return;
+ }
+ list_del(&vdesc->node);
+
+ chan->swdesc = to_stm32_dma3_swdesc(vdesc);
+ hwdesc = chan->swdesc->lli[0].hwdesc;
+
+ stm32_dma3_chan_dump_hwdesc(chan, chan->swdesc);
+
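+ /* Program the channel from the first linked-list item; the next items are fetched via CLLR */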
+ writel_relaxed(chan->swdesc->ccr, ddata->base + STM32_DMA3_CCR(id));
+ writel_relaxed(hwdesc->ctr1, ddata->base + STM32_DMA3_CTR1(id));
+ writel_relaxed(hwdesc->ctr2, ddata->base + STM32_DMA3_CTR2(id));
+ writel_relaxed(hwdesc->cbr1, ddata->base + STM32_DMA3_CBR1(id));
+ writel_relaxed(hwdesc->csar, ddata->base + STM32_DMA3_CSAR(id));
+ writel_relaxed(hwdesc->cdar, ddata->base + STM32_DMA3_CDAR(id));
+ writel_relaxed(hwdesc->cllr, ddata->base + STM32_DMA3_CLLR(id));
+
+ /* Clear any pending interrupts */
+ csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(id));
+ if (csr & CSR_ALL_F)
+ writel_relaxed(csr, ddata->base + STM32_DMA3_CFCR(id));
+
+ stm32_dma3_chan_dump_reg(chan);
+
+ ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(id));
+ writel_relaxed(ccr | CCR_EN, ddata->base + STM32_DMA3_CCR(id));
+
+ chan->dma_status = DMA_IN_PROGRESS;
+
+ dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+}
+
+static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ u32 csr, ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & ~CCR_EN;
+ int ret = 0;
+
+ if (susp)
+ ccr |= CCR_SUSP;
+ else
+ ccr &= ~CCR_SUSP;
+
+ writel_relaxed(ccr, ddata->base + STM32_DMA3_CCR(chan->id));
+
+ if (susp) {
+ ret = readl_relaxed_poll_timeout_atomic(ddata->base + STM32_DMA3_CSR(chan->id), csr,
+ csr & CSR_SUSPF, 1, 10);
+ if (!ret)
+ writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
+
+ stm32_dma3_chan_dump_reg(chan);
+ }
+
+ return ret;
+}
+
+static void stm32_dma3_chan_reset(struct stm32_dma3_chan *chan)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ u32 ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & ~CCR_EN;
+
+ writel_relaxed(ccr | CCR_RESET, ddata->base + STM32_DMA3_CCR(chan->id));
+}
+
+static int stm32_dma3_chan_get_curr_hwdesc(struct stm32_dma3_swdesc *swdesc, u32 cllr, u32 *residue)
+{
+ u32 i, lli_offset, next_lli_offset = cllr & CLLR_LA;
+
+ /* If cllr is null, the current item is either the last or the only one */
+ if (!cllr)
+ return swdesc->lli_size - 1;
+
+ /* In cyclic mode, take the fast path: first check whether we are on the last item */
+ if (swdesc->cyclic && next_lli_offset == (swdesc->lli[0].hwdesc_addr & CLLR_LA))
+ return swdesc->lli_size - 1;
+
+ /* As transfer is in progress, look backward from the last item */
+ for (i = swdesc->lli_size - 1; i > 0; i--) {
+ *residue += FIELD_GET(CBR1_BNDT, swdesc->lli[i].hwdesc->cbr1);
+ lli_offset = swdesc->lli[i].hwdesc_addr & CLLR_LA;
+ if (lli_offset == next_lli_offset)
+ return i - 1;
+ }
+
+ return -EINVAL;
+}
+
+static void stm32_dma3_chan_set_residue(struct stm32_dma3_chan *chan,
+ struct stm32_dma3_swdesc *swdesc,
+ struct dma_tx_state *txstate)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ struct device *dev = chan2dev(chan);
+ struct stm32_dma3_hwdesc *hwdesc;
+ u32 residue, curr_lli, csr, cdar, cbr1, cllr, bndt, fifol;
+ bool pack_unpack;
+ int ret;
+
+ csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(chan->id));
+ if (!(csr & CSR_IDLEF) && chan->dma_status != DMA_PAUSED) {
+ /* Suspend current transfer to read registers for a snapshot */
+ writel_relaxed(swdesc->ccr | CCR_SUSP, ddata->base + STM32_DMA3_CCR(chan->id));
+ ret = readl_relaxed_poll_timeout_atomic(ddata->base + STM32_DMA3_CSR(chan->id), csr,
+ csr & (CSR_SUSPF | CSR_IDLEF), 1, 10);
+
+ if (ret || ((csr & CSR_TCF) && (csr & CSR_IDLEF))) {
+ writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
+ writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id));
+ if (ret)
+ dev_err(dev, "Channel suspension timeout, csr=%08x\n", csr);
+ }
+ }
+
+ /* If channel is still active (CSR_IDLEF is not set), can't get a reliable residue */
+ if (!(csr & CSR_IDLEF))
+ dev_warn(dev, "Can't get residue: channel still active, csr=%08x\n", csr);
+
+ /*
+ * If channel is not suspended, but Idle and Transfer Complete are set,
+ * linked-list is over, no residue
+ */
+ if (!(csr & CSR_SUSPF) && (csr & CSR_TCF) && (csr & CSR_IDLEF))
+ return;
+
+ /* Read registers to have a snapshot */
+ cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id));
+ cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id));
+ cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id));
+
+ /* Resume current transfer */
+ if (csr & CSR_SUSPF) {
+ writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
+ writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id));
+ }
+
+ /* Add current BNDT */
+ bndt = FIELD_GET(CBR1_BNDT, cbr1);
+ residue = bndt;
+
+ /* Get current hwdesc and cumulate residue of pending hwdesc BNDT */
+ ret = stm32_dma3_chan_get_curr_hwdesc(swdesc, cllr, &residue);
+ if (ret < 0) {
+ dev_err(chan2dev(chan), "Can't get residue: current hwdesc not found\n");
+ return;
+ }
+ curr_lli = ret;
+
+ /* Read current FIFO level - in units of programmed destination data width */
+ hwdesc = swdesc->lli[curr_lli].hwdesc;
+ fifol = FIELD_GET(CSR_FIFOL, csr) * (1 << FIELD_GET(CTR1_DDW_LOG2, hwdesc->ctr1));
+ /* If the FIFO contains as many bytes as its size, it can't contain more */
+ if (fifol == (1 << (chan->fifo_size + 1)))
+ goto skip_fifol_update;
+
+ /*
+ * In case of PACKING (Destination burst length > Source burst length) or UNPACKING
+ * (Source burst length > Destination burst length), bytes could be pending in the FIFO
+ * (to be packed up to Destination burst length or unpacked into Destination burst length
+ * chunks).
+ * BNDT is not reliable, as it reflects the number of bytes read from the source but not the
+ * number of bytes written to the destination.
+ * FIFOL is also not sufficient, because it reflects the number of available write beats in
+ * units of Destination data width but not the bytes not yet packed or unpacked.
+ * In case of Destination increment DINC, it is possible to compute the number of bytes in
+ * the FIFO:
+ * fifol_in_bytes = bytes_read - bytes_written.
+ */
+ pack_unpack = !!(FIELD_GET(CTR1_PAM, hwdesc->ctr1) == CTR1_PAM_PACK_UNPACK);
+ if (pack_unpack && (hwdesc->ctr1 & CTR1_DINC)) {
+ int bytes_read = FIELD_GET(CBR1_BNDT, hwdesc->cbr1) - bndt;
+ int bytes_written = cdar - hwdesc->cdar;
+
+ if (bytes_read > 0)
+ fifol = bytes_read - bytes_written;
+ }
+
+skip_fifol_update:
+ if (fifol) {
+ dev_dbg(chan2dev(chan), "%u byte(s) in the FIFO\n", fifol);
+ dma_set_in_flight_bytes(txstate, fifol);
+ /*
+ * Residue is already accurate for DMA_MEM_TO_DEV since BNDT reflects the data read from
+ * the source memory buffer. For DMA_DEV_TO_MEM, fifol must be added to the residue
+ * because these bytes are not yet written to the destination memory buffer.
+ */
+ if (chan->dma_config.direction == DMA_DEV_TO_MEM)
+ residue += fifol;
+ }
+ dma_set_residue(txstate, residue);
+}
+
+static int stm32_dma3_chan_stop(struct stm32_dma3_chan *chan)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ u32 ccr;
+ int ret = 0;
+
+ chan->dma_status = DMA_COMPLETE;
+
+ /* Disable interrupts */
+ ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id));
+ writel_relaxed(ccr & ~(CCR_ALLIE | CCR_EN), ddata->base + STM32_DMA3_CCR(chan->id));
+
+ if (!(ccr & CCR_SUSP) && (ccr & CCR_EN)) {
+ /* Suspend the channel */
+ ret = stm32_dma3_chan_suspend(chan, true);
+ if (ret)
+ dev_warn(chan2dev(chan), "%s: timeout, data might be lost\n", __func__);
+ }
+
+ /*
+ * Reset the channel: this resets the FIFO, the channel internal state, and the CCR_EN
+ * and CCR_SUSP bits.
+ */
+ stm32_dma3_chan_reset(chan);
+
+ return ret;
+}
+
+static void stm32_dma3_chan_complete(struct stm32_dma3_chan *chan)
+{
+ if (!chan->swdesc)
+ return;
+
+ vchan_cookie_complete(&chan->swdesc->vdesc);
+ chan->swdesc = NULL;
+ stm32_dma3_chan_start(chan);
+}
+
+static irqreturn_t stm32_dma3_chan_irq(int irq, void *devid)
+{
+ struct stm32_dma3_chan *chan = devid;
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ u32 misr, csr, ccr;
+
+ spin_lock(&chan->vchan.lock);
+
+ misr = readl_relaxed(ddata->base + STM32_DMA3_MISR);
+ if (!(misr & MISR_MIS(chan->id))) {
+ spin_unlock(&chan->vchan.lock);
+ return IRQ_NONE;
+ }
+
+ csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(chan->id));
+ ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & CCR_ALLIE;
+
+ if (csr & CSR_TCF && ccr & CCR_TCIE) {
+ if (chan->swdesc->cyclic)
+ vchan_cyclic_callback(&chan->swdesc->vdesc);
+ else
+ stm32_dma3_chan_complete(chan);
+ }
+
+ if (csr & CSR_USEF && ccr & CCR_USEIE) {
+ dev_err(chan2dev(chan), "User setting error\n");
+ chan->dma_status = DMA_ERROR;
+ /* CCR.EN automatically cleared by HW */
+ stm32_dma3_check_user_setting(chan);
+ stm32_dma3_chan_reset(chan);
+ }
+
+ if (csr & CSR_ULEF && ccr & CCR_ULEIE) {
+ dev_err(chan2dev(chan), "Update link transfer error\n");
+ chan->dma_status = DMA_ERROR;
+ /* CCR.EN automatically cleared by HW */
+ stm32_dma3_chan_reset(chan);
+ }
+
+ if (csr & CSR_DTEF && ccr & CCR_DTEIE) {
+ dev_err(chan2dev(chan), "Data transfer error\n");
+ chan->dma_status = DMA_ERROR;
+ /* CCR.EN automatically cleared by HW */
+ stm32_dma3_chan_reset(chan);
+ }
+
+ /*
+ * The Half Transfer Interrupt may be disabled but the Half Transfer Flag can still be set,
+ * so make sure the HTF flag is cleared along with the other flags.
+ */
+ csr &= (ccr | CCR_HTIE);
+
+ if (csr)
+ writel_relaxed(csr, ddata->base + STM32_DMA3_CFCR(chan->id));
+
+ spin_unlock(&chan->vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_dma3_alloc_chan_resources(struct dma_chan *c)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ u32 id = chan->id, csemcr, ccid;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ddata->dma_dev.dev);
+ if (ret < 0)
+ return ret;
+
+ /* Ensure the channel is free */
+ if (chan->semaphore_mode &&
+ readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)) & CSEMCR_SEM_MUTEX) {
+ ret = -EBUSY;
+ goto err_put_sync;
+ }
+
+ chan->lli_pool = dmam_pool_create(dev_name(&c->dev->device), c->device->dev,
+ sizeof(struct stm32_dma3_hwdesc),
+ __alignof__(struct stm32_dma3_hwdesc), SZ_64K);
+ if (!chan->lli_pool) {
+ dev_err(chan2dev(chan), "Failed to create LLI pool\n");
+ ret = -ENOMEM;
+ goto err_put_sync;
+ }
+
+ /* Take the channel semaphore */
+ if (chan->semaphore_mode) {
+ writel_relaxed(CSEMCR_SEM_MUTEX, ddata->base + STM32_DMA3_CSEMCR(id));
+ csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(id));
+ ccid = FIELD_GET(CSEMCR_SEM_CCID, csemcr);
+ /* Check that the channel was successfully taken */
+ if (ccid != CCIDCFGR_CID1) {
+ dev_err(chan2dev(chan), "Not under CID1 control (in-use by CID%d)\n", ccid);
+ ret = -EPERM;
+ goto err_pool_destroy;
+ }
+ dev_dbg(chan2dev(chan), "Under CID1 control (semcr=0x%08x)\n", csemcr);
+ }
+
+ return 0;
+
+err_pool_destroy:
+ dmam_pool_destroy(chan->lli_pool);
+ chan->lli_pool = NULL;
+
+err_put_sync:
+ pm_runtime_put_sync(ddata->dma_dev.dev);
+
+ return ret;
+}
+
+static void stm32_dma3_free_chan_resources(struct dma_chan *c)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ unsigned long flags;
+
+ /* Ensure channel is in idle state */
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ stm32_dma3_chan_stop(chan);
+ chan->swdesc = NULL;
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_free_chan_resources(to_virt_chan(c));
+
+ dmam_pool_destroy(chan->lli_pool);
+ chan->lli_pool = NULL;
+
+ /* Release the channel semaphore */
+ if (chan->semaphore_mode)
+ writel_relaxed(0, ddata->base + STM32_DMA3_CSEMCR(chan->id));
+
+ pm_runtime_put_sync(ddata->dma_dev.dev);
+
+ /* Reset configuration */
+ memset(&chan->dt_config, 0, sizeof(chan->dt_config));
+ memset(&chan->dma_config, 0, sizeof(chan->dma_config));
+ chan->config_set = 0;
+}
+
+static void stm32_dma3_init_chan_config_for_memcpy(struct stm32_dma3_chan *chan,
+ dma_addr_t dst, dma_addr_t src)
+{
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ u32 dw = get_chan_max_dw(ddata->ports_max_dw[0], chan->max_burst); /* port 0 by default */
+ u32 burst = chan->max_burst / dw;
+
+ /* Initialize dt_config if channel not pre-configured through DT */
+ if (!(chan->config_set & STM32_DMA3_CFG_SET_DT)) {
+ chan->dt_config.ch_conf = FIELD_PREP(STM32_DMA3_DT_PRIO, CCR_PRIO_VERY_HIGH);
+ chan->dt_config.ch_conf |= FIELD_PREP(STM32_DMA3_DT_FIFO, chan->fifo_size);
+ chan->dt_config.tr_conf = STM32_DMA3_DT_SINC | STM32_DMA3_DT_DINC;
+ chan->dt_config.tr_conf |= FIELD_PREP(STM32_DMA3_DT_TCEM, CTR2_TCEM_CHANNEL);
+ }
+
+ /* Initialize dma_config if dmaengine_slave_config() not used */
+ if (!(chan->config_set & STM32_DMA3_CFG_SET_DMA)) {
+ chan->dma_config.src_addr_width = dw;
+ chan->dma_config.dst_addr_width = dw;
+ chan->dma_config.src_maxburst = burst;
+ chan->dma_config.dst_maxburst = burst;
+ chan->dma_config.src_addr = src;
+ chan->dma_config.dst_addr = dst;
+ }
+}
+
+static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_chan *c,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ struct stm32_dma3_swdesc *swdesc;
+ size_t next_size, offset;
+ u32 count, i, ctr1, ctr2;
+
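+ /* One linked-list item is needed per STM32_DMA3_MAX_BLOCK_SIZE chunk of the copy */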
+ count = DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE);
+
+ swdesc = stm32_dma3_chan_desc_alloc(chan, count);
+ if (!swdesc)
+ return NULL;
+
+ if (chan->config_set != STM32_DMA3_CFG_SET_BOTH)
+ stm32_dma3_init_chan_config_for_memcpy(chan, dst, src);
+
+ for (i = 0, offset = 0; offset < len; i++, offset += next_size) {
+ size_t remaining;
+ int ret;
+
+ remaining = len - offset;
+ next_size = min_t(size_t, remaining, STM32_DMA3_MAX_BLOCK_SIZE);
+
+ ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_MEM, &swdesc->ccr, &ctr1, &ctr2,
+ src + offset, dst + offset, next_size);
+ if (ret)
+ goto err_desc_free;
+
+ stm32_dma3_chan_prep_hwdesc(chan, swdesc, i, src + offset, dst + offset, next_size,
+ ctr1, ctr2, next_size == remaining, false);
+ }
+
+ /* Enable Error interrupts */
+ swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
+ /* Enable Transfer state interrupts */
+ swdesc->ccr |= CCR_TCIE;
+
+ swdesc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags);
+
+err_desc_free:
+ stm32_dma3_chan_desc_free(chan, swdesc);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan *c,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ struct stm32_dma3_swdesc *swdesc;
+ struct scatterlist *sg;
+ size_t len;
+ dma_addr_t sg_addr, dev_addr, src, dst;
+ u32 i, j, count, ctr1, ctr2;
+ int ret;
+
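+ /* Count the linked-list items: sg entries larger than the max block size need extra items */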
+ count = sg_len;
+ for_each_sg(sgl, sg, sg_len, i) {
+ len = sg_dma_len(sg);
+ if (len > STM32_DMA3_MAX_BLOCK_SIZE)
+ count += DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE) - 1;
+ }
+
+ swdesc = stm32_dma3_chan_desc_alloc(chan, count);
+ if (!swdesc)
+ return NULL;
+
+ /* sg_len and i correspond to the initial sgl; count and j correspond to the hwdesc LL */
+ j = 0;
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_addr = sg_dma_address(sg);
+ dev_addr = (dir == DMA_MEM_TO_DEV) ? chan->dma_config.dst_addr :
+ chan->dma_config.src_addr;
+ len = sg_dma_len(sg);
+
+ do {
+ size_t chunk = min_t(size_t, len, STM32_DMA3_MAX_BLOCK_SIZE);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = sg_addr;
+ dst = dev_addr;
+
+ ret = stm32_dma3_chan_prep_hw(chan, dir, &swdesc->ccr, &ctr1, &ctr2,
+ src, dst, chunk);
+
+ if (FIELD_GET(CTR1_DINC, ctr1))
+ dev_addr += chunk;
+ } else { /* (dir == DMA_DEV_TO_MEM || dir == DMA_MEM_TO_MEM) */
+ src = dev_addr;
+ dst = sg_addr;
+
+ ret = stm32_dma3_chan_prep_hw(chan, dir, &swdesc->ccr, &ctr1, &ctr2,
+ src, dst, chunk);
+
+ if (FIELD_GET(CTR1_SINC, ctr1))
+ dev_addr += chunk;
+ }
+
+ if (ret)
+ goto err_desc_free;
+
+ stm32_dma3_chan_prep_hwdesc(chan, swdesc, j, src, dst, chunk,
+ ctr1, ctr2, j == (count - 1), false);
+
+ sg_addr += chunk;
+ len -= chunk;
+ j++;
+ } while (len);
+ }
+
+ /* Enable Error interrupts */
+ swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
+ /* Enable Transfer state interrupts */
+ swdesc->ccr |= CCR_TCIE;
+
+ swdesc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags);
+
+err_desc_free:
+ stm32_dma3_chan_desc_free(chan, swdesc);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_cyclic(struct dma_chan *c,
+ dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ struct stm32_dma3_swdesc *swdesc;
+ dma_addr_t src, dst;
+ u32 count, i, ctr1, ctr2;
+ int ret;
+
+ if (!buf_len || !period_len || period_len > STM32_DMA3_MAX_BLOCK_SIZE) {
+ dev_err(chan2dev(chan), "Invalid buffer/period length\n");
+ return NULL;
+ }
+
+ if (buf_len % period_len) {
+ dev_err(chan2dev(chan), "Buffer length not multiple of period length\n");
+ return NULL;
+ }
+
+ count = buf_len / period_len;
+ swdesc = stm32_dma3_chan_desc_alloc(chan, count);
+ if (!swdesc)
+ return NULL;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf_addr;
+ dst = chan->dma_config.dst_addr;
+
+ ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_DEV, &swdesc->ccr, &ctr1, &ctr2,
+ src, dst, period_len);
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = chan->dma_config.src_addr;
+ dst = buf_addr;
+
+ ret = stm32_dma3_chan_prep_hw(chan, DMA_DEV_TO_MEM, &swdesc->ccr, &ctr1, &ctr2,
+ src, dst, period_len);
+ } else {
+ dev_err(chan2dev(chan), "Invalid direction\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto err_desc_free;
+
+ for (i = 0; i < count; i++) {
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf_addr + i * period_len;
+ dst = chan->dma_config.dst_addr;
+ } else { /* (dir == DMA_DEV_TO_MEM) */
+ src = chan->dma_config.src_addr;
+ dst = buf_addr + i * period_len;
+ }
+
+ stm32_dma3_chan_prep_hwdesc(chan, swdesc, i, src, dst, period_len,
+ ctr1, ctr2, i == (count - 1), true);
+ }
+
+ /* Enable Error interrupts */
+ swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
+ /* Enable Transfer state interrupts */
+ swdesc->ccr |= CCR_TCIE;
+
+ swdesc->cyclic = true;
+
+ return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags);
+
+err_desc_free:
+ stm32_dma3_chan_desc_free(chan, swdesc);
+
+ return NULL;
+}
+
+static void stm32_dma3_caps(struct dma_chan *c, struct dma_slave_caps *caps)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+
+ if (!chan->fifo_size) {
+ caps->max_burst = 0;
+ caps->src_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ caps->dst_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ } else {
+ /* Burst transfer should not exceed half of the fifo size */
+ caps->max_burst = chan->max_burst;
+ if (caps->max_burst < DMA_SLAVE_BUSWIDTH_8_BYTES) {
+ caps->src_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ caps->dst_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ }
+ }
+}
+
+static int stm32_dma3_config(struct dma_chan *c, struct dma_slave_config *config)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+
+ memcpy(&chan->dma_config, config, sizeof(*config));
+ chan->config_set |= STM32_DMA3_CFG_SET_DMA;
+
+ return 0;
+}
+
+static int stm32_dma3_pause(struct dma_chan *c)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ int ret;
+
+ ret = stm32_dma3_chan_suspend(chan, true);
+ if (ret)
+ return ret;
+
+ chan->dma_status = DMA_PAUSED;
+
+ dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+
+ return 0;
+}
+
+static int stm32_dma3_resume(struct dma_chan *c)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+
+ stm32_dma3_chan_suspend(chan, false);
+
+ chan->dma_status = DMA_IN_PROGRESS;
+
+ dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+
+ return 0;
+}
+
+static int stm32_dma3_terminate_all(struct dma_chan *c)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (chan->swdesc) {
+ vchan_terminate_vdesc(&chan->swdesc->vdesc);
+ chan->swdesc = NULL;
+ }
+
+ stm32_dma3_chan_stop(chan);
+
+ vchan_get_all_descriptors(&chan->vchan, &head);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+
+ dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan);
+
+ return 0;
+}
+
+static void stm32_dma3_synchronize(struct dma_chan *c)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+
+ vchan_synchronize(&chan->vchan);
+}
+
+static enum dma_status stm32_dma3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ struct stm32_dma3_swdesc *swdesc = NULL;
+ enum dma_status status;
+ unsigned long flags;
+ struct virt_dma_desc *vd;
+
+ status = dma_cookie_status(c, cookie, txstate);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ if (!txstate)
+ return chan->dma_status;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ vd = vchan_find_desc(&chan->vchan, cookie);
+ if (vd)
+ swdesc = to_stm32_dma3_swdesc(vd);
+ else if (chan->swdesc && chan->swdesc->vdesc.tx.cookie == cookie)
+ swdesc = chan->swdesc;
+
+ /* Get residue/in_flight_bytes only if a transfer is currently running (swdesc != NULL) */
+ if (swdesc)
+ stm32_dma3_chan_set_residue(chan, swdesc, txstate);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return chan->dma_status;
+}
+
+static void stm32_dma3_issue_pending(struct dma_chan *c)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
+ dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ stm32_dma3_chan_start(chan);
+ }
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static bool stm32_dma3_filter_fn(struct dma_chan *c, void *fn_param)
+{
+ struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+ struct stm32_dma3_dt_conf *conf = fn_param;
+ u32 mask, semcr;
+ int ret;
+
+ dev_dbg(c->device->dev, "%s(%s): req_line=%d ch_conf=%08x tr_conf=%08x\n",
+ __func__, dma_chan_name(c), conf->req_line, conf->ch_conf, conf->tr_conf);
+
+ if (!of_property_read_u32(c->device->dev->of_node, "dma-channel-mask", &mask))
+ if (!(mask & BIT(chan->id)))
+ return false;
+
+ ret = pm_runtime_resume_and_get(ddata->dma_dev.dev);
+ if (ret < 0)
+ return false;
+ semcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id));
+ pm_runtime_put_sync(ddata->dma_dev.dev);
+
+ /* Check if chan is free */
+ if (semcr & CSEMCR_SEM_MUTEX)
+ return false;
+
+ /* Check that the channel FIFO size matches the requested one */
+ if (FIELD_GET(STM32_DMA3_DT_FIFO, conf->ch_conf) != chan->fifo_size)
+ return false;
+
+ return true;
+}
+
+static struct dma_chan *stm32_dma3_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma)
+{
+ struct stm32_dma3_ddata *ddata = ofdma->of_dma_data;
+ dma_cap_mask_t mask = ddata->dma_dev.cap_mask;
+ struct stm32_dma3_dt_conf conf;
+ struct stm32_dma3_chan *chan;
+ struct dma_chan *c;
+
+ if (dma_spec->args_count < 3) {
+ dev_err(ddata->dma_dev.dev, "Invalid args count\n");
+ return NULL;
+ }
+
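+ /* Client cells: the request line, then the channel (.ch_conf) and transfer (.tr_conf) configuration */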
+ conf.req_line = dma_spec->args[0];
+ conf.ch_conf = dma_spec->args[1];
+ conf.tr_conf = dma_spec->args[2];
+
+ if (conf.req_line >= ddata->dma_requests) {
+ dev_err(ddata->dma_dev.dev, "Invalid request line\n");
+ return NULL;
+ }
+
+ /* Request a DMA channel from the generic DMA controller list */
+ c = dma_request_channel(mask, stm32_dma3_filter_fn, &conf);
+ if (!c) {
+ dev_err(ddata->dma_dev.dev, "No suitable channel found\n");
+ return NULL;
+ }
+
+ chan = to_stm32_dma3_chan(c);
+ chan->dt_config = conf;
+ chan->config_set |= STM32_DMA3_CFG_SET_DT;
+
+ return c;
+}
+
+static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata)
+{
+ u32 chan_reserved, mask = 0, i, ccidcfgr, invalid_cid = 0;
+
+ /* Reserve Secure channels */
+ chan_reserved = readl_relaxed(ddata->base + STM32_DMA3_SECCFGR);
+
+ /*
+ * CID filtering must be configured to ensure that the DMA3 channel will inherit the CID of
+ * the processor which is configuring and using the given channel.
+ * In case CID filtering is not configured, dma-channel-mask property can be used to
+ * specify available DMA channels to the kernel.
+ */
+ of_property_read_u32(ddata->dma_dev.dev->of_node, "dma-channel-mask", &mask);
+
+ /*
+ * Reserve channels that are not CID-filtered and not in dma-channel-mask, whose static
+ * CID is not CID1, or whose semaphore does not allow CID1.
+ */
+ for (i = 0; i < ddata->dma_channels; i++) {
+ ccidcfgr = readl_relaxed(ddata->base + STM32_DMA3_CCIDCFGR(i));
+
+ if (!(ccidcfgr & CCIDCFGR_CFEN)) { /* !CID-filtered */
+ invalid_cid |= BIT(i);
+ if (!(mask & BIT(i))) /* Not in dma-channel-mask */
+ chan_reserved |= BIT(i);
+ } else { /* CID-filtered */
+ if (!(ccidcfgr & CCIDCFGR_SEM_EN)) { /* Static CID mode */
+ if (FIELD_GET(CCIDCFGR_SCID, ccidcfgr) != CCIDCFGR_CID1)
+ chan_reserved |= BIT(i);
+ } else { /* Semaphore mode */
+ if (!FIELD_GET(CCIDCFGR_SEM_WLIST_CID1, ccidcfgr))
+ chan_reserved |= BIT(i);
+ ddata->chans[i].semaphore_mode = true;
+ }
+ }
+ dev_dbg(ddata->dma_dev.dev, "chan%d: %s mode, %s\n", i,
+ !(ccidcfgr & CCIDCFGR_CFEN) ? "!CID-filtered" :
+ ddata->chans[i].semaphore_mode ? "Semaphore" : "Static CID",
+ (chan_reserved & BIT(i)) ? "denied" :
+ mask & BIT(i) ? "force allowed" : "allowed");
+ }
+
+ if (invalid_cid)
+ dev_warn(ddata->dma_dev.dev, "chan%*pbl have invalid CID configuration\n",
+ ddata->dma_channels, &invalid_cid);
+
+ return chan_reserved;
+}
+
+static const struct of_device_id stm32_dma3_of_match[] = {
+ { .compatible = "st,stm32mp25-dma3", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_dma3_of_match);
+
+static int stm32_dma3_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct stm32_dma3_ddata *ddata;
+ struct reset_control *reset;
+ struct stm32_dma3_chan *chan;
+ struct dma_device *dma_dev;
+ u32 master_ports, chan_reserved, i, verr;
+ u64 hwcfgr;
+ int ret;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, ddata);
+
+ dma_dev = &ddata->dma_dev;
+
+ ddata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
+
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ddata->clk), "Failed to get clk\n");
+
+ reset = devm_reset_control_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset), "Failed to get reset\n");
+
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to enable clk\n");
+
+ reset_control_reset(reset);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->dev = &pdev->dev;
+ /*
+ * This controller supports up to 8-byte buswidth depending on the port used and the
+ * channel, and can only access addresses aligned on a multiple of the buswidth.
+ */
+ dma_dev->copy_align = DMAENGINE_ALIGN_8_BYTES;
+ dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | BIT(DMA_MEM_TO_MEM);
+
+ dma_dev->descriptor_reuse = true;
+ dma_dev->max_sg_burst = STM32_DMA3_MAX_SEG_SIZE;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma_dev->device_alloc_chan_resources = stm32_dma3_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = stm32_dma3_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = stm32_dma3_prep_dma_memcpy;
+ dma_dev->device_prep_slave_sg = stm32_dma3_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = stm32_dma3_prep_dma_cyclic;
+ dma_dev->device_caps = stm32_dma3_caps;
+ dma_dev->device_config = stm32_dma3_config;
+ dma_dev->device_pause = stm32_dma3_pause;
+ dma_dev->device_resume = stm32_dma3_resume;
+ dma_dev->device_terminate_all = stm32_dma3_terminate_all;
+ dma_dev->device_synchronize = stm32_dma3_synchronize;
+ dma_dev->device_tx_status = stm32_dma3_tx_status;
+ dma_dev->device_issue_pending = stm32_dma3_issue_pending;
+
+ /* If dma-channels is not set in the device tree, get it from HWCFGR1 */
+ if (of_property_read_u32(np, "dma-channels", &ddata->dma_channels)) {
+ hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR1);
+ ddata->dma_channels = FIELD_GET(G_NUM_CHANNELS, hwcfgr);
+ }
+
+ /* If dma-requests is not set in the device tree, get it from HWCFGR2 */
+ if (of_property_read_u32(np, "dma-requests", &ddata->dma_requests)) {
+ hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR2);
+ ddata->dma_requests = FIELD_GET(G_MAX_REQ_ID, hwcfgr) + 1;
+ }
+
+ /* G_MASTER_PORTS, G_M0_DATA_WIDTH_ENC, G_M1_DATA_WIDTH_ENC in HWCFGR1 */
+ hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR1);
+ master_ports = FIELD_GET(G_MASTER_PORTS, hwcfgr);
+
+ ddata->ports_max_dw[0] = FIELD_GET(G_M0_DATA_WIDTH_ENC, hwcfgr);
+ if (master_ports == AXI64 || master_ports == AHB32) /* Single master port */
+ ddata->ports_max_dw[1] = DW_INVALID;
+ else /* Dual master ports */
+ ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr);
+
+ ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans),
+ GFP_KERNEL);
+ if (!ddata->chans) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ chan_reserved = stm32_dma3_check_rif(ddata);
+
+ if (chan_reserved == GENMASK(ddata->dma_channels - 1, 0)) {
+ ret = -ENODEV;
+ dev_err_probe(&pdev->dev, ret, "No channel available, abort registration\n");
+ goto err_clk_disable;
+ }
+
+ /* G_FIFO_SIZE x=0..7 in HWCFGR3 and G_FIFO_SIZE x=8..15 in HWCFGR4 */
+ hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR3);
+ hwcfgr |= ((u64)readl_relaxed(ddata->base + STM32_DMA3_HWCFGR4)) << 32;
+
+ for (i = 0; i < ddata->dma_channels; i++) {
+ if (chan_reserved & BIT(i))
+ continue;
+
+ chan = &ddata->chans[i];
+ chan->id = i;
+ chan->fifo_size = get_chan_hwcfg(i, G_FIFO_SIZE(i), hwcfgr);
+ /* Max burst is half the FIFO size when there is a FIFO, otherwise no burst is possible */
+ chan->max_burst = (chan->fifo_size) ? (1 << (chan->fifo_size + 1)) / 2 : 0;
+ }
+
+ ret = dmaenginem_async_device_register(dma_dev);
+ if (ret)
+ goto err_clk_disable;
+
+ for (i = 0; i < ddata->dma_channels; i++) {
+ char name[12];
+
+ if (chan_reserved & BIT(i))
+ continue;
+
+ chan = &ddata->chans[i];
+ snprintf(name, sizeof(name), "dma%dchan%d", ddata->dma_dev.dev_id, chan->id);
+
+ chan->vchan.desc_free = stm32_dma3_chan_vdesc_free;
+ vchan_init(&chan->vchan, dma_dev);
+
+ ret = dma_async_device_channel_register(&ddata->dma_dev, &chan->vchan.chan, name);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to register channel %s\n", name);
+ goto err_clk_disable;
+ }
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ goto err_clk_disable;
+ chan->irq = ret;
+
+ ret = devm_request_irq(&pdev->dev, chan->irq, stm32_dma3_chan_irq, 0,
+ dev_name(chan2dev(chan)), chan);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to request channel %s IRQ\n",
+ dev_name(chan2dev(chan)));
+ goto err_clk_disable;
+ }
+ }
+
+ ret = of_dma_controller_register(np, stm32_dma3_of_xlate, ddata);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to register controller\n");
+ goto err_clk_disable;
+ }
+
+ verr = readl_relaxed(ddata->base + STM32_DMA3_VERR);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
+
+ dev_info(&pdev->dev, "STM32 DMA3 registered rev:%lu.%lu\n",
+ FIELD_GET(VERR_MAJREV, verr), FIELD_GET(VERR_MINREV, verr));
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(ddata->clk);
+
+ return ret;
+}
+
+static void stm32_dma3_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int stm32_dma3_runtime_suspend(struct device *dev)
+{
+ struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+
+static int stm32_dma3_runtime_resume(struct device *dev)
+{
+ struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret)
+ dev_err(dev, "Failed to enable clk: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_dma3_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(stm32_dma3_runtime_suspend, stm32_dma3_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_dma3_driver = {
+ .probe = stm32_dma3_probe,
+ .remove_new = stm32_dma3_remove,
+ .driver = {
+ .name = "stm32-dma3",
+ .of_match_table = stm32_dma3_of_match,
+ .pm = pm_ptr(&stm32_dma3_pm_ops),
+ },
+};
+
+static int __init stm32_dma3_init(void)
+{
+ return platform_driver_register(&stm32_dma3_driver);
+}
+
+subsys_initcall(stm32_dma3_init);
+
+MODULE_DESCRIPTION("STM32 DMA3 controller driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@foss.st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32/stm32-dmamux.c
index 8d77e2a7939a..8d77e2a7939a 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32/stm32-dmamux.c
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c
index 6505081ced44..e6d525901de7 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32/stm32-mdma.c
@@ -30,7 +30,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
-#include "virt-dma.h"
+#include "../virt-dma.h"
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 7e0b06b5dff0..a8bb70c2d109 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1252,5 +1252,6 @@ static struct platform_driver cpp41_dma_driver = {
};
module_platform_driver(cpp41_dma_driver);
+MODULE_DESCRIPTION("Texas Instruments CPPI 4.1 DMA support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 25148d952472..c4b6f0df4686 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -106,4 +106,5 @@ int psil_set_new_ep_config(struct device *dev, const char *name,
return 0;
}
EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
+MODULE_DESCRIPTION("K3 PSI-L endpoint configuration");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index dd1a068f905d..7c224c3ab7a0 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -1574,4 +1574,5 @@ static int __init k3_udma_glue_class_init(void)
}
module_init(k3_udma_glue_class_init);
+MODULE_DESCRIPTION("TI K3 NAVSS DMA glue interface");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6400d06588a2..406ee199c2ac 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4405,6 +4405,7 @@ static const struct of_device_id udma_of_match[] = {
},
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, udma_of_match);
static struct udma_soc_data am654_soc_data = {
.oes = {
@@ -4472,7 +4473,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
break;
case DMA_TYPE_BCDMA:
- ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
+ BCDMA_CAP3_HBCHAN_CNT(cap3) +
+ BCDMA_CAP3_UBCHAN_CNT(cap3);
ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
ud->rflow_cnt = ud->rchan_cnt;
@@ -5621,6 +5624,7 @@ static struct platform_driver udma_driver = {
};
module_platform_driver(udma_driver);
+MODULE_DESCRIPTION("Texas Instruments UDMA support");
MODULE_LICENSE("GPL v2");
/* Private interfaces to UDMA */
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index b9e0e22383b7..7e6c04afbe89 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1950,4 +1950,5 @@ static void __exit omap_dma_exit(void)
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Texas Instruments sDMA DMAengine support");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index a6f4265be0c9..7961172a780d 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -139,4 +139,5 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
EXPORT_SYMBOL_GPL(vchan_init);
MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Virtual DMA channel support for DMAengine");
MODULE_LICENSE("GPL");
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 33f0ba11c6ad..cb586a362944 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -105,10 +105,10 @@ static char __init *decode_eisa_sig(unsigned long addr)
return sig_str;
}
-static int eisa_bus_match(struct device *dev, struct device_driver *drv)
+static int eisa_bus_match(struct device *dev, const struct device_driver *drv)
{
struct eisa_device *edev = to_eisa_device(dev);
- struct eisa_driver *edrv = to_eisa_driver(drv);
+ const struct eisa_driver *edrv = to_eisa_driver(drv);
const struct eisa_device_id *eids = edrv->id_table;
if (!eids)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index e6cdb905eeac..00e9a13e6c45 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -190,10 +190,10 @@ static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
}
static const struct ieee1394_device_id *unit_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
const struct ieee1394_device_id *id_table =
- container_of(drv, struct fw_driver, driver)->id_table;
+ container_of_const(drv, struct fw_driver, driver)->id_table;
int id[] = {0, 0, 0, 0};
get_modalias_ids(fw_unit(dev), id);
@@ -207,7 +207,7 @@ static const struct ieee1394_device_id *unit_match(struct device *dev,
static bool is_fw_unit(const struct device *dev);
-static int fw_unit_match(struct device *dev, struct device_driver *drv)
+static int fw_unit_match(struct device *dev, const struct device_driver *drv)
{
/* We only allow binding to fw_units. */
return is_fw_unit(dev) && unit_match(dev, drv) != NULL;
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 0c83931485f6..eb17d03b66fe 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -19,7 +19,7 @@
static DEFINE_IDA(ffa_bus_id);
-static int ffa_device_match(struct device *dev, struct device_driver *drv)
+static int ffa_device_match(struct device *dev, const struct device_driver *drv)
{
const struct ffa_device_id *id_table;
struct ffa_device *ffa_dev;
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 77c78be6e79c..96b2e5f9a8ef 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -207,7 +207,7 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
}
static const struct scmi_device_id *
-scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
+scmi_dev_match_id(struct scmi_device *scmi_dev, const struct scmi_driver *scmi_drv)
{
const struct scmi_device_id *id = scmi_drv->id_table;
@@ -225,9 +225,9 @@ scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
return NULL;
}
-static int scmi_dev_match(struct device *dev, struct device_driver *drv)
+static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
{
- struct scmi_driver *scmi_drv = to_scmi_driver(drv);
+ const struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 1f32d6cf98d6..f23ba62ce127 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,8 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax
+cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
+ $(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_LOONGARCH) += -fpie
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c
index a51ec201ca3c..5d3a1e32d177 100644
--- a/drivers/firmware/efi/libstub/screen_info.c
+++ b/drivers/firmware/efi/libstub/screen_info.c
@@ -32,6 +32,8 @@ struct screen_info *__alloc_screen_info(void)
if (status != EFI_SUCCESS)
return NULL;
+ memset(si, 0, sizeof(*si));
+
status = efi_bs_call(install_configuration_table,
&screen_info_guid, si);
if (status == EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 078055b054e3..f8e465da344d 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -534,11 +534,12 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg)
{
- static struct boot_params boot_params __page_aligned_bss;
- struct setup_header *hdr = &boot_params.hdr;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
+ struct boot_params *boot_params;
+ struct setup_header *hdr;
int options_size = 0;
efi_status_t status;
+ unsigned long alloc;
char *cmdline_ptr;
efi_system_table = sys_table_arg;
@@ -553,6 +554,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_exit(handle, status);
}
+ status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ efi_exit(handle, status);
+
+ boot_params = memset((void *)alloc, 0x0, PARAM_SIZE);
+ hdr = &boot_params->hdr;
+
/* Assign the setup_header fields that the kernel actually cares about */
hdr->root_flags = 1;
hdr->vid_mode = 0xffff;
@@ -562,13 +570,15 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
/* Convert unicode cmdline to ascii */
cmdline_ptr = efi_convert_cmdline(image, &options_size);
- if (!cmdline_ptr)
+ if (!cmdline_ptr) {
+ efi_free(PARAM_SIZE, alloc);
efi_exit(handle, EFI_OUT_OF_RESOURCES);
+ }
efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
- &boot_params.ext_cmd_line_ptr);
+ &boot_params->ext_cmd_line_ptr);
- efi_stub_entry(handle, sys_table_arg, &boot_params);
+ efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
}
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index a4e3bbd556a3..208652a8087c 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -22,12 +22,12 @@
#include "coreboot_table.h"
#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
-#define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
+#define CB_DRV(d) container_of_const(d, struct coreboot_driver, drv)
-static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
+static int coreboot_bus_match(struct device *dev, const struct device_driver *drv)
{
struct coreboot_device *device = CB_DEV(dev);
- struct coreboot_driver *driver = CB_DRV(drv);
+ const struct coreboot_driver *driver = CB_DRV(drv);
const struct coreboot_device_id *id;
if (!driver->id_table)
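
All of the bus ->match() conversions in this series follow the same shape: the driver argument becomes const, and any cast helper switches to container_of_const() so constness is preserved instead of being cast away. A minimal sketch of the pattern for a hypothetical "foo" bus (none of these names are from the patch; this is an illustration, not kernel code):

    #include <linux/device.h>
    #include <linux/container_of.h>

    struct foo_driver {
            const struct foo_device_id *id_table;
            struct device_driver driver;
    };

    /* const in, const out: a const device_driver yields a const foo_driver */
    #define to_foo_driver(d) container_of_const(d, struct foo_driver, driver)

    static int foo_bus_match(struct device *dev, const struct device_driver *drv)
    {
            const struct foo_driver *fdrv = to_foo_driver(drv);

            /* match purely on the driver's id_table, never writing through drv */
            return fdrv->id_table != NULL;
    }
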
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 094ee97ea26c..c406b949026f 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -257,10 +257,10 @@ dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
return NULL;
}
-static int dfl_bus_match(struct device *dev, struct device_driver *drv)
+static int dfl_bus_match(struct device *dev, const struct device_driver *drv)
{
struct dfl_device *ddev = to_dfl_dev(dev);
- struct dfl_driver *ddrv = to_dfl_drv(drv);
+ const struct dfl_driver *ddrv = to_dfl_drv(drv);
const struct dfl_device_id *id_entry;
id_entry = ddrv->id_table;
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 097d5a780264..46ac5a8beab7 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -1361,10 +1361,10 @@ EXPORT_SYMBOL_GPL(fsi_master_unregister);
/* FSI core & Linux bus type definitions */
-static int fsi_bus_match(struct device *dev, struct device_driver *drv)
+static int fsi_bus_match(struct device *dev, const struct device_driver *drv)
{
struct fsi_device *fsi_dev = to_fsi_dev(dev);
- struct fsi_driver *fsi_drv = to_fsi_drv(drv);
+ const struct fsi_driver *fsi_drv = to_fsi_drv(drv);
const struct fsi_device_id *id;
if (!fsi_drv->id_table)
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index f0a19cd451a0..b0b624c3717b 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -646,14 +646,12 @@ err_free_aspeed:
return rc;
}
-static int fsi_master_aspeed_remove(struct platform_device *pdev)
+static void fsi_master_aspeed_remove(struct platform_device *pdev)
{
struct fsi_master_aspeed *aspeed = platform_get_drvdata(pdev);
fsi_master_unregister(&aspeed->master);
clk_disable_unprepare(aspeed->clk);
-
- return 0;
}
static const struct of_device_id fsi_master_aspeed_match[] = {
@@ -668,7 +666,7 @@ static struct platform_driver fsi_master_aspeed_driver = {
.of_match_table = fsi_master_aspeed_match,
},
.probe = fsi_master_aspeed_probe,
- .remove = fsi_master_aspeed_remove,
+ .remove_new = fsi_master_aspeed_remove,
};
module_platform_driver(fsi_master_aspeed_driver);
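
This and the following FSI conversions (fsi-master-ast-cf, fsi-master-gpio, fsi-occ) are part of the tree-wide switch from the int-returning platform .remove() callback to the void-returning .remove_new(): a non-zero return from .remove() only produced a warning and the device was torn down regardless, so the callbacks simply drop their trailing return 0. A minimal sketch of the converted shape (driver name hypothetical):

    #include <linux/platform_device.h>
    #include <linux/module.h>

    static int foo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void foo_remove(struct platform_device *pdev)
    {
            /* teardown only; remove cannot fail, so nothing is returned */
    }

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
            },
            .probe = foo_probe,
            .remove_new = foo_remove,   /* void-returning remove callback */
    };
    module_platform_driver(foo_driver);

    MODULE_LICENSE("GPL");
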
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 812dfa9a9140..f8c776ce1b56 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1412,15 +1412,13 @@ static int fsi_master_acf_probe(struct platform_device *pdev)
}
-static int fsi_master_acf_remove(struct platform_device *pdev)
+static void fsi_master_acf_remove(struct platform_device *pdev)
{
struct fsi_master_acf *master = platform_get_drvdata(pdev);
device_remove_file(master->dev, &dev_attr_external_mode);
fsi_master_unregister(&master->master);
-
- return 0;
}
static const struct of_device_id fsi_master_acf_match[] = {
@@ -1436,7 +1434,7 @@ static struct platform_driver fsi_master_acf = {
.of_match_table = fsi_master_acf_match,
},
.probe = fsi_master_acf_probe,
- .remove = fsi_master_acf_remove,
+ .remove_new = fsi_master_acf_remove,
};
module_platform_driver(fsi_master_acf);
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index ed03da4f2447..10fc344b6b22 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -867,15 +867,13 @@ static int fsi_master_gpio_probe(struct platform_device *pdev)
-static int fsi_master_gpio_remove(struct platform_device *pdev)
+static void fsi_master_gpio_remove(struct platform_device *pdev)
{
struct fsi_master_gpio *master = platform_get_drvdata(pdev);
device_remove_file(&pdev->dev, &dev_attr_external_mode);
fsi_master_unregister(&master->master);
-
- return 0;
}
static const struct of_device_id fsi_master_gpio_match[] = {
@@ -890,7 +888,7 @@ static struct platform_driver fsi_master_gpio_driver = {
.of_match_table = fsi_master_gpio_match,
},
.probe = fsi_master_gpio_probe,
- .remove = fsi_master_gpio_remove,
+ .remove_new = fsi_master_gpio_remove,
};
module_platform_driver(fsi_master_gpio_driver);
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index f7157c1d77d8..f58b158d097c 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -702,7 +702,7 @@ static int occ_probe(struct platform_device *pdev)
return 0;
}
-static int occ_remove(struct platform_device *pdev)
+static void occ_remove(struct platform_device *pdev)
{
struct occ *occ = platform_get_drvdata(pdev);
@@ -719,8 +719,6 @@ static int occ_remove(struct platform_device *pdev)
device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child);
ida_free(&occ_ida, occ->idx);
-
- return 0;
}
static const struct of_device_id occ_match[] = {
@@ -742,7 +740,7 @@ static struct platform_driver occ_driver = {
.of_match_table = occ_match,
},
.probe = occ_probe,
- .remove = occ_remove,
+ .remove_new = occ_remove,
};
static int occ_init(void)
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 8dce78ea7139..5762e517338e 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -591,8 +591,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
void __iomem *reg_base;
struct brcmstb_gpio_priv *priv;
struct resource *res;
- struct property *prop;
- const __be32 *p;
u32 bank_width;
int num_banks = 0;
int num_gpios = 0;
@@ -636,8 +634,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
#endif
- of_property_for_each_u32(np, "brcm,gpio-bank-widths", prop, p,
- bank_width) {
+ of_property_for_each_u32(np, "brcm,gpio-bank-widths", bank_width) {
struct brcmstb_gpio_bank *bank;
struct gpio_chip *gc;
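
The gpio-brcmstb hunk above relies on the reworked of_property_for_each_u32() iterator, which no longer requires the caller to supply struct property / __be32 cursor variables; that is why the two locals are dropped at the top of probe(). Roughly, as a sketch (the helper name is hypothetical, the property name is the one used above):

    #include <linux/of.h>
    #include <linux/printk.h>

    static void foo_count_banks(struct device_node *np)
    {
            u32 bank_width;
            int num_banks = 0;

            /* three-argument form: node, property name, u32 loop variable */
            of_property_for_each_u32(np, "brcm,gpio-bank-widths", bank_width)
                    num_banks++;

            pr_info("found %d GPIO banks\n", num_banks);
    }
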
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 0e0d55da4f01..ccc47ea0b3e1 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -805,7 +805,7 @@ static int gpio_virtuser_dbgfs_init_line_attrs(struct device *dev,
return -ENOMEM;
data->ad.desc = desc;
- sprintf(data->consumer, id);
+ strscpy(data->consumer, id);
atomic_set(&data->irq, 0);
atomic_set(&data->irq_count, 0);
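
The one-line gpio-virtuser change above is a real fix, not just style: sprintf(data->consumer, id) treats the caller-supplied id as a format string and performs no bounds check, while the two-argument strscpy() form copies it verbatim, derives the bound from the destination array, and always NUL-terminates. In the abstract (struct and buffer size are illustrative only):

    #include <linux/string.h>

    struct foo_data {
            char consumer[32];      /* size is illustrative */
    };

    static void foo_set_consumer(struct foo_data *data, const char *id)
    {
            /* BAD: id is interpreted as a format string and may overflow consumer[] */
            /* sprintf(data->consumer, id); */

            /* GOOD: bounded copy, size inferred from the array, always NUL-terminated */
            strscpy(data->consumer, id);
    }
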
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index edaeee53db75..3a9668cc100d 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -55,7 +55,7 @@ static DEFINE_IDA(gpio_ida);
static dev_t gpio_devt;
#define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */
-static int gpio_bus_match(struct device *dev, struct device_driver *drv)
+static int gpio_bus_match(struct device *dev, const struct device_driver *drv)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 9dd8294032ef..38408e4e158e 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -106,7 +106,8 @@ amdgpu-y += \
df_v1_7.o \
df_v3_6.o \
df_v4_3.o \
- df_v4_6_2.o
+ df_v4_6_2.o \
+ df_v4_15.o
# add GMC block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 1538b2dbfff1..eb605e79ae0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -33,6 +33,7 @@ struct amdgpu_df_hash_status {
struct amdgpu_df_funcs {
void (*sw_init)(struct amdgpu_device *adev);
void (*sw_fini)(struct amdgpu_device *adev);
+ void (*hw_init)(struct amdgpu_device *adev);
void (*enable_broadcast_mode)(struct amdgpu_device *adev,
bool enable);
u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b241f61fe9c9..ac108fca64fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -37,6 +37,7 @@
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
+#include "df_v4_15.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
@@ -2803,6 +2804,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 6, 2):
adev->df.funcs = &df_v4_6_2_funcs;
break;
+ case IP_VERSION(4, 15, 0):
+ case IP_VERSION(4, 15, 1):
+ adev->df.funcs = &df_v4_15_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 78089f2f79f5..094498a0964b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -116,9 +116,10 @@
* - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
* - 3.56.0 - Update IB start address and size alignment for decode and encode
* - 3.57.0 - Compute tunneling on GFX10+
+ * - 3.58.0 - Add GFX12 DCC support
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 57
+#define KMS_DRIVER_MINOR 58
#define KMS_DRIVER_PATCHLEVEL 0
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 800cc7a148b2..189574d53ebd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1591,6 +1591,66 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
}
}
+static int psp_ras_send_cmd(struct psp_context *psp,
+ enum ras_command cmd_id, void *in, void *out)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ uint32_t cmd = cmd_id;
+ int ret = 0;
+
+ if (!in)
+ return -EINVAL;
+
+ mutex_lock(&psp->ras_context.mutex);
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ switch (cmd) {
+ case TA_RAS_COMMAND__ENABLE_FEATURES:
+ case TA_RAS_COMMAND__DISABLE_FEATURES:
+ memcpy(&ras_cmd->ras_in_message,
+ in, sizeof(ras_cmd->ras_in_message));
+ break;
+ case TA_RAS_COMMAND__TRIGGER_ERROR:
+ memcpy(&ras_cmd->ras_in_message.trigger_error,
+ in, sizeof(ras_cmd->ras_in_message.trigger_error));
+ break;
+ case TA_RAS_COMMAND__QUERY_ADDRESS:
+ memcpy(&ras_cmd->ras_in_message.address,
+ in, sizeof(ras_cmd->ras_in_message.address));
+ break;
+ default:
+ dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ras_cmd->cmd_id = cmd;
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+
+ switch (cmd) {
+ case TA_RAS_COMMAND__TRIGGER_ERROR:
+ if (!ret && out)
+ memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
+ break;
+ case TA_RAS_COMMAND__QUERY_ADDRESS:
+ if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
+ ret = -EINVAL;
+ else if (out)
+ memcpy(out,
+ &ras_cmd->ras_out_message.address,
+ sizeof(ras_cmd->ras_out_message.address));
+ break;
+ default:
+ break;
+ }
+
+err_out:
+ mutex_unlock(&psp->ras_context.mutex);
+
+ return ret;
+}
+
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
struct ta_ras_shared_memory *ras_cmd;
@@ -1632,23 +1692,15 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable)
{
- struct ta_ras_shared_memory *ras_cmd;
+ enum ras_command cmd_id;
int ret;
- if (!psp->ras_context.context.initialized)
+ if (!psp->ras_context.context.initialized || !info)
return -EINVAL;
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- if (enable)
- ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
- else
- ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
-
- ras_cmd->ras_in_message = *info;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ cmd_id = enable ?
+ TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
+ ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
if (ret)
return -EINVAL;
@@ -1672,6 +1724,8 @@ int psp_ras_terminate(struct psp_context *psp)
psp->ras_context.context.initialized = false;
+ mutex_destroy(&psp->ras_context.mutex);
+
return ret;
}
@@ -1756,9 +1810,10 @@ int psp_ras_initialize(struct psp_context *psp)
ret = psp_ta_load(psp, &psp->ras_context.context);
- if (!ret && !ras_cmd->ras_status)
+ if (!ret && !ras_cmd->ras_status) {
psp->ras_context.context.initialized = true;
- else {
+ mutex_init(&psp->ras_context.mutex);
+ } else {
if (ras_cmd->ras_status)
dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
@@ -1772,12 +1827,12 @@ int psp_ras_initialize(struct psp_context *psp)
int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
- struct ta_ras_shared_memory *ras_cmd;
struct amdgpu_device *adev = psp->adev;
int ret;
uint32_t dev_mask;
+ uint32_t ras_status = 0;
- if (!psp->ras_context.context.initialized)
+ if (!psp->ras_context.context.initialized || !info)
return -EINVAL;
switch (info->block_id) {
@@ -1801,13 +1856,8 @@ int psp_ras_trigger_error(struct psp_context *psp,
dev_mask &= AMDGPU_RAS_INST_MASK;
info->sub_block_index |= dev_mask;
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
- ras_cmd->ras_in_message.trigger_error = *info;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ ret = psp_ras_send_cmd(psp,
+ TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
if (ret)
return -EINVAL;
@@ -1817,9 +1867,9 @@ int psp_ras_trigger_error(struct psp_context *psp,
if (amdgpu_ras_intr_triggered())
return 0;
- if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
+ if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
return -EACCES;
- else if (ras_cmd->ras_status)
+ else if (ras_status)
return -EINVAL;
return 0;
@@ -1829,25 +1879,16 @@ int psp_ras_query_address(struct psp_context *psp,
struct ta_ras_query_address_input *addr_in,
struct ta_ras_query_address_output *addr_out)
{
- struct ta_ras_shared_memory *ras_cmd;
int ret;
- if (!psp->ras_context.context.initialized)
- return -EINVAL;
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
- ras_cmd->ras_in_message.address = *addr_in;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
- if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
+ if (!psp->ras_context.context.initialized ||
+ !addr_in || !addr_out)
return -EINVAL;
- *addr_out = ras_cmd->ras_out_message.address;
+ ret = psp_ras_send_cmd(psp,
+ TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
- return 0;
+ return ret;
}
// ras end
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3635303e6548..74a96516c913 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -200,6 +200,7 @@ struct psp_xgmi_context {
struct psp_ras_context {
struct ta_context context;
struct amdgpu_ras *ras;
+ struct mutex mutex;
};
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 8e8afbd237bc..0c856005df6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -348,6 +348,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
context->session_id = ta_id;
+ mutex_lock(&psp->ras_context.mutex);
ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
if (ret)
goto err_free_shared_buf;
@@ -366,6 +367,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
ret = -EFAULT;
err_free_shared_buf:
+ mutex_unlock(&psp->ras_context.mutex);
kfree(shared_buf);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index eae0a555df3c..aab8077e5098 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -1011,6 +1011,9 @@ Out:
uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *control)
{
+ /* get the available eeprom table version before eeprom table init */
+ amdgpu_ras_set_eeprom_table_version(control);
+
if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
return RAS_MAX_RECORD_COUNT_V2_1;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 8d65b096db90..43f44cc201cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -147,6 +147,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
}
+ /* from vcn4 and above, only unified queue is used */
+ adev->vcn.using_unified_queue =
+ amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
+
hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@@ -275,18 +279,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
return 0;
}
-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- bool ret = false;
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
- ret = true;
-
- return ret;
-}
-
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
@@ -397,7 +389,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !adev->vcn.using_unified_queue) {
struct dpg_pause_state new_state;
if (fence[j] ||
@@ -443,7 +437,9 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !adev->vcn.using_unified_queue) {
struct dpg_pause_state new_state;
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
@@ -469,8 +465,12 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+ !adev->vcn.using_unified_queue)
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
atomic_dec(&ring->adev->vcn.total_submission_cnt);
@@ -724,12 +724,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
- bool sq = amdgpu_vcn_using_unified_queue(ring);
uint32_t *ib_checksum;
uint32_t ib_pack_in_dw;
int i, r;
- if (sq)
+ if (adev->vcn.using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -742,7 +741,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
ib->length_dw = 0;
/* single queue headers */
- if (sq) {
+ if (adev->vcn.using_unified_queue) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -761,7 +760,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (sq)
+ if (adev->vcn.using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -851,15 +850,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
struct dma_fence **fence)
{
unsigned int ib_size_dw = 16;
+ struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint32_t *ib_checksum = NULL;
uint64_t addr;
- bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
- if (sq)
+ if (adev->vcn.using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -873,7 +872,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
ib->length_dw = 0;
- if (sq)
+ if (adev->vcn.using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -895,7 +894,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (sq)
+ if (adev->vcn.using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -918,15 +917,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
struct dma_fence **fence)
{
unsigned int ib_size_dw = 16;
+ struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint32_t *ib_checksum = NULL;
uint64_t addr;
- bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
- if (sq)
+ if (adev->vcn.using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -940,7 +939,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
ib->length_dw = 0;
- if (sq)
+ if (adev->vcn.using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -962,7 +961,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (sq)
+ if (adev->vcn.using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 9f06def236fd..1a5439abd1a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -329,6 +329,7 @@ struct amdgpu_vcn {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
+ bool using_unified_queue;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3abfa66d72a2..a060c28f0877 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -434,7 +434,7 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (!vm)
return result;
- result += vm->generation;
+ result += lower_32_bits(vm->generation);
/* Add one if the page tables will be re-generated on next CS */
if (drm_sched_entity_error(&vm->delayed))
++result;
@@ -463,13 +463,14 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
void *param)
{
+ uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *shadow;
struct amdgpu_bo *bo;
int r;
- if (drm_sched_entity_error(&vm->delayed)) {
- ++vm->generation;
+ if (vm->generation != new_vm_generation) {
+ vm->generation = new_vm_generation;
amdgpu_vm_bo_reset_state_machine(vm);
amdgpu_vm_fini_entities(vm);
r = amdgpu_vm_init_entities(adev, vm);
@@ -2439,7 +2440,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->last_update = dma_fence_get_stub();
vm->last_unlocked = dma_fence_get_stub();
vm->last_tlb_flush = dma_fence_get_stub();
- vm->generation = 0;
+ vm->generation = amdgpu_vm_generation(adev, NULL);
mutex_init(&vm->eviction_lock);
vm->evicting = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_15.c b/drivers/gpu/drm/amd/amdgpu/df_v4_15.c
new file mode 100644
index 000000000000..2a573e33908b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_15.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v4_15.h"
+
+#include "df/df_4_15_offset.h"
+#include "df/df_4_15_sh_mask.h"
+
+static void df_v4_15_hw_init(struct amdgpu_device *adev)
+{
+ if (adev->have_atomics_support) {
+ uint32_t tmp;
+ uint32_t dis_lcl_proc = (1 << 1 |
+ 1 << 2 |
+ 1 << 13);
+
+ tmp = RREG32_SOC15(DF, 0, regNCSConfigurationRegister1);
+ tmp |= (dis_lcl_proc << NCSConfigurationRegister1__DisIntAtomicsLclProcessing__SHIFT);
+ WREG32_SOC15(DF, 0, regNCSConfigurationRegister1, tmp);
+ }
+}
+
+const struct amdgpu_df_funcs df_v4_15_funcs = {
+ .hw_init = df_v4_15_hw_init
+};
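
For reference, with the DisIntAtomicsLclProcessing shift of 0x3 added later in this series (df_4_15_sh_mask.h), the value programmed above works out to:

    dis_lcl_proc        = (1 << 1) | (1 << 2) | (1 << 13) = 0x2006
    dis_lcl_proc << 0x3 = 0x10030                  /* bits 4, 5 and 16 */

which sits entirely inside the 0x0003FFF8 field mask, so the read-modify-write only touches the DisIntAtomicsLclProcessing field of NCSConfigurationRegister1.
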
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_15.h b/drivers/gpu/drm/amd/amdgpu/df_v4_15.h
new file mode 100644
index 000000000000..dddf2422112a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_15.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DF_V4_15_H__
+#define __DF_V4_15_H__
+
+extern const struct amdgpu_df_funcs df_v4_15_funcs;
+
+#endif /* __DF_V4_15_H__ */
+
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 04d8966423de..ad524ddc9760 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -32,6 +32,9 @@
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#define NORMALIZE_JPEG_REG_OFFSET(offset) \
+ (offset & 0x1FFFF)
+
enum jpeg_engin_status {
UVD_PGFSM_STATUS__UVDJ_PWR_ON = 0,
UVD_PGFSM_STATUS__UVDJ_PWR_OFF = 2,
@@ -621,6 +624,13 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}
+static void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ /* JPEG engine access for HDP flush doesn't work when RRMT is enabled.
+ * This is a workaround to avoid any HDP flush through JPEG ring.
+ */
+}
+
/**
* jpeg_v4_0_3_dec_ring_set_wptr - set write pointer
*
@@ -817,7 +827,13 @@ void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
- uint32_t reg_offset = (reg << 2);
+ uint32_t reg_offset;
+
+ /* For VF, only local offsets should be used */
+ if (amdgpu_sriov_vf(ring->adev))
+ reg = NORMALIZE_JPEG_REG_OFFSET(reg);
+
+ reg_offset = (reg << 2);
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -858,7 +874,13 @@ void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
- uint32_t reg_offset = (reg << 2);
+ uint32_t reg_offset;
+
+ /* For VF, only local offsets should be used */
+ if (amdgpu_sriov_vf(ring->adev))
+ reg = NORMALIZE_JPEG_REG_OFFSET(reg);
+
+ reg_offset = (reg << 2);
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -1072,6 +1094,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v4_0_3_dec_ring_nop,
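
NORMALIZE_JPEG_REG_OFFSET() (and the matching NORMALIZE_VCN_REG_OFFSET() added later in this series) simply masks a register index down to its low 17 bits, so that a VF emits the instance-local offset rather than an absolute one. With a purely hypothetical index:

    reg           = 0x4A123
    reg & 0x1FFFF = 0x0A123    /* local offset actually used on the VF path */

The reg_offset written into the ring packet is then (reg << 2), exactly as before.
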
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index cc9e961f0078..af1e90159ce3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -176,6 +176,14 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ /* SDMA seems to miss doorbells sometimes when powergating kicks in.
+ * Updating the wptr directly will wake it. This is only safe because
+ * we disallow gfxoff in begin_use() and then allow it again in end_use().
+ */
+ WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
} else {
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
@@ -1647,6 +1655,10 @@ static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
* but it shouldn't hurt for other parts since
* this GFXOFF will be disallowed anyway when SDMA is
* active, this just makes it explicit.
+ * sdma_v5_2_ring_set_wptr() takes advantage of this
+ * to update the wptr because sometimes SDMA seems to miss
+ * doorbells when entering PG. If you remove this, update
+ * sdma_v5_2_ring_set_wptr() as well!
*/
amdgpu_gfx_off_ctrl(adev, false);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
index 04c797d54511..0af648931df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
@@ -91,7 +91,7 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
- return r;
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index d27fb4ea6612..7d641d0dadba 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -484,6 +484,10 @@ static int soc24_common_hw_init(void *handle)
*/
if (adev->nbio.funcs->remap_hdp_registers)
adev->nbio.funcs->remap_hdp_registers(adev);
+
+ if (adev->df.funcs->hw_init)
+ adev->df.funcs->hw_init(adev);
+
/* enable the doorbell aperture */
soc24_enable_doorbell_aperture(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index f6d96a44d75f..776c539bfdda 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1045,6 +1045,9 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
amdgpu_dpm_enable_uvd(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1498,6 +1501,9 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index f53054e39ebb..9bae95538b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -45,6 +45,9 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define NORMALIZE_VCN_REG_OFFSET(offset) \
+ (offset & 0x1FFFF)
+
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
@@ -1375,6 +1378,50 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
regUVD_RB_WPTR);
}
+static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ /* For VF, only local offsets should be used */
+ if (amdgpu_sriov_vf(ring->adev))
+ reg = NORMALIZE_VCN_REG_OFFSET(reg);
+
+ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+{
+ /* For VF, only local offsets should be used */
+ if (amdgpu_sriov_vf(ring->adev))
+ reg = NORMALIZE_VCN_REG_OFFSET(reg);
+
+ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for reg writes */
+ vcn_v4_0_3_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
+ vmid * hub->ctx_addr_distance,
+ lower_32_bits(pd_addr), 0xffffffff);
+}
+
+static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ /* VCN engine access for HDP flush doesn't work when RRMT is enabled.
+ * This is a workaround to avoid any HDP flush through VCN ring.
+ */
+}
+
/**
* vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
*
@@ -1414,7 +1461,8 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
.emit_ib = vcn_v2_0_enc_ring_emit_ib,
.emit_fence = vcn_v2_0_enc_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
+ .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_vcn_enc_ring_test_ring,
.test_ib = amdgpu_vcn_unified_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -1422,8 +1470,8 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index f45495de6875..8d75061f9f38 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -958,6 +958,9 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
amdgpu_dpm_enable_uvd(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1162,6 +1165,9 @@ static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 070b56610c7d..68c97fcd539b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -721,6 +721,9 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
amdgpu_dpm_enable_uvd(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -898,6 +901,9 @@ static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 5fd1b6b44577..2d7755e2b6c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -137,6 +137,13 @@ struct vblank_control_work {
bool enable;
};
+/**
+ * struct idle_workqueue - Work data for periodic action in idle
+ * @work: Kernel work data for the work event
+ * @dm: amdgpu display manager device
+ * @enable: true if idle worker is enabled
+ * @running: true if idle worker is running
+ */
struct idle_workqueue {
struct work_struct work;
struct amdgpu_display_manager *dm;
@@ -502,6 +509,12 @@ struct amdgpu_display_manager {
* Deferred work for vblank control events.
*/
struct workqueue_struct *vblank_control_workqueue;
+
+ /**
+ * @idle_workqueue:
+ *
+ * Periodic work for idle events.
+ */
struct idle_workqueue *idle_workqueue;
struct drm_atomic_state *cached_state;
@@ -587,7 +600,9 @@ struct amdgpu_display_manager {
*/
struct mutex dpia_aux_lock;
- /*
+ /**
+ * @bb_from_dmub:
+ *
* Bounding box data read from dmub during early initialization for DCN4+
*/
struct dml2_soc_bb *bb_from_dmub;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 067f6555cfdf..ccbb15f1638c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -143,7 +143,8 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- pipe_ctx->plane_state->status.is_flip_pending = false;
+ if (pipe_ctx->plane_state)
+ pipe_ctx->plane_state->status.is_flip_pending = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
index defe13436a2c..e73579f1a88e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
@@ -64,8 +64,6 @@ double math_ceil(const double arg)
double math_ceil2(const double arg, const double significance)
{
- ASSERT(significance != 0);
-
return ((int)(arg / significance + 0.99999)) * significance;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 9ac7fc717a92..0150f2581ee4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -147,16 +147,28 @@ struct cnv_color_keyer_params {
int color_keyer_blue_high;
};
-/* new for dcn2: set the 8bit alpha values based on the 2 bit alpha
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT0 default: 0b00000000
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT1 default: 0b01010101
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT2 default: 0b10101010
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT3 default: 0b11111111
+/**
+ * struct cnv_alpha_2bit_lut - Set the 8bit alpha values based on the 2 bit alpha
*/
struct cnv_alpha_2bit_lut {
+ /**
+ * @lut0: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT0. Default: 0b00000000
+ */
int lut0;
+
+ /**
+ * @lut1: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT1. Default: 0b01010101
+ */
int lut1;
+
+ /**
+ * @lut2: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT2. Default: 0b10101010
+ */
int lut2;
+
+ /**
+ * @lut3: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT3. Default: 0b11111111
+ */
int lut3;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 40a9b3471208..3a89cc0cffc1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -1039,6 +1039,20 @@ struct mpc_funcs {
*/
void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
bool lut_bank_a, int mpcc_id);
+ /**
+ * @program_3dlut_size:
+ *
+ * Program 3D LUT size.
+ *
+ * Parameters:
+ * - [in/out] mpc - MPC context.
+ * - [in] is_17x17x17 - true if the 3D LUT is 17x17x17
+ * - [in] mpcc_id
+ *
+ * Return:
+ *
+ * void
+ */
void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 127fb1a51654..747679cb4944 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -205,9 +205,24 @@ struct gamma_coefficients {
struct fixed31_32 user_brightness;
};
+/**
+ * struct pwl_float_data - Fixed point RGB color
+ */
struct pwl_float_data {
+ /**
+ * @r: Component Red.
+ */
struct fixed31_32 r;
+
+ /**
+ * @g: Component Green.
+ */
+
struct fixed31_32 g;
+
+ /**
+ * @b: Component Blue.
+ */
struct fixed31_32 b;
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 50459d7a0f85..b76737b7b9e4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -26,6 +26,16 @@
#include "core_types.h"
#include "link_enc_cfg.h"
+/**
+ * DOC: overview
+ *
+ * Display Input Output (DIO) is the display input and output unit in DCN. It
+ * includes output encoders to support different display output, like
+ * DisplayPort, HDMI, DVI interface, and others. It also includes the control
+ * and status channels for these interfaces.
+ */
+
+
void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size)
{
@@ -254,12 +264,31 @@ static const struct link_hwss dio_link_hwss = {
},
};
+/**
+ * can_use_dio_link_hwss - Check if the link_hwss is accessible
+ *
+ * @link: Reference to a link struct containing one or more sinks and the
+ * connective status.
+ * @link_res: Mappable hardware resource used to enable a link.
+ *
+ * Returns:
+ * True if the link encoder is accessible from the link.
+ */
bool can_use_dio_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
return link->link_enc != NULL;
}
+/**
+ * get_dio_link_hwss - Return link_hwss reference
+ *
+ * This is a simple getter that returns the link_hwss instance populated
+ * in link_hwss_dio.c.
+ *
+ * Returns:
+ * Pointer to the populated dio link_hwss struct.
+ */
const struct link_hwss *get_dio_link_hwss(void)
{
return &dio_link_hwss;
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index a1f72fe378ee..45f0e091fcb0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -23,15 +23,6 @@
*
*/
-/**
- * DOC: overview
- *
- * Display Input Output (DIO), is the display input and output unit in DCN. It
- * includes output encoders to support different display output, like
- * DisplayPort, HDMI, DVI interface, and others. It also includes the control
- * and status channels for these interfaces.
- */
-
#ifndef __LINK_HWSS_DIO_H__
#define __LINK_HWSS_DIO_H__
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 336488c0574e..94427875bcdd 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -945,19 +945,10 @@ void optc1_set_drr(
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
-
- } else {
- REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
- OTG_SET_V_TOTAL_MIN_MASK, 0,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_FORCE_LOCK_ON_EVENT, 0);
-
- optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
+
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
}
void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 43417cff2c9b..b4694985a40a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -453,6 +453,16 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* Set the min/max selectors unconditionally so that
+ * DMCUB fw may change OTG timings when necessary
+ * TODO: Remove the w/a after fixing the issue in DMCUB firmware
+ */
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_offset.h
new file mode 100644
index 000000000000..c2b009752f60
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_offset.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_15_OFFSET_HEADER
+#define _df_4_15_OFFSET_HEADER
+
+#define regNCSConfigurationRegister1 0x0901
+#define regNCSConfigurationRegister1_BASE_IDX 4
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_sh_mask.h
new file mode 100644
index 000000000000..9868a9c32795
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_sh_mask.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_15_SH_MASK_HEADER
+#define _df_4_15_SH_MASK_HEADER
+
+#define NCSConfigurationRegister1__DisIntAtomicsLclProcessing__SHIFT 0x3
+#define NCSConfigurationRegister1__DisIntAtomicsLclProcessing_MASK 0x0003FFF8L
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index fb8643d25d1b..9d7454b3c314 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1924,20 +1924,12 @@ static int smu_disable_dpms(struct smu_context *smu)
}
/*
- * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the features disablement properly
+ * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly
* for gpu reset and S0i3 cases. Driver involvement is unnecessary.
*/
- if (amdgpu_in_reset(adev) || adev->in_s0ix) {
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 4):
- case IP_VERSION(13, 0, 11):
- case IP_VERSION(14, 0, 0):
- case IP_VERSION(14, 0, 1):
- return 0;
- default:
- break;
- }
- }
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
+ smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
+ return 0;
/*
* For gpu reset, runpm and hibernation through BACO,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 5d47d58944f6..8798ebfcea83 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -69,6 +69,9 @@
#define SMU_14_0_0_UMD_PSTATE_SOCCLK 678
#define SMU_14_0_0_UMD_PSTATE_FCLK 1800
+#define SMU_14_0_4_UMD_PSTATE_GFXCLK 938
+#define SMU_14_0_4_UMD_PSTATE_SOCCLK 938
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -1296,19 +1299,28 @@ static int smu_v14_0_common_get_dpm_profile_freq(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK;
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4))
+ clk_limit = SMU_14_0_4_UMD_PSTATE_GFXCLK;
+ else
+ clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK;
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
break;
case SMU_SOCCLK:
- clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK;
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4))
+ clk_limit = SMU_14_0_4_UMD_PSTATE_SOCCLK;
+ else
+ clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK;
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit);
break;
case SMU_FCLK:
- clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK;
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4))
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
+ else
+ clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK;
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index 5afc26be9d2a..d810529ebfb6 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -36,7 +36,7 @@ struct dp_aux_ep_device_with_data {
*
* Return: True if this driver matches this device; false otherwise.
*/
-static int dp_aux_ep_match(struct device *dev, struct device_driver *drv)
+static int dp_aux_ep_match(struct device *dev, const struct device_driver *drv)
{
return !!of_match_device(drv->of_match_table, dev);
}
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index a471c46f5ca6..969cfd5a01ae 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -48,7 +48,7 @@
* subset of the MIPI DCS command set.
*/
-static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
+static int mipi_dsi_device_match(struct device *dev, const struct device_driver *drv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 3903f6ead6e6..59f11af3b0a1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5314,6 +5314,8 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
+ intel_dp->link_trained = false;
+
intel_dp_check_frl_training(intel_dp);
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
intel_dp_start_link_train(NULL, intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 1bc4ef84ff3b..d044c8e36bb3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -117,10 +117,24 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}
-static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
+{
+ return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
+ DP_PHY_REPEATER_MODE_TRANSPARENT;
+}
+
+/*
+ * Read the LTTPR common capabilities and switch the LTTPR PHYs to
+ * non-transparent mode if this is supported. Preserve the
+ * transparent/non-transparent mode on an active link.
+ *
+ * Return the number of detected LTTPRs in non-transparent mode or 0 if the
+ * LTTPRs are in transparent mode or the detection failed.
+ */
+static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
int lttpr_count;
- int i;
if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
return 0;
@@ -135,6 +149,19 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
return 0;
/*
+ * Don't change the mode on an active link, to prevent a loss of link
+ * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
+ * resetting its internal state when the mode is changed from
+ * non-transparent to transparent.
+ */
+ if (intel_dp->link_trained) {
+ if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
+ goto out_reset_lttpr_count;
+
+ return lttpr_count;
+ }
+
+ /*
* See DP Standard v2.0 3.6.6.1. about the explicit disabling of
* non-transparent mode and the disable->enable non-transparent mode
* sequence.
@@ -154,11 +181,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
"Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
- intel_dp_reset_lttpr_count(intel_dp);
- return 0;
+ goto out_reset_lttpr_count;
}
+ return lttpr_count;
+
+out_reset_lttpr_count:
+ intel_dp_reset_lttpr_count(intel_dp);
+
+ return 0;
+}
+
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ int lttpr_count;
+ int i;
+
+ lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);
+
for (i = 0; i < lttpr_count; i++)
intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
@@ -1482,10 +1523,10 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
bool passed;
-
/*
- * TODO: Reiniting LTTPRs here won't be needed once proper connector
- * HW state readout is added.
+ * Reinit the LTTPRs here to ensure that they are switched to
+ * non-transparent mode. During an earlier LTTPR detection this
+ * could've been prevented by an active link.
*/
int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 21829439e686..72090f52fb85 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3315,11 +3315,7 @@ static void remove_from_engine(struct i915_request *rq)
static bool can_preempt(struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER(engine->i915) > 8)
- return true;
-
- /* GPGPU on bdw requires extra w/a; not implemented */
- return engine->class != RENDER_CLASS;
+ return GRAPHICS_VER(engine->i915) > 8;
}
static void kick_execlists(const struct i915_request *rq, int prio)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 172dfa7c3588..d40ee1b42110 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -368,8 +368,10 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
goto out_cleanup;
}
- mem->id = i;
- i915->mm.regions[i] = mem;
+ if (mem) { /* Skip on non-fatal errors */
+ mem->id = i;
+ i915->mm.regions[i] = mem;
+ }
}
for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 5c3b2d58d766..1a0cb7aa9cea 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -1451,8 +1451,7 @@ err_context_fini:
return err;
}
-static int
-pvr_remove(struct platform_device *plat_dev)
+static void pvr_remove(struct platform_device *plat_dev)
{
struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
@@ -1469,8 +1468,6 @@ pvr_remove(struct platform_device *plat_dev)
pvr_watchdog_fini(pvr_dev);
pvr_queue_device_fini(pvr_dev);
pvr_context_device_fini(pvr_dev);
-
- return 0;
}
static const struct of_device_id dt_match[] = {
@@ -1485,7 +1482,7 @@ static const struct dev_pm_ops pvr_pm_ops = {
static struct platform_driver pvr_driver = {
.probe = pvr_probe,
- .remove = pvr_remove,
+ .remove_new = pvr_remove,
.driver = {
.name = PVR_DRIVER_NAME,
.pm = &pvr_pm_ops,
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index bfc8cb13fbc5..2fa2c81784e9 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -1186,15 +1186,13 @@ err_lvds_probe:
return ret;
}
-static int lvds_remove(struct platform_device *pdev)
+static void lvds_remove(struct platform_device *pdev)
{
struct stm_lvds *lvds = platform_get_drvdata(pdev);
lvds_pixel_clk_unregister(lvds);
drm_bridge_remove(&lvds->lvds_bridge);
-
- return 0;
}
static const struct of_device_id lvds_dt_ids[] = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index a47f00b443d3..5982941d933b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -265,7 +265,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
struct v3d_dev *v3d;
int ret;
u32 mmu_debug;
- u32 ident1;
+ u32 ident1, ident3;
u64 mask;
v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
@@ -298,6 +298,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
+ ident3 = V3D_READ(V3D_HUB_IDENT3);
+ v3d->rev = V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV);
+
if (v3d->ver >= 71)
v3d->max_counters = V3D_V71_NUM_PERFCOUNTERS;
else if (v3d->ver >= 42)
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 099b962bdfde..49089eefb7c7 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -98,10 +98,12 @@ struct v3d_perfmon {
struct v3d_dev {
struct drm_device drm;
- /* Short representation (e.g. 33, 41) of the V3D tech version
- * and revision.
- */
+ /* Short representation (e.g. 33, 41) of the V3D tech version */
int ver;
+
+ /* Short representation (e.g. 5, 6) of the V3D tech revision */
+ int rev;
+
bool single_irq_line;
/* Different revisions of V3D have different total number of performance
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 03df37a3acf5..271a6d0f5aca 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -331,7 +331,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
- u32 *wg_counts;
+ struct v3d_dev *v3d = job->base.v3d;
+ u32 num_batches, *wg_counts;
v3d_get_bo_vaddr(bo);
v3d_get_bo_vaddr(indirect);
@@ -344,8 +345,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
- args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
- (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+ num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]);
+
+ /* V3D 7.1.6 and later don't subtract 1 from the number of batches */
+ if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
+ args->cfg[4] = num_batches - 1;
+ else
+ args->cfg[4] = num_batches;
+
+ WARN_ON(args->cfg[4] == ~0);
for (int i = 0; i < 3; i++) {
/* 0xffffffff indicates that the uniform rewrite is not needed */
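A rough worked example of the cfg[4] change above, using made-up numbers (wg_size and wg_counts are not taken from the patch):

/* Illustrative values only: indirect_csd->wg_size = 64, wg_counts = {4, 2, 1}.
 *
 *   num_batches = DIV_ROUND_UP(64, 16) * (4 * 2 * 1) = 4 * 8 = 32
 *
 * V3D before 7.1.6 expects cfg[4] to hold num_batches - 1 = 31, while
 * 7.1.6 and later expect the raw count of 32; hence the ver/rev check.
 */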
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 03492fbcb8fb..76109415eba6 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -854,6 +854,13 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}
+static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ xe_pm_runtime_put(xe);
+}
+
/**
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
@@ -870,11 +877,21 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
*/
void xe_device_declare_wedged(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ u8 id;
+
if (xe->wedged.mode == 0) {
drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
return;
}
+ if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
+ drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
+ return;
+ }
+
+ xe_pm_runtime_get_noresume(xe);
+
if (!atomic_xchg(&xe->wedged.flag, 1)) {
xe->needs_flr_on_fini = true;
drm_err(&xe->drm,
@@ -883,4 +900,7 @@ void xe_device_declare_wedged(struct xe_device *xe)
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
}
+
+ for_each_gt(gt, xe, id)
+ xe_gt_declare_wedged(gt);
}
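A condensed view of the wedge path that the xe hunks in this series build up, written as a comment; the ordering reflects the code in the hunks, not an authoritative flow chart:

/*
 * xe_device_declare_wedged(xe)
 *   drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)
 *       -> drops the runtime-PM reference again at driver unload
 *   xe_pm_runtime_get_noresume(xe)
 *       -> keeps the device awake while it is wedged
 *   for_each_gt(gt, xe, id)
 *     xe_gt_declare_wedged(gt)
 *       xe_uc_declare_wedged(&gt->uc)
 *         xe_guc_declare_wedged(&uc->guc)
 *           xe_guc_reset_prepare() + xe_guc_ct_stop() + xe_guc_submit_wedge()
 *       xe_gt_tlb_invalidation_reset(gt)
 */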
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 2d72cdec3a0b..f36980aa26e6 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -118,7 +118,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
- u32 i, num_syncs = 0, num_ufence = 0;
+ u32 i, num_syncs, num_ufence = 0;
struct xe_sched_job *job;
struct xe_vm *vm;
bool write_locked, skip_retry = false;
@@ -156,15 +156,15 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
vm = q->vm;
- for (i = 0; i < args->num_syncs; i++) {
- err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
- &syncs_user[i], SYNC_PARSE_FLAG_EXEC |
+ for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
+ err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
+ &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC |
(xe_vm_in_lr_mode(vm) ?
SYNC_PARSE_FLAG_LR_MODE : 0));
if (err)
goto err_syncs;
- if (xe_sync_is_ufence(&syncs[i]))
+ if (xe_sync_is_ufence(&syncs[num_syncs]))
num_ufence++;
}
@@ -325,8 +325,8 @@ err_unlock_list:
if (err == -EAGAIN && !skip_retry)
goto retry;
err_syncs:
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_cleanup(&syncs[i]);
+ while (num_syncs--)
+ xe_sync_entry_cleanup(&syncs[num_syncs]);
kfree(syncs);
err_exec_queue:
xe_exec_queue_put(q);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 0ba2e2d0289b..31b2e64c70c6 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -904,3 +904,18 @@ struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
return NULL;
}
+
+/**
+ * xe_gt_declare_wedged() - Declare GT wedged
+ * @gt: the GT object
+ *
+ * Wedge the GT, which stops all submission, saves desired debug state, and
+ * cleans up anything which could time out.
+ */
+void xe_gt_declare_wedged(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
+
+ xe_uc_declare_wedged(&gt->uc);
+ xe_gt_tlb_invalidation_reset(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 1123fdfc4ebc..8b1a5027dcf2 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -37,6 +37,7 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
+void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index db6c213da847..4699b7836001 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1543,6 +1543,7 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
+ fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index eb655cee19f7..de0fe9e65746 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -1178,3 +1178,19 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
xe_guc_ct_print(&guc->ct, p, false);
xe_guc_submit_print(guc, p);
}
+
+/**
+ * xe_guc_declare_wedged() - Declare GuC wedged
+ * @guc: the GuC object
+ *
+ * Wedge the GuC, which stops all submission, saves desired debug state, and
+ * cleans up anything which could time out.
+ */
+void xe_guc_declare_wedged(struct xe_guc *guc)
+{
+ xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+
+ xe_guc_reset_prepare(guc);
+ xe_guc_ct_stop(&guc->ct);
+ xe_guc_submit_wedge(guc);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index af59c9545753..e0bbf98f849d 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -37,6 +37,7 @@ void xe_guc_reset_wait(struct xe_guc *guc);
void xe_guc_stop_prepare(struct xe_guc *guc);
void xe_guc_stop(struct xe_guc *guc);
int xe_guc_start(struct xe_guc *guc);
+void xe_guc_declare_wedged(struct xe_guc *guc);
static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 373447758a60..8d7e7f4bbff7 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -861,29 +861,27 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
xe_sched_tdr_queue_imm(&q->guc->sched);
}
-static bool guc_submit_hint_wedged(struct xe_guc *guc)
+/**
+ * xe_guc_submit_wedge() - Wedge GuC submission
+ * @guc: the GuC object
+ *
+ * Save the state of each exec queue registered with the GuC by taking a
+ * ref to it. Register a DRMM handler to drop the refs upon driver unload.
+ */
+void xe_guc_submit_wedge(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
unsigned long index;
int err;
- if (xe->wedged.mode != 2)
- return false;
-
- if (xe_device_wedged(xe))
- return true;
-
- xe_device_declare_wedged(xe);
-
- xe_guc_submit_reset_prepare(guc);
- xe_guc_ct_stop(&guc->ct);
+ xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm,
guc_submit_wedged_fini, guc);
if (err) {
drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
- return true; /* Device is wedged anyway */
+ return;
}
mutex_lock(&guc->submission_state.lock);
@@ -891,6 +889,19 @@ static bool guc_submit_hint_wedged(struct xe_guc *guc)
if (xe_exec_queue_get_unless_zero(q))
set_exec_queue_wedged(q);
mutex_unlock(&guc->submission_state.lock);
+}
+
+static bool guc_submit_hint_wedged(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+
+ if (xe->wedged.mode != 2)
+ return false;
+
+ if (xe_device_wedged(xe))
+ return true;
+
+ xe_device_declare_wedged(xe);
return true;
}
@@ -1677,7 +1688,8 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
- wait_event(guc->ct.wq, !guc_read_stopped(guc));
+ wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
+ !guc_read_stopped(guc));
}
void xe_guc_submit_stop(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 4ad5f4c1b084..bdf8c9f3d24a 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -18,6 +18,7 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
+void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 0f240534fb72..0d073a9987c2 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -300,3 +300,17 @@ void xe_uc_remove(struct xe_uc *uc)
{
xe_gsc_remove(&uc->gsc);
}
+
+/**
+ * xe_uc_declare_wedged() - Declare UC wedged
+ * @uc: the UC object
+ *
+ * Wedge the UC, which stops all submission, saves desired debug state, and
+ * cleans up anything which could time out.
+ */
+void xe_uc_declare_wedged(struct xe_uc *uc)
+{
+ xe_gt_assert(uc_to_gt(uc), uc_to_xe(uc)->wedged.mode);
+
+ xe_guc_declare_wedged(&uc->guc);
+}
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 11856f24e6f9..506517c11333 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -21,5 +21,6 @@ int xe_uc_start(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
void xe_uc_remove(struct xe_uc *uc);
+void xe_uc_declare_wedged(struct xe_uc *uc);
#endif
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 7c52757a89db..8e09d6d328d2 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -333,7 +333,7 @@ static int host1x_del_client(struct host1x *host1x,
return -ENODEV;
}
-static int host1x_device_match(struct device *dev, struct device_driver *drv)
+static int host1x_device_match(struct device *dev, const struct device_driver *drv)
{
return strcmp(dev_name(dev), drv->name) == 0;
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 3a0aaa68ac8d..f006bc931324 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -677,7 +677,7 @@ destroy_cache:
return err;
}
-static int host1x_remove(struct platform_device *pdev)
+static void host1x_remove(struct platform_device *pdev)
{
struct host1x *host = platform_get_drvdata(pdev);
@@ -692,8 +692,6 @@ static int host1x_remove(struct platform_device *pdev)
host1x_channel_list_free(&host->channel_list);
host1x_iommu_exit(host);
host1x_bo_cache_destroy(&host->cache);
-
- return 0;
}
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
@@ -778,7 +776,7 @@ static struct platform_driver tegra_host1x_driver = {
.pm = &host1x_pm_ops,
},
.probe = host1x_probe,
- .remove = host1x_remove,
+ .remove_new = host1x_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index 4dcec535ec21..e51b43dd15a3 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -501,7 +501,6 @@ static int tegra_mipi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct tegra_mipi *mipi;
- int err;
match = of_match_node(tegra_mipi_of_match, pdev->dev.of_node);
if (!match)
@@ -520,35 +519,21 @@ static int tegra_mipi_probe(struct platform_device *pdev)
mutex_init(&mipi->lock);
- mipi->clk = devm_clk_get(&pdev->dev, NULL);
+ mipi->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(mipi->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(mipi->clk);
}
- err = clk_prepare(mipi->clk);
- if (err < 0)
- return err;
-
platform_set_drvdata(pdev, mipi);
return 0;
}
-static int tegra_mipi_remove(struct platform_device *pdev)
-{
- struct tegra_mipi *mipi = platform_get_drvdata(pdev);
-
- clk_unprepare(mipi->clk);
-
- return 0;
-}
-
struct platform_driver tegra_mipi_driver = {
.driver = {
.name = "tegra-mipi",
.of_match_table = tegra_mipi_of_match,
},
.probe = tegra_mipi_probe,
- .remove = tegra_mipi_remove,
};
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 71ec1e7f657a..3535be9daa1f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1450,7 +1450,7 @@ out_failed_reset:
return ret;
}
-static int ipu_remove(struct platform_device *pdev)
+static void ipu_remove(struct platform_device *pdev)
{
struct ipu_soc *ipu = platform_get_drvdata(pdev);
@@ -1459,8 +1459,6 @@ static int ipu_remove(struct platform_device *pdev)
ipu_irq_exit(ipu);
clk_disable_unprepare(ipu->clk);
-
- return 0;
}
static struct platform_driver imx_ipu_driver = {
@@ -1469,7 +1467,7 @@ static struct platform_driver imx_ipu_driver = {
.of_match_table = imx_ipu_dt_ids,
},
.probe = ipu_probe,
- .remove = ipu_remove,
+ .remove_new = ipu_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 624b76131560..41bd5dbd7356 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -351,7 +351,7 @@ static int ipu_pre_probe(struct platform_device *pdev)
return 0;
}
-static int ipu_pre_remove(struct platform_device *pdev)
+static void ipu_pre_remove(struct platform_device *pdev)
{
struct ipu_pre *pre = platform_get_drvdata(pdev);
@@ -365,7 +365,6 @@ static int ipu_pre_remove(struct platform_device *pdev)
if (pre->buffer_virt)
gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
- return 0;
}
static const struct of_device_id ipu_pre_dt_ids[] = {
@@ -375,7 +374,7 @@ static const struct of_device_id ipu_pre_dt_ids[] = {
struct platform_driver ipu_pre_drv = {
.probe = ipu_pre_probe,
- .remove = ipu_pre_remove,
+ .remove_new = ipu_pre_remove,
.driver = {
.name = "imx-ipu-pre",
.of_match_table = ipu_pre_dt_ids,
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 661dedf6617a..afb2d72e9175 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -419,15 +419,13 @@ static int ipu_prg_probe(struct platform_device *pdev)
return 0;
}
-static int ipu_prg_remove(struct platform_device *pdev)
+static void ipu_prg_remove(struct platform_device *pdev)
{
struct ipu_prg *prg = platform_get_drvdata(pdev);
mutex_lock(&ipu_prg_list_mutex);
list_del(&prg->list);
mutex_unlock(&ipu_prg_list_mutex);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -471,7 +469,7 @@ static const struct of_device_id ipu_prg_dt_ids[] = {
struct platform_driver ipu_prg_drv = {
.probe = ipu_prg_probe,
- .remove = ipu_prg_remove,
+ .remove_new = ipu_prg_remove,
.driver = {
.name = "imx-ipu-prg",
.pm = &prg_pm_ops,
diff --git a/drivers/greybus/core.c b/drivers/greybus/core.c
index 33a47e73f0fa..313eb65cf703 100644
--- a/drivers/greybus/core.c
+++ b/drivers/greybus/core.c
@@ -90,9 +90,9 @@ greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
return NULL;
}
-static int greybus_match_device(struct device *dev, struct device_driver *drv)
+static int greybus_match_device(struct device *dev, const struct device_driver *drv)
{
- struct greybus_driver *driver = to_greybus_driver(drv);
+ const struct greybus_driver *driver = to_greybus_driver(drv);
struct gb_bundle *bundle;
const struct greybus_bundle_id *id;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 254006178426..988d0acbdf04 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2604,7 +2604,7 @@ const struct hid_device_id *hid_match_device(struct hid_device *hdev,
}
EXPORT_SYMBOL_GPL(hid_match_device);
-static int hid_bus_match(struct device *dev, struct device_driver *drv)
+static int hid_bus_match(struct device *dev, const struct device_driver *drv)
{
struct hid_driver *hdrv = to_hid_driver(drv);
struct hid_device *hdev = to_hid_device(dev);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index cc76b295b632..5ac7d70a7c84 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -236,7 +236,7 @@ static int ishtp_cl_device_probe(struct device *dev)
*
* Return: 1 if dev & drv matches, 0 otherwise.
*/
-static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+static int ishtp_cl_bus_match(struct device *dev, const struct device_driver *drv)
{
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 10926359e6d2..afe470f3661c 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/if_phonet.h>
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index e3beeac8aee5..8113cb9d4015 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -37,7 +37,7 @@ static int hsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static int hsi_bus_match(struct device *dev, struct device_driver *driver)
+static int hsi_bus_match(struct device *dev, const struct device_driver *driver)
{
if (of_driver_match_device(dev, driver))
return true;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 12a707ab73f8..c857dc3975be 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -685,7 +685,7 @@ static const struct hv_vmbus_device_id vmbus_device_null;
* Return a matching hv_vmbus_device_id pointer.
* If there is no match, return NULL.
*/
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver *drv,
struct hv_device *dev)
{
const guid_t *guid = &dev->dev_type;
@@ -696,7 +696,7 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
return NULL;
/* Look at the dynamic ids first, before the static ones */
- id = hv_vmbus_dynid_match(drv, guid);
+ id = hv_vmbus_dynid_match((struct hv_driver *)drv, guid);
if (!id)
id = hv_vmbus_dev_match(drv->id_table, guid);
@@ -809,9 +809,9 @@ ATTRIBUTE_GROUPS(vmbus_drv);
/*
* vmbus_match - Attempt to match the specified device to the specified driver
*/
-static int vmbus_match(struct device *device, struct device_driver *driver)
+static int vmbus_match(struct device *device, const struct device_driver *driver)
{
- struct hv_driver *drv = drv_to_hv_drv(driver);
+ const struct hv_driver *drv = drv_to_hv_drv(driver);
struct hv_device *hv_dev = device_to_hv_device(device);
/* The hv_sock driver handles all hv_sock offers. */
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 0c0a932c00f3..6505261e6068 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -306,6 +306,34 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
EXPORT_SYMBOL_GPL(__hwspin_unlock);
/**
+ * hwspin_lock_bust() - bust a specific hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to bust
+ * @id: identifier of the remote lock holder, if applicable
+ *
+ * This function will bust a hwspinlock that was previously acquired as
+ * long as the current owner of the lock matches the id given by the caller.
+ *
+ * Context: Process context.
+ *
+ * Returns: 0 on success, -EINVAL if the hwspinlock does not exist or the
+ * bust operation fails, and -EOPNOTSUPP if the bust operation is not
+ * defined for the hwspinlock.
+ */
+int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
+{
+ if (WARN_ON(!hwlock))
+ return -EINVAL;
+
+ if (!hwlock->bank->ops->bust) {
+ pr_err("bust operation not defined\n");
+ return -EOPNOTSUPP;
+ }
+
+ return hwlock->bank->ops->bust(hwlock, id);
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_bust);
+
+/**
* of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
* @hwlock_spec: hwlock specifier as found in the device tree
*
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
index 29892767bb7a..f298fc0ee5ad 100644
--- a/drivers/hwspinlock/hwspinlock_internal.h
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -21,6 +21,8 @@ struct hwspinlock_device;
* @trylock: make a single attempt to take the lock. returns 0 on
* failure and true on success. may _not_ sleep.
* @unlock: release the lock. always succeed. may _not_ sleep.
+ * @bust: optional, platform-specific bust handler, called by hwspinlock
+ * core to bust a specific lock.
* @relax: optional, platform-specific relax handler, called by hwspinlock
* core while spinning on a lock, between two successive
* invocations of @trylock. may _not_ sleep.
@@ -28,6 +30,7 @@ struct hwspinlock_device;
struct hwspinlock_ops {
int (*trylock)(struct hwspinlock *lock);
void (*unlock)(struct hwspinlock *lock);
+ int (*bust)(struct hwspinlock *lock, unsigned int id);
void (*relax)(struct hwspinlock *lock);
};
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 814dfe8697bf..0390979fd765 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -64,9 +64,34 @@ static void qcom_hwspinlock_unlock(struct hwspinlock *lock)
pr_err("%s: failed to unlock spinlock\n", __func__);
}
+static int qcom_hwspinlock_bust(struct hwspinlock *lock, unsigned int id)
+{
+ struct regmap_field *field = lock->priv;
+ u32 owner;
+ int ret;
+
+ ret = regmap_field_read(field, &owner);
+ if (ret) {
+ dev_err(lock->bank->dev, "unable to query spinlock owner\n");
+ return ret;
+ }
+
+ if (owner != id)
+ return 0;
+
+ ret = regmap_field_write(field, 0);
+ if (ret) {
+ dev_err(lock->bank->dev, "failed to bust spinlock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.trylock = qcom_hwspinlock_trylock,
.unlock = qcom_hwspinlock_unlock,
+ .bust = qcom_hwspinlock_bust,
};
static const struct regmap_config sfpb_mutex_config = {
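A minimal usage sketch for the new bust operation, assuming the caller already holds the struct hwspinlock pointer and knows which owner id the (crashed) remote side writes; the function name and owner value below are invented for illustration:

#include <linux/hwspinlock.h>
#include <linux/printk.h>

/* Hypothetical recovery path run after a remote processor crash. */
static int example_bust_stale_lock(struct hwspinlock *hwlock)
{
	int ret = hwspin_lock_bust(hwlock, 3 /* illustrative remote owner id */);

	if (ret == -EOPNOTSUPP)
		pr_warn("hwspinlock controller has no bust handler\n");

	return ret;
}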
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index a121dc5cbd61..d72993355473 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -26,9 +26,9 @@ module_param(host_mode, bool, 0444);
static DEFINE_IDA(intel_th_ida);
-static int intel_th_match(struct device *dev, struct device_driver *driver)
+static int intel_th_match(struct device *dev, const struct device_driver *driver)
{
- struct intel_th_driver *thdrv = to_intel_th_driver(driver);
+ const struct intel_th_driver *thdrv = to_intel_th_driver(driver);
struct intel_th_device *thdev = to_intel_th_device(dev);
if (thdev->type == INTEL_TH_SWITCH &&
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 6cbba733f259..3b87cd542c1b 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -189,7 +189,7 @@ struct intel_th_driver {
};
#define to_intel_th_driver(_d) \
- container_of((_d), struct intel_th_driver, driver)
+ container_of_const((_d), struct intel_th_driver, driver)
#define to_intel_th_driver_or_null(_d) \
((_d) ? to_intel_th_driver(_d) : NULL)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3e32fb882101..a22f9125322a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -196,6 +196,7 @@ config I2C_ISMT
config I2C_PIIX4
tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
depends on PCI && HAS_IOPORT
+ select I2C_SMBUS
help
If you say yes to this option, support will be included for the Intel
PIIX4 family of mainboard I2C interfaces. Specifically, the following
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 84aa18d1003b..4e32d57ae0bf 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -29,6 +29,7 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
@@ -982,6 +983,14 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
return retval;
}
+ /*
+ * The AUX bus can not be probed as on some platforms it reports all
+ * devices present and all reads return "0".
+ * This would allow the ee1004 to be probed incorrectly.
+ */
+ if (port == 0)
+ i2c_register_spd(adap);
+
*padap = adap;
return 0;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f76b7f4fafc1..b63f75e44296 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -136,10 +136,10 @@ const void *i2c_get_match_data(const struct i2c_client *client)
}
EXPORT_SYMBOL(i2c_get_match_data);
-static int i2c_device_match(struct device *dev, struct device_driver *drv)
+static int i2c_device_match(struct device *dev, const struct device_driver *drv)
{
struct i2c_client *client = i2c_verify_client(dev);
- struct i2c_driver *driver;
+ const struct i2c_driver *driver;
/* Attempt an OF style match */
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 1cb137b9181d..7e4203df83ed 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -352,18 +352,11 @@ void i2c_register_spd(struct i2c_adapter *adap)
return;
/*
- * If we're a child adapter on a muxed segment, then limit slots to 8,
- * as this is the max number of SPD EEPROMs that can be addressed per bus.
+ * The max number of SPD EEPROMs that can be addressed per bus is 8.
+ * If more slots are present, either a mux or multiple busses are
+ * needed; otherwise the additional slots are ignored.
*/
- if (i2c_parent_is_i2c_adapter(adap)) {
- slot_count = 8;
- } else {
- if (slot_count > 8) {
- dev_warn(&adap->dev,
- "More than 8 memory slots on a single bus, contact i801 maintainer to add missing mux config\n");
- return;
- }
- }
+ slot_count = min(slot_count, 8);
/*
* Memory types could be found at section 7.18.2 (Memory Device — Type), table 78
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index d6bbb8b68333..944577bb09c1 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -5,16 +5,17 @@
* Peter Korsgaard <peter.korsgaard@barco.com>
*/
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_data/i2c-mux-gpio.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/bits.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
struct gpiomux {
struct i2c_mux_gpio_platform_data data;
@@ -37,6 +38,9 @@ static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan)
i2c_mux_gpio_set(mux, chan);
+ if (mux->data.settle_time)
+ fsleep(mux->data.settle_time);
+
return 0;
}
@@ -116,6 +120,8 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
if (device_property_read_u32(dev, "idle-state", &mux->data.idle))
mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
+ device_property_read_u32(dev, "settle-time-us", &mux->data.settle_time);
+
return 0;
}
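A sketch of how legacy board code could request the same delay through the platform data path; the settle_time field is implied by the mux->data.settle_time use above, and the surrounding values are illustrative only:

#include <linux/platform_data/i2c-mux-gpio.h>

static const unsigned int example_mux_values[] = { 0, 1 };

static struct i2c_mux_gpio_platform_data example_mux_pdata = {
	.parent		= 1,	/* illustrative parent bus number */
	.values		= example_mux_values,
	.n_values	= 2,
	.idle		= I2C_MUX_GPIO_NO_IDLE,
	.settle_time	= 100,	/* us, consumed by fsleep() in the select() path */
};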
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 4d99a3524171..433f6088b7ce 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -10,8 +10,6 @@
#include <linux/i3c/master.h>
-extern const struct bus_type i3c_bus_type;
-
void i3c_bus_normaluse_lock(struct i3c_bus *bus);
void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 3b4d6a8edca3..7028f03c2c42 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -301,10 +301,10 @@ static const struct device_type i3c_device_type = {
.uevent = i3c_device_uevent,
};
-static int i3c_device_match(struct device *dev, struct device_driver *drv)
+static int i3c_device_match(struct device *dev, const struct device_driver *drv)
{
struct i3c_device *i3cdev;
- struct i3c_driver *i3cdrv;
+ const struct i3c_driver *i3cdrv;
if (dev->type != &i3c_device_type)
return 0;
@@ -342,6 +342,7 @@ const struct bus_type i3c_bus_type = {
.probe = i3c_device_probe,
.remove = i3c_device_remove,
};
+EXPORT_SYMBOL_GPL(i3c_bus_type);
static enum i3c_addr_slot_status
i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
diff --git a/drivers/i3c/master/ast2600-i3c-master.c b/drivers/i3c/master/ast2600-i3c-master.c
index 01a47d3dd499..84942dbb6f80 100644
--- a/drivers/i3c/master/ast2600-i3c-master.c
+++ b/drivers/i3c/master/ast2600-i3c-master.c
@@ -156,7 +156,6 @@ static int ast2600_i3c_probe(struct platform_device *pdev)
i3c->sda_pullup);
i3c->dw.platform_ops = &ast2600_i3c_ops;
- i3c->dw.ibi_capable = true;
return dw_i3c_common_probe(&i3c->dw, pdev);
}
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 0ec00e644bd4..8d694672c110 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -17,7 +17,9 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -217,7 +219,7 @@
#define I3C_BUS_THIGH_MAX_NS 41
#define XFER_TIMEOUT (msecs_to_jiffies(1000))
-
+#define RPM_AUTOSUSPEND_TIMEOUT 1000 /* ms */
struct dw_i3c_cmd {
u32 cmd_lo;
u32 cmd_hi;
@@ -300,7 +302,14 @@ static void dw_i3c_master_disable(struct dw_i3c_master *master)
static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
- writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
+ u32 dev_ctrl;
+
+ dev_ctrl = readl(master->regs + DEVICE_CTRL);
+ /* For now don't support Hot-Join */
+ dev_ctrl |= DEV_CTRL_HOT_JOIN_NACK;
+ if (master->i2c_slv_prsnt)
+ dev_ctrl |= DEV_CTRL_I2C_SLAVE_PRESENT;
+ writel(dev_ctrl | DEV_CTRL_ENABLE,
master->regs + DEVICE_CTRL);
}
@@ -521,6 +530,32 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
dw_i3c_master_start_xfer_locked(master);
}
+static void dw_i3c_master_set_intr_regs(struct dw_i3c_master *master)
+{
+ u32 thld_ctrl;
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
+ QUEUE_THLD_CTRL_IBI_STAT_MASK |
+ QUEUE_THLD_CTRL_IBI_DATA_MASK);
+ thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
+ QUEUE_THLD_CTRL_IBI_DATA(31);
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
+ thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
+ writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
+ writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
+
+ master->sir_rej_mask = IBI_REQ_REJECT_ALL;
+ writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);
+
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
+}
+
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
unsigned long core_rate, core_period;
@@ -543,18 +578,22 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
+ master->i3c_pp_timing = scl_timing;
/*
* In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
* will be set up by dw_i2c_clk_cfg as tLOW.
*/
- if (master->base.bus.mode == I3C_BUS_MODE_PURE)
+ if (master->base.bus.mode == I3C_BUS_MODE_PURE) {
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+ master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
+ }
lcnt = max_t(u8,
DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+ master->i3c_od_timing = scl_timing;
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
scl_timing = SCL_EXT_LCNT_1(lcnt);
@@ -565,6 +604,7 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
scl_timing |= SCL_EXT_LCNT_4(lcnt);
writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
+ master->ext_lcnt_timing = scl_timing;
return 0;
}
@@ -586,16 +626,21 @@ static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
SCL_I2C_FMP_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
+ master->i2c_fmp_timing = scl_timing;
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
SCL_I2C_FM_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
+ master->i2c_fm_timing = scl_timing;
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+ master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
+
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
master->regs + DEVICE_CTRL);
+ master->i2c_slv_prsnt = true;
return 0;
}
@@ -605,69 +650,58 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
struct dw_i3c_master *master = to_dw_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
struct i3c_device_info info = { };
- u32 thld_ctrl;
int ret;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
ret = master->platform_ops->init(master);
if (ret)
- return ret;
+ goto rpm_out;
switch (bus->mode) {
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
ret = dw_i2c_clk_cfg(master);
if (ret)
- return ret;
+ goto rpm_out;
fallthrough;
case I3C_BUS_MODE_PURE:
ret = dw_i3c_clk_cfg(master);
if (ret)
- return ret;
+ goto rpm_out;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto rpm_out;
}
- thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
- thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
- QUEUE_THLD_CTRL_IBI_STAT_MASK |
- QUEUE_THLD_CTRL_IBI_STAT_MASK);
- thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
- QUEUE_THLD_CTRL_IBI_DATA(31);
- writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
-
- thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
- thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
- writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
-
- writel(INTR_ALL, master->regs + INTR_STATUS);
- writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
- writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
-
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
- return ret;
+ goto rpm_out;
writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
master->regs + DEVICE_ADDR);
-
+ master->dev_addr = ret;
memset(&info, 0, sizeof(info));
info.dyn_addr = ret;
ret = i3c_master_set_info(&master->base, &info);
if (ret)
- return ret;
-
- writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
- writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
-
- /* For now don't support Hot-Join */
- writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
- master->regs + DEVICE_CTRL);
+ goto rpm_out;
+ dw_i3c_master_set_intr_regs(master);
dw_i3c_master_enable(master);
- return 0;
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+ return ret;
}
static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
@@ -769,11 +803,21 @@ static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
if (ccc->id == I3C_CCC_ENTDAA)
return -EINVAL;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
if (ccc->rnw)
ret = dw_i3c_ccc_get(master, ccc);
else
ret = dw_i3c_ccc_set(master, ccc);
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -786,6 +830,14 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
u8 p, last_addr = 0;
int ret, pos;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
olddevs = ~(master->free_pos);
/* Prepare DAT before launching DAA. */
@@ -794,8 +846,10 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
continue;
ret = i3c_master_get_free_addr(m, last_addr + 1);
- if (ret < 0)
- return -ENOSPC;
+ if (ret < 0) {
+ ret = -ENOSPC;
+ goto rpm_out;
+ }
master->devs[pos].addr = ret;
p = even_parity(ret);
@@ -805,16 +859,21 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+
+ ret = 0;
}
xfer = dw_i3c_master_alloc_xfer(master, 1);
- if (!xfer)
- return -ENOMEM;
+ if (!xfer) {
+ ret = -ENOMEM;
+ goto rpm_out;
+ }
pos = dw_i3c_master_get_free_pos(master);
if (pos < 0) {
dw_i3c_master_free_xfer(xfer);
- return pos;
+ ret = pos;
+ goto rpm_out;
}
cmd = &xfer->cmds[0];
cmd->cmd_hi = 0x1;
@@ -839,7 +898,10 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
dw_i3c_master_free_xfer(xfer);
- return 0;
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+ return ret;
}
static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
@@ -874,6 +936,14 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (!xfer)
return -ENOMEM;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
for (i = 0; i < i3c_nxfers; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
@@ -915,6 +985,8 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -1025,6 +1097,14 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
if (!xfer)
return -ENOMEM;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
for (i = 0; i < i2c_nxfers; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
@@ -1055,6 +1135,8 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -1075,6 +1157,7 @@ static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
data->index = pos;
master->devs[pos].addr = dev->addr;
+ master->devs[pos].is_i2c_addr = true;
master->free_pos &= ~BIT(pos);
i2c_dev_set_master_data(dev, data);
@@ -1175,17 +1258,16 @@ static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
master->platform_ops->set_dat_ibi(master, dev, enable, &reg);
writel(reg, master->regs + dat_entry);
- reg = readl(master->regs + IBI_SIR_REQ_REJECT);
if (enable) {
- global = reg == 0xffffffff;
- reg &= ~BIT(idx);
+ global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL);
+ master->sir_rej_mask &= ~BIT(idx);
} else {
bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);
- reg |= BIT(idx);
- global = (reg == 0xffffffff) && hj_rejected;
+ master->sir_rej_mask |= BIT(idx);
+ global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL) && hj_rejected;
}
- writel(reg, master->regs + IBI_SIR_REQ_REJECT);
+ writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);
if (global)
dw_i3c_master_enable_sir_signal(master, enable);
@@ -1197,6 +1279,15 @@ static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
static int dw_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
dw_i3c_master_enable_sir_signal(master, true);
writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_HOT_JOIN_NACK,
@@ -1212,6 +1303,8 @@ static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
master->regs + DEVICE_CTRL);
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
return 0;
}
@@ -1222,12 +1315,23 @@ static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
struct dw_i3c_master *master = to_dw_i3c_master(m);
int rc;
+ rc = pm_runtime_resume_and_get(master->dev);
+ if (rc < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, rc);
+ return rc;
+ }
+
dw_i3c_master_set_sir_enabled(master, dev, data->index, true);
rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
- if (rc)
+ if (rc) {
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+ }
return rc;
}
@@ -1245,6 +1349,8 @@ static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
return 0;
}
@@ -1403,21 +1509,6 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
.i2c_xfers = dw_i3c_master_i2c_xfers,
-};
-
-static const struct i3c_master_controller_ops dw_mipi_i3c_ibi_ops = {
- .bus_init = dw_i3c_master_bus_init,
- .bus_cleanup = dw_i3c_master_bus_cleanup,
- .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
- .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
- .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
- .do_daa = dw_i3c_master_daa,
- .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
- .send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
- .priv_xfers = dw_i3c_master_priv_xfers,
- .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
- .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
- .i2c_xfers = dw_i3c_master_i2c_xfers,
.request_ibi = dw_i3c_master_request_ibi,
.free_ibi = dw_i3c_master_free_ibi,
.enable_ibi = dw_i3c_master_enable_ibi,
@@ -1455,29 +1546,30 @@ static void dw_i3c_hj_work(struct work_struct *work)
int dw_i3c_common_probe(struct dw_i3c_master *master,
struct platform_device *pdev)
{
- const struct i3c_master_controller_ops *ops;
int ret, irq;
if (!master->platform_ops)
master->platform_ops = &dw_i3c_platform_ops_default;
+ master->dev = &pdev->dev;
+
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->core_clk = devm_clk_get(&pdev->dev, NULL);
+ master->core_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(master->core_clk))
return PTR_ERR(master->core_clk);
+ master->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
"core_rst");
if (IS_ERR(master->core_rst))
return PTR_ERR(master->core_rst);
- ret = clk_prepare_enable(master->core_clk);
- if (ret)
- goto err_disable_core_clk;
-
reset_control_deassert(master->core_rst);
spin_lock_init(&master->xferqueue.lock);
@@ -1493,6 +1585,11 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
platform_set_drvdata(pdev, master);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, RPM_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
/* Information regarding the FIFOs/QUEUEs depth */
ret = readl(master->regs + QUEUE_STATUS_LEVEL);
master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
@@ -1505,23 +1602,22 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
master->maxdevs = ret >> 16;
master->free_pos = GENMASK(master->maxdevs - 1, 0);
- ops = &dw_mipi_i3c_ops;
- if (master->ibi_capable)
- ops = &dw_mipi_i3c_ibi_ops;
-
INIT_WORK(&master->hj_work, dw_i3c_hj_work);
- ret = i3c_master_register(&master->base, &pdev->dev, ops, false);
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &dw_mipi_i3c_ops, false);
if (ret)
- goto err_assert_rst;
+ goto err_disable_pm;
return 0;
+err_disable_pm:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
err_assert_rst:
reset_control_assert(master->core_rst);
-err_disable_core_clk:
- clk_disable_unprepare(master->core_clk);
-
return ret;
}
EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
@@ -1530,9 +1626,9 @@ void dw_i3c_common_remove(struct dw_i3c_master *master)
{
i3c_master_unregister(&master->base);
- reset_control_assert(master->core_rst);
-
- clk_disable_unprepare(master->core_clk);
+ pm_runtime_disable(master->dev);
+ pm_runtime_set_suspended(master->dev);
+ pm_runtime_dont_use_autosuspend(master->dev);
}
EXPORT_SYMBOL_GPL(dw_i3c_common_remove);
@@ -1556,6 +1652,96 @@ static void dw_i3c_remove(struct platform_device *pdev)
dw_i3c_common_remove(master);
}
+static void dw_i3c_master_restore_addrs(struct dw_i3c_master *master)
+{
+ u32 pos, reg_val;
+
+ writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(master->dev_addr),
+ master->regs + DEVICE_ADDR);
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (master->free_pos & BIT(pos))
+ continue;
+
+ if (master->devs[pos].is_i2c_addr)
+ reg_val = DEV_ADDR_TABLE_LEGACY_I2C_DEV |
+ DEV_ADDR_TABLE_STATIC_ADDR(master->devs[pos].addr);
+ else
+ reg_val = DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr);
+
+ writel(reg_val, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+ }
+}
+
+static void dw_i3c_master_restore_timing_regs(struct dw_i3c_master *master)
+{
+ writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING);
+ writel(master->bus_free_timing, master->regs + BUS_FREE_TIMING);
+ writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING);
+ writel(master->ext_lcnt_timing, master->regs + SCL_EXT_LCNT_TIMING);
+
+ if (master->i2c_slv_prsnt) {
+ writel(master->i2c_fmp_timing, master->regs + SCL_I2C_FMP_TIMING);
+ writel(master->i2c_fm_timing, master->regs + SCL_I2C_FM_TIMING);
+ }
+}
+
+static int dw_i3c_master_enable_clks(struct dw_i3c_master *master)
+{
+ int ret = 0;
+
+ ret = clk_prepare_enable(master->core_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(master->pclk);
+ if (ret) {
+ clk_disable_unprepare(master->core_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline void dw_i3c_master_disable_clks(struct dw_i3c_master *master)
+{
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->core_clk);
+}
+
+static int __maybe_unused dw_i3c_master_runtime_suspend(struct device *dev)
+{
+ struct dw_i3c_master *master = dev_get_drvdata(dev);
+
+ dw_i3c_master_disable(master);
+
+ reset_control_assert(master->core_rst);
+ dw_i3c_master_disable_clks(master);
+ pinctrl_pm_select_sleep_state(dev);
+ return 0;
+}
+
+static int __maybe_unused dw_i3c_master_runtime_resume(struct device *dev)
+{
+ struct dw_i3c_master *master = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+ dw_i3c_master_enable_clks(master);
+ reset_control_deassert(master->core_rst);
+
+ dw_i3c_master_set_intr_regs(master);
+ dw_i3c_master_restore_timing_regs(master);
+ dw_i3c_master_restore_addrs(master);
+
+ dw_i3c_master_enable(master);
+ return 0;
+}
+
+static const struct dev_pm_ops dw_i3c_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL)
+};
+
static const struct of_device_id dw_i3c_master_of_match[] = {
{ .compatible = "snps,dw-i3c-master-1.00a", },
{},
@@ -1568,6 +1754,7 @@ static struct platform_driver dw_i3c_driver = {
.driver = {
.name = "dw-i3c-master",
.of_match_table = dw_i3c_master_of_match,
+ .pm = &dw_i3c_pm_ops,
},
};
module_platform_driver(dw_i3c_driver);
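As a minimal sketch of how the runtime PM callbacks added above are typically exercised (not taken from this patch; the transfer helper name is hypothetical), a consumer path would bracket register access with runtime-PM get/put calls so the controller can be clock-gated between transfers:

static int example_dw_i3c_xfer(struct dw_i3c_master *master)
{
	int ret;

	/* bring the controller out of runtime suspend before touching registers */
	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0)
		return ret;

	/* ... queue the transfer and wait for completion here ... */

	/* let autosuspend gate the clocks again once the bus is idle */
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return 0;
}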
diff --git a/drivers/i3c/master/dw-i3c-master.h b/drivers/i3c/master/dw-i3c-master.h
index 4ab94aa72252..219ff815d3a7 100644
--- a/drivers/i3c/master/dw-i3c-master.h
+++ b/drivers/i3c/master/dw-i3c-master.h
@@ -19,11 +19,13 @@ struct dw_i3c_master_caps {
struct dw_i3c_dat_entry {
u8 addr;
+ bool is_i2c_addr;
struct i3c_dev_desc *ibi_dev;
};
struct dw_i3c_master {
struct i3c_master_controller base;
+ struct device *dev;
u16 maxdevs;
u16 datstartaddr;
u32 free_pos;
@@ -36,10 +38,18 @@ struct dw_i3c_master {
void __iomem *regs;
struct reset_control *core_rst;
struct clk *core_clk;
+ struct clk *pclk;
char version[5];
char type[5];
- bool ibi_capable;
-
+ u32 sir_rej_mask;
+ bool i2c_slv_prsnt;
+ u32 dev_addr;
+ u32 i3c_pp_timing;
+ u32 i3c_od_timing;
+ u32 ext_lcnt_timing;
+ u32 bus_free_timing;
+ u32 i2c_fm_timing;
+ u32 i2c_fmp_timing;
/*
* Per-device hardware data, used to manage the device address table
* (DAT)
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index d7e966a25583..4e7d6a43ee9b 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -631,6 +631,7 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
static int i3c_hci_init(struct i3c_hci *hci)
{
u32 regval, offset;
+ bool size_in_dwords;
int ret;
/* Validate HCI hardware version */
@@ -654,11 +655,16 @@ static int i3c_hci_init(struct i3c_hci *hci)
hci->caps = reg_read(HC_CAPABILITIES);
DBG("caps = %#x", hci->caps);
+ size_in_dwords = hci->version_major < 1 ||
+ (hci->version_major == 1 && hci->version_minor < 1);
+
regval = reg_read(DAT_SECTION);
offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
+ if (size_in_dwords)
+ hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
hci->DAT_entries, hci->DAT_entry_size, offset);
@@ -667,6 +673,8 @@ static int i3c_hci_init(struct i3c_hci *hci)
hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
+ if (size_in_dwords)
+ hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
hci->DCT_entries, hci->DCT_entry_size, offset);
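The conversion above accounts for controllers older than HCI v1.1, whose DAT/DCT size fields report DWORDs rather than entries. A worked example with assumed register values, purely for illustration:

	/* Assume DAT_TABLE_SIZE reads back 32 and the entry size is 8 bytes */
	u32 dat_dwords = 32;
	u32 dat_entry_size = 8;
	u32 dat_entries = 4 * dat_dwords / dat_entry_size;	/* 32 DWORDs = 128 bytes = 16 entries */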
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 4e01a95cc4d0..a918e96b21fd 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -147,21 +147,6 @@ struct hci_dma_dev_ibi_data {
unsigned int max_len;
};
-static inline u32 lo32(dma_addr_t physaddr)
-{
- return physaddr;
-}
-
-static inline u32 hi32(dma_addr_t physaddr)
-{
- /* trickery to avoid compiler warnings on 32-bit build targets */
- if (sizeof(dma_addr_t) > 4) {
- u64 hi = physaddr;
- return hi >> 32;
- }
- return 0;
-}
-
static void hci_dma_cleanup(struct i3c_hci *hci)
{
struct hci_rings_data *rings = hci->io_data;
@@ -265,10 +250,10 @@ static int hci_dma_init(struct i3c_hci *hci)
if (!rh->xfer || !rh->resp || !rh->src_xfers)
goto err_out;
- rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
- rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
- rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
- rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));
+ rh_reg_write(CMD_RING_BASE_LO, lower_32_bits(rh->xfer_dma));
+ rh_reg_write(CMD_RING_BASE_HI, upper_32_bits(rh->xfer_dma));
+ rh_reg_write(RESP_RING_BASE_LO, lower_32_bits(rh->resp_dma));
+ rh_reg_write(RESP_RING_BASE_HI, upper_32_bits(rh->resp_dma));
regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
rh_reg_write(CR_SETUP, regval);
@@ -294,7 +279,17 @@ static int hci_dma_init(struct i3c_hci *hci)
rh->ibi_chunk_sz = dma_get_cache_alignment();
rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
- BUG_ON(rh->ibi_chunk_sz > 256);
+ /*
+ * Round the IBI data chunk size to a number of bytes supported by
+ * the HW. The chunk size can be 2^n DWORDs, which is the same as
+ * 2^(n+2) bytes, where n is 0..6.
+ */
+ rh->ibi_chunk_sz = umax(4, rh->ibi_chunk_sz);
+ rh->ibi_chunk_sz = roundup_pow_of_two(rh->ibi_chunk_sz);
+ if (rh->ibi_chunk_sz > 256) {
+ ret = -EINVAL;
+ goto err_out;
+ }
ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
@@ -315,6 +310,11 @@ static int hci_dma_init(struct i3c_hci *hci)
goto err_out;
}
+ rh_reg_write(IBI_STATUS_RING_BASE_LO, lower_32_bits(rh->ibi_status_dma));
+ rh_reg_write(IBI_STATUS_RING_BASE_HI, upper_32_bits(rh->ibi_status_dma));
+ rh_reg_write(IBI_DATA_RING_BASE_LO, lower_32_bits(rh->ibi_data_dma));
+ rh_reg_write(IBI_DATA_RING_BASE_HI, upper_32_bits(rh->ibi_data_dma));
+
regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
rh->ibi_status_entries) |
FIELD_PREP(IBI_DATA_CHUNK_SIZE,
@@ -404,8 +404,8 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
- *ring_data++ = lo32(xfer->data_dma);
- *ring_data++ = hi32(xfer->data_dma);
+ *ring_data++ = lower_32_bits(xfer->data_dma);
+ *ring_data++ = upper_32_bits(xfer->data_dma);
} else {
*ring_data++ = 0;
*ring_data++ = 0;
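For reference, the generic helpers that replace the local lo32()/hi32() wrappers split a dma_addr_t the same way while staying warning-free on 32-bit builds; a minimal illustration:

	dma_addr_t addr = xfer->data_dma;
	u32 lo = lower_32_bits(addr);	/* addr & 0xffffffff */
	u32 hi = upper_32_bits(addr);	/* addr >> 32, or 0 when dma_addr_t is 32-bit */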
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index bb299ce02ccc..0a68fd1b81d4 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -790,7 +790,20 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
int ret, i;
while (true) {
- /* Enter/proceed with DAA */
+ /* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes: ENTER DAA and PROCESS DAA.
+ *
+ * ENTER DAA:
+ * 1. Issues START, 7E, ENTDAA, and then emits 7E/R to process the first target.
+ * 2. Stops just before the new Dynamic Address (DA) is to be emitted.
+ *
+ * PROCESS DAA:
+ * 1. The DA is written using MWDATAB or ADDR bits 6:0.
+ * 2. ProcessDAA is requested again to write the new address, and then starts the
+ * next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS indicating NACK
+ * means the DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed on
+ * the 7E/R, meaning no more Slaves need a DA, then a COMPLETE is signaled
+ * (along with DONE) and a STOP is issued automatically.
+ */
writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
SVC_I3C_MCTRL_TYPE_I3C |
SVC_I3C_MCTRL_IBIRESP_NACK |
@@ -807,7 +820,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
SVC_I3C_MSTATUS_MCTRLDONE(reg),
1, 1000);
if (ret)
- return ret;
+ break;
if (SVC_I3C_MSTATUS_RXPEND(reg)) {
u8 data[6];
@@ -819,7 +832,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
*/
ret = svc_i3c_master_readb(master, data, 6);
if (ret)
- return ret;
+ break;
for (i = 0; i < 6; i++)
prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
@@ -827,7 +840,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
/* We do not care about the BCR and DCR yet */
ret = svc_i3c_master_readb(master, data, 2);
if (ret)
- return ret;
+ break;
} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
SVC_I3C_MSTATUS_COMPLETE(reg)) {
@@ -835,12 +848,23 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
* All devices received and acked their dynamic
* address, this is the natural end of the DAA
* procedure.
+ *
+ * The hardware will automatically emit a STOP in this case.
*/
- break;
+ *count = dev_nb;
+ return 0;
+
} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
/* No I3C devices attached */
- if (dev_nb == 0)
+ if (dev_nb == 0) {
+ /*
+ * The hardware can't treat the first NACK for ENTDAA as a
+ * normal COMPLETE, so a STOP must be emitted manually.
+ */
+ ret = 0;
+ *count = 0;
break;
+ }
/*
* A slave device nacked the address, this is
@@ -849,8 +873,10 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
* answer again immediately and shall ack the
* address this time.
*/
- if (prov_id[dev_nb] == nacking_prov_id)
- return -EIO;
+ if (prov_id[dev_nb] == nacking_prov_id) {
+ ret = -EIO;
+ break;
+ }
dev_nb--;
nacking_prov_id = prov_id[dev_nb];
@@ -858,7 +884,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
continue;
} else {
- return -EIO;
+ break;
}
}
@@ -870,12 +896,12 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
SVC_I3C_MSTATUS_BETWEEN(reg),
0, 1000);
if (ret)
- return ret;
+ break;
/* Give the slave device a suitable dynamic address */
ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
if (ret < 0)
- return ret;
+ break;
addrs[dev_nb] = ret;
dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
@@ -885,9 +911,9 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
last_addr = addrs[dev_nb++];
}
- *count = dev_nb;
-
- return 0;
+ /* A STOP must be issued manually except for the COMPLETE condition */
+ svc_i3c_master_emit_stop(master);
+ return ret;
}
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
@@ -961,11 +987,10 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
spin_lock_irqsave(&master->xferqueue.lock, flags);
ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
- if (ret) {
- svc_i3c_master_emit_stop(master);
- svc_i3c_master_clear_merrwarn(master);
+
+ svc_i3c_master_clear_merrwarn(master);
+ if (ret)
goto rpm_out;
- }
/* Register all devices who participated to the core */
for (i = 0; i < dev_nb; i++) {
@@ -1052,29 +1077,59 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
u8 *in, const u8 *out, unsigned int xfer_len,
unsigned int *actual_len, bool continued)
{
+ int retry = 2;
u32 reg;
int ret;
/* clean SVC_I3C_MINT_IBIWON w1c bits */
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
- writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
- xfer_type |
- SVC_I3C_MCTRL_IBIRESP_NACK |
- SVC_I3C_MCTRL_DIR(rnw) |
- SVC_I3C_MCTRL_ADDR(addr) |
- SVC_I3C_MCTRL_RDTERM(*actual_len),
- master->regs + SVC_I3C_MCTRL);
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ while (retry--) {
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(rnw) |
+ SVC_I3C_MCTRL_ADDR(addr) |
+ SVC_I3C_MCTRL_RDTERM(*actual_len),
+ master->regs + SVC_I3C_MCTRL);
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
- if (ret)
- goto emit_stop;
+ if (ret)
+ goto emit_stop;
- if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
- ret = -ENXIO;
- *actual_len = 0;
- goto emit_stop;
+ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
+ /*
+ * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
+ * If the Controller chooses to start an I3C Message with an I3C Dynamic
+ * Address, then special provisions shall be made because that same I3C
+ * Target may be initiating an IBI or a Controller Role Request. So, one of
+ * three things may happen (cases 1 and 2 are omitted here):
+ *
+ * 3. The Addresses match and the RnW bits also match, and so neither
+ * Controller nor Target will ACK since both are expecting the other side to
+ * provide ACK. As a result, each side might think it had "won" arbitration,
+ * but neither side would continue, as each would subsequently see that the
+ * other did not provide ACK.
+ * ...
+ * For either value of RnW: Due to the NACK, the Controller shall defer the
+ * Private Write or Private Read, and should typically transmit the Target
+ * Address again after a Repeated START (i.e., the next one or any one prior
+ * to a STOP in the Frame). Since the Address Header following a Repeated
+ * START is not arbitrated, the Controller will always win (see Section
+ * 5.1.2.2.4).
+ */
+ if (retry && addr != 0x7e) {
+ writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
+ } else {
+ ret = -ENXIO;
+ *actual_len = 0;
+ goto emit_stop;
+ }
+ } else {
+ break;
+ }
}
/*
@@ -1321,7 +1376,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
cmd->addr = ccc->dests[0].addr;
cmd->rnw = ccc->rnw;
cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
- cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data,
+ cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
cmd->len = xfer_len;
cmd->actual_len = actual_len;
cmd->continued = false;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 95fa857e8aad..426e3c9f88a1 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -564,13 +564,11 @@ static int tiadc_parse_dt(struct platform_device *pdev,
struct tiadc_device *adc_dev)
{
struct device_node *node = pdev->dev.of_node;
- struct property *prop;
- const __be32 *cur;
int channels = 0;
u32 val;
int i;
- of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
+ of_property_for_each_u32(node, "ti,adc-channels", val) {
adc_dev->channel_line[channels] = val;
/* Set Default values for optional DT parameters */
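The iterator now takes only the node, the property name, and the value variable; the struct property and __be32 cursor are handled internally. A minimal standalone sketch of the updated form (the property name is reused from the hunk above, pdev stands for the probing platform device):

	struct device_node *np = pdev->dev.of_node;
	u32 val;

	of_property_for_each_u32(np, "ti,adc-channels", val)
		pr_debug("ADC channel line %u\n", val);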
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index ad39ac6fa96d..10cc95867415 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -806,9 +806,9 @@ start_over:
}
EXPORT_SYMBOL(gameport_unregister_driver);
-static int gameport_bus_match(struct device *dev, struct device_driver *drv)
+static int gameport_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct gameport_driver *gameport_drv = to_gameport_driver(drv);
+ const struct gameport_driver *gameport_drv = to_gameport_driver(drv);
return !gameport_drv->ignore;
}
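The same const-correctness pattern repeats across the bus drivers touched below: the .match callback now takes a const struct device_driver *, and the driver wrapper is recovered with container_of_const(). A generic sketch, with foo_* names purely illustrative:

static int foo_bus_match(struct device *dev, const struct device_driver *drv)
{
	const struct foo_driver *fdrv = container_of_const(drv, struct foo_driver, driver);
	struct foo_device *fdev = to_foo_device(dev);

	/* non-NULL id table entry means the driver can handle this device */
	return foo_match_id(fdrv->id_table, fdev) != NULL;
}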
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 343030290d78..3aee04837205 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -144,9 +144,9 @@ bool rmi_is_function_device(struct device *dev)
return dev->type == &rmi_function_type;
}
-static int rmi_function_match(struct device *dev, struct device_driver *drv)
+static int rmi_function_match(struct device *dev, const struct device_driver *drv)
{
- struct rmi_function_handler *handler = to_rmi_function_handler(drv);
+ const struct rmi_function_handler *handler = to_rmi_function_handler(drv);
struct rmi_function *fn = to_rmi_function(dev);
return fn->fd.function_number == handler->func;
@@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(rmi_unregister_function_handler);
/* Bus specific stuff */
-static int rmi_bus_match(struct device *dev, struct device_driver *drv)
+static int rmi_bus_match(struct device *dev, const struct device_driver *drv)
{
bool physical = rmi_is_physical_device(dev);
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index ea46ad9447ec..d4d0d82c69aa 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -87,7 +87,7 @@ struct rmi_function_handler {
};
#define to_rmi_function_handler(d) \
- container_of(d, struct rmi_function_handler, driver)
+ container_of_const(d, struct rmi_function_handler, driver)
int __must_check __rmi_register_function_handler(struct rmi_function_handler *,
struct module *, const char *);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index ef9ea295f9e0..2168b6cd7167 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -1258,7 +1258,7 @@ static struct rmi_driver rmi_physical_driver = {
.set_input_params = rmi_driver_set_input_params,
};
-bool rmi_is_physical_driver(struct device_driver *drv)
+bool rmi_is_physical_driver(const struct device_driver *drv)
{
return drv == &rmi_physical_driver.driver;
}
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 1c6c6086c0e5..3bfe9013043e 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -84,7 +84,7 @@ int rmi_register_desc_calc_reg_offset(
bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
u8 subpacket);
-bool rmi_is_physical_driver(struct device_driver *);
+bool rmi_is_physical_driver(const struct device_driver *);
int rmi_register_physical_driver(void);
void rmi_unregister_physical_driver(void);
void rmi_free_function_list(struct rmi_device *rmi_dev);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 04967494eeb6..97d8eacb9112 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -877,10 +877,10 @@ static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
serio_continue_rx(serio);
}
-static int serio_bus_match(struct device *dev, struct device_driver *drv)
+static int serio_bus_match(struct device *dev, const struct device_driver *drv)
{
struct serio *serio = to_serio_port(dev);
- struct serio_driver *serio_drv = to_serio_driver(drv);
+ const struct serio_driver *serio_drv = to_serio_driver(drv);
if (serio->manual_bind || serio_drv->manual_bind)
return 0;
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 9d9a7fde59e7..1074ee25064d 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -588,9 +588,9 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES,
- cfg->ias = IOMMU_IN_ADDR_BIT_SIZE,
- cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
+ cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
+ cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
+ cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
cfg->tlb = &v1_flush_ops;
pgtable->iop.ops.map_pages = iommu_v1_map_pages;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 4b2994b6126d..2fce4f6d4e1b 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -277,7 +277,7 @@ static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
*/
if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
- smmu->pgsize_bitmap = PAGE_SIZE;
+ smmu->pgsize_bitmap &= GENMASK(PAGE_SHIFT, 0);
pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
}
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index ba53571a8239..a2f4ffe6d949 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -232,8 +232,8 @@ static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom)
pgt_size = sprd_iommu_pgt_size(&dom->domain);
dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
- dom->sdev = NULL;
sprd_iommu_hw_en(dom->sdev, false);
+ dom->sdev = NULL;
}
static void sprd_iommu_domain_free(struct iommu_domain *domain)
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index 866bf48d803b..57d232c909f9 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -13,7 +13,7 @@
#include <linux/ipack.h>
#define to_ipack_dev(device) container_of(device, struct ipack_device, dev)
-#define to_ipack_driver(drv) container_of(drv, struct ipack_driver, driver)
+#define to_ipack_driver(drv) container_of_const(drv, struct ipack_driver, driver)
static DEFINE_IDA(ipack_ida);
@@ -49,10 +49,10 @@ ipack_match_id(const struct ipack_device_id *ids, struct ipack_device *idev)
return NULL;
}
-static int ipack_bus_match(struct device *dev, struct device_driver *drv)
+static int ipack_bus_match(struct device *dev, const struct device_driver *drv)
{
struct ipack_device *idev = to_ipack_dev(dev);
- struct ipack_driver *idrv = to_ipack_driver(drv);
+ const struct ipack_driver *idrv = to_ipack_driver(drv);
const struct ipack_device_id *found_id;
found_id = ipack_match_id(idrv->id_table, idev);
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 072bd227b6c6..4525366d16d6 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -111,8 +111,6 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
struct device_node *node = irq_domain_get_of_node(domain);
struct irq_chip_generic *gc;
struct aic_chip_data *aic;
- struct property *prop;
- const __be32 *p;
u32 hwirq;
gc = irq_get_domain_generic_chip(domain, 0);
@@ -120,7 +118,7 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
aic = gc->private;
aic->ext_irqs |= 1;
- of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) {
+ of_property_for_each_u32(node, "atmel,external-irqs", hwirq) {
gc = irq_get_domain_generic_chip(domain, hwirq);
if (!gc) {
pr_warn("AIC: external irq %d >= %d skip it\n",
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index 1d9bb28d13e5..5d6b8e025bb8 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -190,13 +190,11 @@ static void __init pic32_ext_irq_of_init(struct irq_domain *domain)
{
struct device_node *node = irq_domain_get_of_node(domain);
struct evic_chip_data *priv = domain->host_data;
- struct property *prop;
- const __le32 *p;
u32 hwirq;
int i = 0;
const char *pname = "microchip,external-irqs";
- of_property_for_each_u32(node, pname, prop, p, hwirq) {
+ of_property_for_each_u32(node, pname, hwirq) {
if (i >= ARRAY_SIZE(priv->ext_irqs)) {
pr_warn("More than %d external irq, skip rest\n",
ARRAY_SIZE(priv->ext_irqs));
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 0d2928d8aeae..e5a483fd9ad8 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -1901,7 +1901,7 @@ hfcmulti_dtmf(struct hfc_multi *hc)
static void
hfcmulti_tx(struct hfc_multi *hc, int ch)
{
- int i, ii, temp, len = 0;
+ int i, ii, temp, tmp_len, len = 0;
int Zspace, z1, z2; /* must be int for calculation */
int Fspace, f1, f2;
u_char *d;
@@ -2122,14 +2122,15 @@ next_frame:
HFC_wait_nodebug(hc);
}
+ tmp_len = (*sp)->len;
dev_kfree_skb(*sp);
/* check for next frame */
if (bch && get_next_bframe(bch)) {
- len = (*sp)->len;
+ len = tmp_len;
goto next_frame;
}
if (dch && get_next_dframe(dch)) {
- len = (*sp)->len;
+ len = tmp_len;
goto next_frame;
}
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index b7b3ef1e58dc..b461b1bed25b 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -183,7 +183,7 @@ static void mac_hid_stop_emulation(void)
mac_hid_destroy_emumouse();
}
-static int mac_hid_toggle_emumouse(struct ctl_table *table, int write,
+static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 565f1e21ff7d..13626205530d 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -36,7 +36,7 @@
static struct macio_chip *macio_on_hold;
-static int macio_bus_match(struct device *dev, struct device_driver *drv)
+static int macio_bus_match(struct device *dev, const struct device_driver *drv)
{
const struct of_device_id * matches = drv->of_match_table;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 3b8842c4a340..4eed97295927 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -276,6 +276,14 @@ config SPRD_MBOX
to send messages between application processors and MCU. Say Y here if
you want to build the Spreadtrum mailbox controller driver.
+config QCOM_CPUCP_MBOX
+ tristate "Qualcomm Technologies, Inc. CPUCP mailbox driver"
+ depends on (ARCH_QCOM || COMPILE_TEST) && 64BIT
+ help
+ Qualcomm Technologies, Inc. CPUSS Control Processor (CPUCP) mailbox
+ controller driver enables communication between AP and CPUCP. Say
+ Y here if you want to build this driver.
+
config QCOM_IPCC
tristate "Qualcomm Technologies, Inc. IPCC driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 5cf2f54debaf..3c3c27d54c13 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -61,4 +61,6 @@ obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o
+
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 242e7504a628..a873672a9082 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -158,10 +158,6 @@ enum pdc_hw {
PDC_HW /* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};
-struct pdc_dma_map {
- void *ctx; /* opaque context associated with frame */
-};
-
/* dma descriptor */
struct dma64dd {
u32 ctrl1; /* misc control bits */
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 933727f89431..d17efb1dd0cb 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -225,6 +225,8 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
void *data)
{
u32 *arg = data;
+ u32 val;
+ int ret;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -236,7 +238,13 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
queue_work(system_bh_wq, &cp->txdb_work);
break;
case IMX_MU_TYPE_TXDB_V2:
- imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
+ imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
+ priv->dcfg->xCR[IMX_MU_GCR]);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
+ !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+ 0, 1000);
+ if (ret)
+ dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 4aa394e91109..4bff73532085 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -22,7 +22,6 @@
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
-#define CMDQ_GCE_NUM_MAX (2)
#define CMDQ_CURR_IRQ_STATUS 0x10
#define CMDQ_SYNC_TOKEN_UPDATE 0x68
@@ -81,7 +80,7 @@ struct cmdq {
u32 irq_mask;
const struct gce_plat *pdata;
struct cmdq_thread *thread;
- struct clk_bulk_data clocks[CMDQ_GCE_NUM_MAX];
+ struct clk_bulk_data *clocks;
bool suspended;
};
@@ -578,16 +577,64 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
return &mbox->chans[ind];
}
+static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
+{
+ static const char * const gce_name = "gce";
+ struct device_node *node, *parent = dev->of_node->parent;
+ struct clk_bulk_data *clks;
+
+ cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
+ sizeof(*cmdq->clocks), GFP_KERNEL);
+ if (!cmdq->clocks)
+ return -ENOMEM;
+
+ if (cmdq->pdata->gce_num == 1) {
+ clks = &cmdq->clocks[0];
+
+ clks->id = gce_name;
+ clks->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clks->clk))
+ return dev_err_probe(dev, PTR_ERR(clks->clk),
+ "failed to get gce clock\n");
+
+ return 0;
+ }
+
+ /*
+ * If there is more than one GCE, get the clocks for the others too,
+ * as the clock of the main GCE must be enabled for additional IPs
+ * to be reachable.
+ */
+ for_each_child_of_node(parent, node) {
+ int alias_id = of_alias_get_id(node, gce_name);
+
+ if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
+ continue;
+
+ clks = &cmdq->clocks[alias_id];
+
+ clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
+ if (!clks->id) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+
+ clks->clk = of_clk_get(node, 0);
+ if (IS_ERR(clks->clk)) {
+ of_node_put(node);
+ return dev_err_probe(dev, PTR_ERR(clks->clk),
+ "failed to get gce%d clock\n", alias_id);
+ }
+ }
+
+ return 0;
+}
+
static int cmdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cmdq *cmdq;
int err, i;
- struct device_node *phandle = dev->of_node;
- struct device_node *node;
- int alias_id = 0;
- static const char * const clk_name = "gce";
- static const char * const clk_names[] = { "gce0", "gce1" };
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -612,29 +659,9 @@ static int cmdq_probe(struct platform_device *pdev)
dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
dev, cmdq->base, cmdq->irq);
- if (cmdq->pdata->gce_num > 1) {
- for_each_child_of_node(phandle->parent, node) {
- alias_id = of_alias_get_id(node, clk_name);
- if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
- cmdq->clocks[alias_id].id = clk_names[alias_id];
- cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
- if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- of_node_put(node);
- return dev_err_probe(dev,
- PTR_ERR(cmdq->clocks[alias_id].clk),
- "failed to get gce clk: %d\n",
- alias_id);
- }
- }
- }
- } else {
- cmdq->clocks[alias_id].id = clk_name;
- cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
- if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
- "failed to get gce clk\n");
- }
- }
+ err = cmdq_get_clocks(dev, cmdq);
+ if (err)
+ return err;
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
@@ -662,12 +689,6 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
}
- err = devm_mbox_controller_register(dev, &cmdq->mbox);
- if (err < 0) {
- dev_err(dev, "failed to register mailbox: %d\n", err);
- return err;
- }
-
platform_set_drvdata(pdev, cmdq);
WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
@@ -695,6 +716,12 @@ static int cmdq_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
+ err = devm_mbox_controller_register(dev, &cmdq->mbox);
+ if (err < 0) {
+ dev_err(dev, "failed to register mailbox: %d\n", err);
+ return err;
+ }
+
return 0;
}
@@ -790,4 +817,5 @@ static void __exit cmdq_drv_exit(void)
subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);
+MODULE_DESCRIPTION("Mediatek Command Queue(CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 46747559b438..7a87424657a1 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -230,7 +230,8 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
int ret = 0;
ret = request_threaded_irq(mbox->irq, NULL, mbox_interrupt,
- IRQF_ONESHOT, mbox->name, mbox);
+ IRQF_SHARED | IRQF_ONESHOT, mbox->name,
+ mbox);
if (unlikely(ret)) {
pr_err("failed to register mailbox interrupt:%d\n", ret);
return ret;
diff --git a/drivers/mailbox/qcom-cpucp-mbox.c b/drivers/mailbox/qcom-cpucp-mbox.c
new file mode 100644
index 000000000000..e5437c294803
--- /dev/null
+++ b/drivers/mailbox/qcom-cpucp-mbox.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define APSS_CPUCP_IPC_CHAN_SUPPORTED 3
+#define APSS_CPUCP_MBOX_CMD_OFF 0x4
+
+/* Tx Registers */
+#define APSS_CPUCP_TX_MBOX_CMD(i) (0x100 + ((i) * 8))
+
+/* Rx Registers */
+#define APSS_CPUCP_RX_MBOX_CMD(i) (0x100 + ((i) * 8))
+#define APSS_CPUCP_RX_MBOX_MAP 0x4000
+#define APSS_CPUCP_RX_MBOX_STAT 0x4400
+#define APSS_CPUCP_RX_MBOX_CLEAR 0x4800
+#define APSS_CPUCP_RX_MBOX_EN 0x4c00
+#define APSS_CPUCP_RX_MBOX_CMD_MASK GENMASK_ULL(63, 0)
+
+/**
+ * struct qcom_cpucp_mbox - Holder for the mailbox driver
+ * @chans: The mailbox channels
+ * @mbox: The mailbox controller
+ * @tx_base: Base address of the CPUCP tx registers
+ * @rx_base: Base address of the CPUCP rx registers
+ */
+struct qcom_cpucp_mbox {
+ struct mbox_chan chans[APSS_CPUCP_IPC_CHAN_SUPPORTED];
+ struct mbox_controller mbox;
+ void __iomem *tx_base;
+ void __iomem *rx_base;
+};
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static irqreturn_t qcom_cpucp_mbox_irq_fn(int irq, void *data)
+{
+ struct qcom_cpucp_mbox *cpucp = data;
+ u64 status;
+ int i;
+
+ status = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_STAT);
+
+ for_each_set_bit(i, (unsigned long *)&status, APSS_CPUCP_IPC_CHAN_SUPPORTED) {
+ u32 val = readl(cpucp->rx_base + APSS_CPUCP_RX_MBOX_CMD(i) + APSS_CPUCP_MBOX_CMD_OFF);
+ struct mbox_chan *chan = &cpucp->chans[i];
+ unsigned long flags;
+
+ /* Provide mutual exclusion with changes to chan->cl */
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->cl)
+ mbox_chan_received_data(chan, &val);
+ writeq(BIT(i), cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_cpucp_mbox_startup(struct mbox_chan *chan)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u64 val;
+
+ val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ val |= BIT(chan_id);
+ writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+
+ return 0;
+}
+
+static void qcom_cpucp_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u64 val;
+
+ val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ val &= ~BIT(chan_id);
+ writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+}
+
+static int qcom_cpucp_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u32 *val = data;
+
+ writel(*val, cpucp->tx_base + APSS_CPUCP_TX_MBOX_CMD(chan_id) + APSS_CPUCP_MBOX_CMD_OFF);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops qcom_cpucp_mbox_chan_ops = {
+ .startup = qcom_cpucp_mbox_startup,
+ .send_data = qcom_cpucp_mbox_send_data,
+ .shutdown = qcom_cpucp_mbox_shutdown
+};
+
+static int qcom_cpucp_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_cpucp_mbox *cpucp;
+ struct mbox_controller *mbox;
+ int irq, ret;
+
+ cpucp = devm_kzalloc(dev, sizeof(*cpucp), GFP_KERNEL);
+ if (!cpucp)
+ return -ENOMEM;
+
+ cpucp->rx_base = devm_of_iomap(dev, dev->of_node, 0, NULL);
+ if (IS_ERR(cpucp->rx_base))
+ return PTR_ERR(cpucp->rx_base);
+
+ cpucp->tx_base = devm_of_iomap(dev, dev->of_node, 1, NULL);
+ if (IS_ERR(cpucp->tx_base))
+ return PTR_ERR(cpucp->tx_base);
+
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR);
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, qcom_cpucp_mbox_irq_fn,
+ IRQF_TRIGGER_HIGH, "apss_cpucp_mbox", cpucp);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register irq: %d\n", irq);
+
+ writeq(APSS_CPUCP_RX_MBOX_CMD_MASK, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP);
+
+ mbox = &cpucp->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = APSS_CPUCP_IPC_CHAN_SUPPORTED;
+ mbox->chans = cpucp->chans;
+ mbox->ops = &qcom_cpucp_mbox_chan_ops;
+
+ ret = devm_mbox_controller_register(dev, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to create mailbox\n");
+
+ return 0;
+}
+
+static const struct of_device_id qcom_cpucp_mbox_of_match[] = {
+ { .compatible = "qcom,x1e80100-cpucp-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_cpucp_mbox_of_match);
+
+static struct platform_driver qcom_cpucp_mbox_driver = {
+ .probe = qcom_cpucp_mbox_probe,
+ .driver = {
+ .name = "qcom_cpucp_mbox",
+ .of_match_table = qcom_cpucp_mbox_of_match,
+ },
+};
+
+static int __init qcom_cpucp_mbox_init(void)
+{
+ return platform_driver_register(&qcom_cpucp_mbox_driver);
+}
+core_initcall(qcom_cpucp_mbox_init);
+
+static void __exit qcom_cpucp_mbox_exit(void)
+{
+ platform_driver_unregister(&qcom_cpucp_mbox_driver);
+}
+module_exit(qcom_cpucp_mbox_exit);
+
+MODULE_DESCRIPTION("QTI CPUCP MBOX Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 4acf5612487c..521d08b9ab47 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -64,6 +64,13 @@
#define MAX_SGI 16
+/*
+ * Module parameters
+ */
+static int tx_poll_period = 5;
+module_param_named(tx_poll_period, tx_poll_period, int, 0644);
+MODULE_PARM_DESC(tx_poll_period, "Poll period waiting for ack after send.");
+
/**
* struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
* @is_opened: indicate if the IPI channel is opened
@@ -537,7 +544,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mbox->num_chans = 2;
mbox->txdone_irq = false;
mbox->txdone_poll = true;
- mbox->txpoll_period = 5;
+ mbox->txpoll_period = tx_poll_period;
mbox->of_xlate = zynqmp_ipi_of_xlate;
chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
if (!chans)
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 267045b76505..91bbd948ee93 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -28,9 +28,9 @@ static const struct mcb_device_id *mcb_match_id(const struct mcb_device_id *ids,
return NULL;
}
-static int mcb_match(struct device *dev, struct device_driver *drv)
+static int mcb_match(struct device *dev, const struct device_driver *drv)
{
- struct mcb_driver *mdrv = to_mcb_driver(drv);
+ const struct mcb_driver *mdrv = to_mcb_driver(drv);
struct mcb_device *mdev = to_mcb_device(dev);
const struct mcb_device_id *found_id;
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 6b7fea50328c..59a6f160aac7 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -28,9 +28,9 @@
/* ----------------------------------------------------------------------- */
/* internal: the bttv "bus" */
-static int bttv_sub_bus_match(struct device *dev, struct device_driver *drv)
+static int bttv_sub_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct bttv_sub_driver *sub = to_bttv_sub_drv(drv);
+ const struct bttv_sub_driver *sub = to_bttv_sub_drv(drv);
int len = strlen(sub->wanted);
if (0 == strncmp(dev_name(dev), sub->wanted, len))
diff --git a/drivers/media/pci/bt8xx/bttv.h b/drivers/media/pci/bt8xx/bttv.h
index eed7eeb3b963..97bbed980f98 100644
--- a/drivers/media/pci/bt8xx/bttv.h
+++ b/drivers/media/pci/bt8xx/bttv.h
@@ -341,7 +341,7 @@ struct bttv_sub_driver {
int (*probe)(struct bttv_sub_device *sub);
void (*remove)(struct bttv_sub_device *sub);
};
-#define to_bttv_sub_drv(x) container_of((x), struct bttv_sub_driver, drv)
+#define to_bttv_sub_drv(x) container_of_const((x), struct bttv_sub_driver, drv)
int bttv_sub_register(struct bttv_sub_driver *drv, char *wanted);
int bttv_sub_unregister(struct bttv_sub_driver *drv);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.h b/drivers/media/pci/intel/ipu6/ipu6-bus.h
index b26c6aee1621..bb4926dfdf08 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-bus.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.h
@@ -21,7 +21,7 @@ struct ipu6_buttress_ctrl;
struct ipu6_bus_device {
struct auxiliary_device auxdev;
- struct auxiliary_driver *auxdrv;
+ const struct auxiliary_driver *auxdrv;
const struct ipu6_auxdrv_data *auxdrv_data;
struct list_head list;
void *pdata;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 52aea4167718..717c441b4a86 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -828,8 +828,10 @@ struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
return ERR_PTR(-EINVAL);
}
- if (write && !(f.file->f_mode & FMODE_WRITE))
+ if (write && !(f.file->f_mode & FMODE_WRITE)) {
+ fdput(f);
return ERR_PTR(-EPERM);
+ }
fh = f.file->private_data;
dev = fh->rc;
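The fix above closes a reference leak: every early return taken after fdget() must be paired with fdput(). The shape of the corrected path, in isolation:

	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EINVAL);

	if (write && !(f.file->f_mode & FMODE_WRITE)) {
		fdput(f);	/* previously leaked on this path */
		return ERR_PTR(-EPERM);
	}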
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 23fea51ecbdd..9a3a784054cc 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -38,13 +38,12 @@ static int memstick_dev_match(struct memstick_dev *card,
return 0;
}
-static int memstick_bus_match(struct device *dev, struct device_driver *drv)
+static int memstick_bus_match(struct device *dev, const struct device_driver *drv)
{
struct memstick_dev *card = container_of(dev, struct memstick_dev,
dev);
- struct memstick_driver *ms_drv = container_of(drv,
- struct memstick_driver,
- driver);
+ const struct memstick_driver *ms_drv = container_of_const(drv, struct memstick_driver,
+ driver);
struct memstick_device_id *ids = ms_drv->id_table;
if (ids) {
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 16ca23311cab..be08eaee0a90 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -20,7 +20,7 @@
#define to_mcp(d) container_of(d, struct mcp, attached_device)
#define to_mcp_driver(d) container_of(d, struct mcp_driver, drv)
-static int mcp_bus_match(struct device *dev, struct device_driver *drv)
+static int mcp_bus_match(struct device *dev, const struct device_driver *drv)
{
return 1;
}
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 4bbd542d753e..0c1364d88469 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -119,8 +119,6 @@ static int ti_tscadc_probe(struct platform_device *pdev)
struct clk *clk;
struct device_node *node;
struct mfd_cell *cell;
- struct property *prop;
- const __be32 *cur;
bool use_tsc = false, use_mag = false;
u32 val;
int err;
@@ -167,7 +165,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
}
node = of_get_child_by_name(pdev->dev.of_node, "adc");
- of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
+ of_property_for_each_u32(node, "ti,adc-channels", val) {
adc_channels++;
if (val > 7) {
dev_err(&pdev->dev, " PIN numbers are 0..7 (not %d)\n",
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 99393f610cdf..5576146ab13b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -19,7 +19,7 @@
#include "mei_dev.h"
#include "client.h"
-#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
+#define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver)
/**
* __mei_cl_send - internal client send (write)
@@ -1124,7 +1124,7 @@ struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
*
* Return: 1 if matching device was found 0 otherwise
*/
-static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
+static int mei_cl_device_match(struct device *dev, const struct device_driver *drv)
{
const struct mei_cl_device *cldev = to_mei_cl_device(dev);
const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index fd9c3cbbc51e..12355d34e193 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -38,11 +38,11 @@ static int tifm_dev_match(struct tifm_dev *sock, struct tifm_device_id *id)
return 0;
}
-static int tifm_bus_match(struct device *dev, struct device_driver *drv)
+static int tifm_bus_match(struct device *dev, const struct device_driver *drv)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
- struct tifm_driver *fm_drv = container_of(drv, struct tifm_driver,
- driver);
+ const struct tifm_driver *fm_drv = container_of_const(drv, struct tifm_driver,
+ driver);
struct tifm_device_id *ids = fm_drv->id_table;
if (ids) {
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index c5fdfe2325f8..b66b637e2d57 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -26,7 +26,7 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
-#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
+#define to_sdio_driver(d) container_of_const(d, struct sdio_driver, drv)
/* show configuration fields */
#define sdio_config_attr(field, format_string, args...) \
@@ -91,7 +91,7 @@ static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
}
static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
- struct sdio_driver *sdrv)
+ const struct sdio_driver *sdrv)
{
const struct sdio_device_id *ids;
@@ -108,10 +108,10 @@ static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
return NULL;
}
-static int sdio_bus_match(struct device *dev, struct device_driver *drv)
+static int sdio_bus_match(struct device *dev, const struct device_driver *drv)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- struct sdio_driver *sdrv = to_sdio_driver(drv);
+ const struct sdio_driver *sdrv = to_sdio_driver(drv);
if (sdio_match_device(func, sdrv))
return 1;
@@ -129,7 +129,7 @@ sdio_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
"SDIO_CLASS=%02X", func->class))
return -ENOMEM;
- if (add_uevent_var(env,
+ if (add_uevent_var(env,
"SDIO_ID=%04X:%04X", func->vendor, func->device))
return -ENOMEM;
diff --git a/drivers/most/core.c b/drivers/most/core.c
index 10342e8801bf..a635d5082ebb 100644
--- a/drivers/most/core.c
+++ b/drivers/most/core.c
@@ -491,7 +491,7 @@ static int print_links(struct device *dev, void *data)
return 0;
}
-static int most_match(struct device *dev, struct device_driver *drv)
+static int most_match(struct device *dev, const struct device_driver *drv)
{
if (!strcmp(dev_name(dev), "most"))
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index af9ddd3902cc..1cd92c12e782 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1121,13 +1121,10 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
return bestslave;
}
+/* must be called in an RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave;
-
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- rcu_read_unlock();
+ struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
if (!slave || !bond->send_peer_notif ||
bond->send_peer_notif %
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index bb3be33c1bbd..ffa74c26ee53 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4052,6 +4052,7 @@ static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
rxr->page_pool->p.napi = NULL;
rxr->page_pool = NULL;
+ memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
ring = &rxr->rx_ring_struct;
rmem = &ring->ring_mem;
@@ -15018,6 +15019,16 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
if (rc)
return rc;
+ rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
+ if (rc < 0)
+ goto err_page_pool_destroy;
+
+ rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ clone->page_pool);
+ if (rc)
+ goto err_rxq_info_unreg;
+
ring = &clone->rx_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
@@ -15047,6 +15058,9 @@ err_free_rx_agg_ring:
bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+err_rxq_info_unreg:
+ xdp_rxq_info_unreg(&clone->xdp_rxq);
+err_page_pool_destroy:
clone->page_pool->p.napi = NULL;
page_pool_destroy(clone->page_pool);
clone->page_pool = NULL;
@@ -15062,6 +15076,8 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
bnxt_free_one_rx_ring(bp, rxr);
bnxt_free_one_rx_agg_ring(bp, rxr);
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
page_pool_destroy(rxr->page_pool);
rxr->page_pool = NULL;
@@ -15145,6 +15161,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
rxr->rx_next_cons = clone->rx_next_cons;
rxr->page_pool = clone->page_pool;
+ rxr->xdp_rxq = clone->xdp_rxq;
bnxt_copy_rx_ring(bp, rxr, clone);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index ba3fa1c2e5d9..b9e7d3e7b15d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -239,7 +239,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
pm_message_t pm = {};
adrv = to_auxiliary_drv(adev->dev.driver);
@@ -277,7 +277,7 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
adrv = to_auxiliary_drv(adev->dev.driver);
edev->en_state = bp->state;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 0b3cca3fc792..f879426cb552 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -866,22 +866,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
+ int prev_frag_size;
int cur_seg_size;
int i;
cur_seg_size = skb_headlen(skb) - header_len;
+ prev_frag_size = skb_headlen(skb);
cur_seg_num_bufs = cur_seg_size > 0;
for (i = 0; i < shinfo->nr_frags; i++) {
if (cur_seg_size >= gso_size) {
cur_seg_size %= gso_size;
cur_seg_num_bufs = cur_seg_size > 0;
+
+ if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
+ int prev_frag_remain = prev_frag_size %
+ GVE_TX_MAX_BUF_SIZE_DQO;
+
+ /* If the data left in the last descriptor of the
+ * previous frag is less than cur_seg_size, the segment
+ * will span two descriptors in the previous frag.
+ * Since max gso size (9728) is less than
+ * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
+ * for the segment to span more than two
+ * descriptors.
+ */
+ if (prev_frag_remain &&
+ cur_seg_size > prev_frag_remain)
+ cur_seg_num_bufs++;
+ }
}
if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
return false;
- cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+ prev_frag_size = skb_frag_size(&shinfo->frags[i]);
+ cur_seg_size += prev_frag_size;
}
return true;
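A worked example of the new descriptor-spanning check (the GVE_TX_MAX_BUF_SIZE_DQO value of 16384 below is assumed purely for illustration):

	/*
	 * prev_frag_size = 20000  ->  prev_frag_remain = 20000 % 16384 = 3616
	 * cur_seg_size   = 9000   ->  9000 > 3616, so the segment starts in the
	 * final 3616-byte descriptor of the previous frag and spills into the
	 * next descriptor, and cur_seg_num_bufs is incremented by one.
	 */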
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index e3cab8e98f52..5412eff8ef23 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -534,7 +534,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
*
* Returns the number of available flow director filters to this VSI
*/
-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
u16 num_guar;
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index 021ecbac7848..ab5b118daa2d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -207,6 +207,8 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
+struct ice_vsi;
+
int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
@@ -218,6 +220,7 @@ int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
+int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi);
bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
struct ice_fdir_fltr *
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 51fac8f18cb0..e2786cc13286 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2915,7 +2915,7 @@ static struct ice_pf *
ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
{
struct ice_ptp_port_owner *ports_owner;
- struct auxiliary_driver *aux_drv;
+ const struct auxiliary_driver *aux_drv;
struct ice_ptp *owner_ptp;
if (!aux_dev->dev.driver)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 3caafcdc301f..fe8847184cb1 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2400,10 +2400,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Propagate some data to the recipe database */
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
- recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
- ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
- recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
- ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
+ recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_NEED_PASS_L2);
+ recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2);
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
set_bit(root_bufs.content.result_indx &
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 8e4ff3af86c6..b4feb0927687 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -536,6 +536,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
fdir->fdir_fltr_cnt[flow][0] = 0;
fdir->fdir_fltr_cnt[flow][1] = 0;
}
+
+ fdir->fdir_fltr_cnt_total = 0;
}
/**
@@ -1560,6 +1562,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
resp->status = status;
resp->flow_id = conf->flow_id;
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
+ vf->fdir.fdir_fltr_cnt_total++;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
@@ -1624,6 +1627,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
resp->status = status;
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
+ vf->fdir.fdir_fltr_cnt_total--;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
@@ -1790,6 +1794,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_add *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
enum virtchnl_status_code v_ret;
+ struct ice_vsi *vf_vsi;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@@ -1798,6 +1803,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
+ vf_vsi = ice_get_vf_vsi(vf);
+
+#define ICE_VF_MAX_FDIR_FILTERS 128
+ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+ vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
if (ret) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
index c5bcc8d7481c..ac6dcab454b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
struct ice_vf_fdir {
u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ u16 fdir_fltr_cnt_total;
struct ice_fd_hw_prof **fdir_prof;
struct idr fdir_rule_idr;
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 16761fde6c6c..1c5b85a86df1 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -249,7 +249,7 @@
#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK GENMASK(18, 17)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
#define REG_GDM3_FWD_CFG GDM3_BASE
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 0cc2dd85652f..16ca427cf4c3 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4223,8 +4223,6 @@ static int mtk_free_dev(struct mtk_eth *eth)
metadata_dst_free(eth->dsa_meta[i]);
}
- free_netdev(eth->dummy_dev);
-
return 0;
}
@@ -5090,6 +5088,7 @@ static void mtk_remove(struct platform_device *pdev)
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ free_netdev(eth->dummy_dev);
mtk_mdio_cleanup(eth);
}
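The mtk_eth_soc hunks move free_netdev(eth->dummy_dev) out of mtk_free_dev() and into mtk_remove(), after netif_napi_del() and mtk_cleanup(), so the dummy net_device that backs the NAPI contexts is not freed while NAPI state can still reference it. A hedged sketch of that teardown ordering; struct example_priv and example_remove() are invented for illustration:

#include <linux/netdevice.h>

struct example_priv {			/* hypothetical driver state */
	struct napi_struct tx_napi;
	struct napi_struct rx_napi;
	struct net_device *dummy_dev;	/* backing device for the NAPI contexts */
};

static void example_remove(struct example_priv *priv)
{
	netif_napi_del(&priv->tx_napi);	/* stop the users of dummy_dev first */
	netif_napi_del(&priv->rx_napi);
	/* ... release rings, IRQs, MDIO, and so on ... */
	free_netdev(priv->dummy_dev);	/* only now is the dummy netdev unused */
}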
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 47e7c2639774..9a79674d27f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -349,7 +349,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
int ret = 0, i;
devl_assert_locked(priv_to_devlink(dev));
@@ -406,7 +406,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
pm_message_t pm = {};
int i;
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 577227c007ab..0e6cea42f007 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -1358,7 +1358,7 @@ error_free:
return ret;
}
-static int rtsn_remove(struct platform_device *pdev)
+static void rtsn_remove(struct platform_device *pdev)
{
struct rtsn_private *priv = platform_get_drvdata(pdev);
@@ -1372,8 +1372,6 @@ static int rtsn_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
free_netdev(priv->ndev);
-
- return 0;
}
static struct platform_driver rtsn_driver = {
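rtsn_remove() is converted from the int-returning .remove callback to the void-returning form; the e820 and of_pmem hunks further down show the matching .remove_new hookup in the platform_driver. Since the platform core can only log, not act on, a non-zero return from remove, dropping the return value removes dead error plumbing. A reduced sketch of the converted shape, using an invented "example" driver:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;			/* stub probe for completeness */
}

static void example_remove(struct platform_device *pdev)
{
	/* tear everything down; errors are logged locally, nothing to return */
}

static struct platform_driver example_driver = {
	.probe      = example_probe,
	.remove_new = example_remove,	/* void-returning remove callback */
	.driver     = {
		.name = "example",
	},
};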
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index dbd9f93b2460..f98741d2607e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -977,7 +977,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double)
+ u16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 6a987cf598e4..f196cd99d510 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -615,7 +615,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
}
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double)
+ u16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 97934ccba5b1..e53c32362774 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -393,7 +393,7 @@ struct stmmac_ops {
struct stmmac_rss *cfg, u32 num_rxq);
/* VLAN */
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double);
+ u16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4b6a359e5a94..12689774d755 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -6641,7 +6641,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- __le16 pmatch = 0;
+ u16 pmatch = 0;
int count = 0;
u16 vid = 0;
@@ -6656,7 +6656,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
if (count > 2) /* VID = 0 always passes filter */
return -EOPNOTSUPP;
- pmatch = cpu_to_le16(vid);
+ pmatch = vid;
hash = 0;
}
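The stmmac hunks change perfect_match from __le16 to a host-order u16 end to end: stmmac_vlan_update() drops its cpu_to_le16() call and the dwmac4/dwxgmac2 update_vlan_hash() implementations now take a plain u16, so the VID stays in CPU byte order until the MAC-specific code writes it out, where the MMIO accessor handles byte order anyway. A hedged sketch of that boundary, with an invented register-write helper (field layout deliberately elided):

#include <linux/io.h>
#include <linux/types.h>

static void example_write_perfect_match(void __iomem *vlan_tag_reg, u16 vid)
{
	u32 value = vid;		/* real hardware packs this into a field */

	writel(value, vlan_tag_reg);	/* writel() performs the CPU-to-LE conversion */
}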
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8b9ead76e40e..7e2f10182c0c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1375,9 +1375,9 @@ EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed);
* require calling the devices own match function, since different classes
* of MDIO devices have different match criteria.
*/
-static int mdio_bus_match(struct device *dev, struct device_driver *drv)
+static int mdio_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+ const struct mdio_driver *mdiodrv = to_mdio_driver(drv);
struct mdio_device *mdio = to_mdio_device(dev);
/* Both the driver and device must type-match */
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 73f6539b9e50..e747ee63c665 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -35,10 +35,10 @@ static void mdio_device_release(struct device *dev)
kfree(to_mdio_device(dev));
}
-int mdio_device_bus_match(struct device *dev, struct device_driver *drv)
+int mdio_device_bus_match(struct device *dev, const struct device_driver *drv)
{
struct mdio_device *mdiodev = to_mdio_device(dev);
- struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+ const struct mdio_driver *mdiodrv = to_mdio_driver(drv);
if (mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)
return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 70b07e621fb2..7752e9386b40 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -533,10 +533,10 @@ static int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-static int phy_bus_match(struct device *dev, struct device_driver *drv)
+static int phy_bus_match(struct device *dev, const struct device_driver *drv)
{
struct phy_device *phydev = to_phy_device(dev);
- struct phy_driver *phydrv = to_phy_driver(drv);
+ const struct phy_driver *phydrv = to_phy_driver(drv);
const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
int i;
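mdio_bus_match(), mdio_device_bus_match() and phy_bus_match(), like the mlx5, nvdimm, PCI, PCMCIA, PECI and nvmem hunks elsewhere in this series, follow the driver-core change that passes the driver to a bus .match() callback as const struct device_driver *. Matching only inspects the driver, so the container lookup can stay const throughout when container_of_const() is used. A minimal sketch of the resulting callback shape; the "example" bus, driver type and macro are made up:

#include <linux/container_of.h>
#include <linux/device.h>

struct example_driver {
	const void *id_table;			/* match data, read-only here */
	struct device_driver driver;
};

#define to_example_driver(__drv) \
	container_of_const(__drv, struct example_driver, driver)

static int example_bus_match(struct device *dev, const struct device_driver *drv)
{
	const struct example_driver *edrv = to_example_driver(drv);

	/* matching never modifies the driver, so const propagates cleanly */
	return edrv->id_table != NULL;
}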
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index bfdd3875fe86..77574f7a3bd4 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1177,6 +1177,11 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
struct sk_buff *skb;
int err, depth;
+ if (unlikely(xdp->data_end - xdp->data < ETH_HLEN)) {
+ err = -EINVAL;
+ goto err;
+ }
+
if (q->flags & IFF_VNET_HDR)
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9b24861464bc..1d06c560c5e6 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2455,6 +2455,9 @@ static int tun_xdp_one(struct tun_struct *tun,
bool skb_xdp = false;
struct page *page;
+ if (unlikely(datasize < ETH_HLEN))
+ return -EINVAL;
+
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
if (gso->gso_type) {
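Both tap_get_user_xdp() and tun_xdp_one() now reject an XDP buffer shorter than an Ethernet header before an skb is built from it, so later header parsing never runs on a frame with fewer than ETH_HLEN bytes. The check itself is just a length comparison up front; a self-contained sketch, with an invented helper name:

#include <linux/errno.h>
#include <linux/if_ether.h>	/* ETH_HLEN */

static int example_validate_min_len(const void *data, const void *data_end)
{
	/* reject frames that cannot even hold an Ethernet header */
	if ((const char *)data_end - (const char *)data < ETH_HLEN)
		return -EINVAL;
	return 0;
}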
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9af316cdd8b3..040f0bb36c0e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1886,7 +1886,7 @@ unlock:
return res;
}
-static int vrf_shared_table_handler(struct ctl_table *table, int write,
+static int vrf_shared_table_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)table->extra1;
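vrf_shared_table_handler(), the parport procfs handlers and the arm/riscv PMU user-access handlers further down all gain a const struct ctl_table *table argument, tracking the constified proc_handler prototype; handlers that only read the table (data, extra1/extra2) and forward to the generic proc helpers need no other change. One possible shape, sketched with an invented handler name:

#include <linux/sysctl.h>

static int example_sysctl_handler(const struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (write && !ret) {
		/* react to the new value; the table stays read-only here */
	}
	return ret;
}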
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f9e7847a378e..77e55debeed6 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -284,7 +284,7 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
static int ntb_transport_bus_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 508aed017ddc..2237715e42eb 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -25,9 +25,12 @@
int nvdimm_major;
static int nvdimm_bus_major;
-static struct class *nd_class;
static DEFINE_IDA(nd_ida);
+static const struct class nd_class = {
+ .name = "nd",
+};
+
static int to_nd_device_type(const struct device *dev)
{
if (is_nvdimm(dev))
@@ -269,7 +272,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
-static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);
+static int nvdimm_bus_match(struct device *dev, const struct device_driver *drv);
static const struct bus_type nvdimm_bus_type = {
.name = "nd",
@@ -465,9 +468,9 @@ static struct nd_device_driver nd_bus_driver = {
},
};
-static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
+static int nvdimm_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct nd_device_driver *nd_drv = to_nd_device_driver(drv);
+ const struct nd_device_driver *nd_drv = to_nd_device_driver(drv);
if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
return true;
@@ -742,7 +745,7 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
device_initialize(dev);
lockdep_set_class(&dev->mutex, &nvdimm_ndctl_key);
device_set_pm_not_required(dev);
- dev->class = nd_class;
+ dev->class = &nd_class;
dev->parent = &nvdimm_bus->dev;
dev->devt = devt;
dev->release = ndctl_release;
@@ -765,7 +768,7 @@ err:
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
- device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
+ device_destroy(&nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}
static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
@@ -1320,11 +1323,9 @@ int __init nvdimm_bus_init(void)
goto err_dimm_chrdev;
nvdimm_major = rc;
- nd_class = class_create("nd");
- if (IS_ERR(nd_class)) {
- rc = PTR_ERR(nd_class);
+ rc = class_register(&nd_class);
+ if (rc)
goto err_class;
- }
rc = driver_register(&nd_bus_driver.drv);
if (rc)
@@ -1333,7 +1334,7 @@ int __init nvdimm_bus_init(void)
return 0;
err_nd_bus:
- class_destroy(nd_class);
+ class_unregister(&nd_class);
err_class:
unregister_chrdev(nvdimm_major, "dimmctl");
err_dimm_chrdev:
@@ -1347,7 +1348,7 @@ int __init nvdimm_bus_init(void)
void nvdimm_bus_exit(void)
{
driver_unregister(&nd_bus_driver.drv);
- class_destroy(nd_class);
+ class_unregister(&nd_class);
unregister_chrdev(nvdimm_bus_major, "ndctl");
unregister_chrdev(nvdimm_major, "dimmctl");
bus_unregister(&nvdimm_bus_type);
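The nvdimm bus code swaps the runtime-allocated class (class_create()/class_destroy()) for a statically defined const struct class registered with class_register()/class_unregister(), which drops an allocation, an IS_ERR() check and one error-path branch, and has users take the class by address (dev->class = &nd_class, device_destroy(&nd_class, ...)). A sketch of the pattern with a placeholder class name:

#include <linux/device/class.h>
#include <linux/init.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	return class_register(&example_class);	/* 0 or -errno, no IS_ERR() dance */
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}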
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 008b9aae74ff..0982215371ba 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -9,12 +9,11 @@
#include <linux/module.h>
#include <linux/numa.h>
-static int e820_pmem_remove(struct platform_device *pdev)
+static void e820_pmem_remove(struct platform_device *pdev)
{
struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
nvdimm_bus_unregister(nvdimm_bus);
- return 0;
}
static int e820_register_one(struct resource *res, void *data)
@@ -60,7 +59,7 @@ err:
static struct platform_driver e820_pmem_driver = {
.probe = e820_pmem_probe,
- .remove = e820_pmem_remove,
+ .remove_new = e820_pmem_remove,
.driver = {
.name = "e820_pmem",
},
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 5134a8d08bf9..403384f25ce3 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -84,14 +84,12 @@ static int of_pmem_region_probe(struct platform_device *pdev)
return 0;
}
-static int of_pmem_region_remove(struct platform_device *pdev)
+static void of_pmem_region_remove(struct platform_device *pdev)
{
struct of_pmem_private *priv = platform_get_drvdata(pdev);
nvdimm_bus_unregister(priv->bus);
kfree(priv);
-
- return 0;
}
static const struct of_device_id of_pmem_region_match[] = {
@@ -102,7 +100,7 @@ static const struct of_device_id of_pmem_region_match[] = {
static struct platform_driver of_pmem_region_driver = {
.probe = of_pmem_region_probe,
- .remove = of_pmem_region_remove,
+ .remove_new = of_pmem_region_remove,
.driver = {
.name = "of_pmem",
.of_match_table = of_pmem_region_match,
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
index 64dc7013a098..77a4119efea8 100644
--- a/drivers/nvmem/layouts.c
+++ b/drivers/nvmem/layouts.c
@@ -17,11 +17,11 @@
#include "internals.h"
#define to_nvmem_layout_driver(drv) \
- (container_of((drv), struct nvmem_layout_driver, driver))
+ (container_of_const((drv), struct nvmem_layout_driver, driver))
#define to_nvmem_layout_device(_dev) \
container_of((_dev), struct nvmem_layout, dev)
-static int nvmem_layout_bus_match(struct device *dev, struct device_driver *drv)
+static int nvmem_layout_bus_match(struct device *dev, const struct device_driver *drv)
{
return of_driver_match_device(dev, drv);
}
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index c2e371c50dcf..3ef486cd3d6d 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -33,7 +33,7 @@
#define PARPORT_MIN_SPINTIME_VALUE 1
#define PARPORT_MAX_SPINTIME_VALUE 1000
-static int do_active_device(struct ctl_table *table, int write,
+static int do_active_device(const struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -70,7 +70,7 @@ static int do_active_device(struct ctl_table *table, int write,
}
#ifdef CONFIG_PARPORT_1284
-static int do_autoprobe(struct ctl_table *table, int write,
+static int do_autoprobe(const struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport_device_info *info = table->extra2;
@@ -113,7 +113,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
}
#endif /* IEEE1284.3 support. */
-static int do_hardware_base_addr(struct ctl_table *table, int write,
+static int do_hardware_base_addr(const struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -140,7 +140,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
return 0;
}
-static int do_hardware_irq(struct ctl_table *table, int write,
+static int do_hardware_irq(const struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -167,7 +167,7 @@ static int do_hardware_irq(struct ctl_table *table, int write,
return 0;
}
-static int do_hardware_dma(struct ctl_table *table, int write,
+static int do_hardware_dma(const struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -194,7 +194,7 @@ static int do_hardware_dma(struct ctl_table *table, int write,
return 0;
}
-static int do_hardware_modes(struct ctl_table *table, int write,
+static int do_hardware_modes(const struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 2d34f783b36e..427abdf3c4c4 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -128,7 +128,7 @@ static int parport_probe(struct device *dev)
return drv->probe(to_pardevice(dev));
}
-static struct bus_type parport_bus_type = {
+static const struct bus_type parport_bus_type = {
.name = "parport",
.probe = parport_probe,
};
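parport_bus_type becomes a const struct bus_type; bus_register() accepts a const pointer, so no other change is needed in the driver. For completeness, the registration side under that assumption, reusing the example_bus_match() sketch above:

#include <linux/device/bus.h>
#include <linux/init.h>

static const struct bus_type example_bus_type = {
	.name  = "example-bus",
	.match = example_bus_match,	/* see the earlier sketch */
};

static int __init example_bus_init(void)
{
	return bus_register(&example_bus_type);
}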
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 323f2a60ab16..8fa2797d4169 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -488,10 +488,10 @@ pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
return NULL;
}
-static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+static int pci_epf_device_match(struct device *dev, const struct device_driver *drv)
{
struct pci_epf *epf = to_pci_epf(dev);
- struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+ const struct pci_epf_driver *driver = to_pci_epf_driver(drv);
if (driver->id_table)
return !!pci_epf_match_id(driver->id_table, epf);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index af2996d0d17f..f412ef73a6e4 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1503,7 +1503,7 @@ EXPORT_SYMBOL(pci_dev_driver);
* system is in its list of supported devices. Returns the matching
* pci_device_id structure or %NULL if there is no match.
*/
-static int pci_bus_match(struct device *dev, struct device_driver *drv)
+static int pci_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *pci_drv;
@@ -1512,7 +1512,7 @@ static int pci_bus_match(struct device *dev, struct device_driver *drv)
if (!pci_dev->match_driver)
return 0;
- pci_drv = to_pci_driver(drv);
+ pci_drv = (struct pci_driver *)to_pci_driver(drv);
found_id = pci_match_device(pci_drv, pci_dev);
if (found_id)
return 1;
@@ -1688,10 +1688,10 @@ struct bus_type pci_bus_type = {
EXPORT_SYMBOL(pci_bus_type);
#ifdef CONFIG_PCIEPORTBUS
-static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
+static int pcie_port_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pcie_device *pciedev;
- struct pcie_port_service_driver *driver;
+ const struct pcie_port_service_driver *driver;
if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
return 0;
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index a5414441834a..5bda3e6d43d8 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -456,7 +456,6 @@ struct platform_driver bcm63xx_pcmcia_driver = {
.remove_new = bcm63xx_drv_pcmcia_remove,
.driver = {
.name = "bcm63xx_pcmcia",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index d3cfd353fb93..da6f66f357cc 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -900,7 +900,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
}
-static int pcmcia_bus_match(struct device *dev, struct device_driver *drv)
+static int pcmcia_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = to_pcmcia_drv(drv);
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index a335748bdef5..a947ffb2df55 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -23,6 +23,7 @@
#include "i82092aa.h"
#include "i82365.h"
+MODULE_DESCRIPTION("Driver for Intel I82092AA PCI-PCMCIA bridge");
MODULE_LICENSE("GPL");
/* PCI core routines */
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 891ccea2cccb..86a357837a7b 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1342,5 +1342,6 @@ static void __exit exit_i82365(void)
module_init(init_i82365);
module_exit(exit_i82365);
+MODULE_DESCRIPTION("Driver for Intel 82365 and compatible PC Card controllers");
MODULE_LICENSE("Dual MPL/GPL");
/*====================================================================*/
diff --git a/drivers/pcmcia/max1600.c b/drivers/pcmcia/max1600.c
index 379875a5e7cd..7be9068f6191 100644
--- a/drivers/pcmcia/max1600.c
+++ b/drivers/pcmcia/max1600.c
@@ -119,4 +119,5 @@ int max1600_configure(struct max1600 *m, unsigned int vcc, unsigned int vpp)
}
EXPORT_SYMBOL_GPL(max1600_configure);
+MODULE_DESCRIPTION("MAX1600 PCMCIA power switch library");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index 252893216e50..3a1d2baa466f 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -66,5 +66,6 @@ EXPORT_SYMBOL(pccard_static_ops);
MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
+MODULE_DESCRIPTION("PCMCIA resource management routines");
MODULE_LICENSE("GPL");
MODULE_ALIAS("rsrc_nonstatic");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 1365eaa20ff4..020ea86c24ec 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
start = PCIBIOS_MIN_CARDBUS_IO;
end = ~0U;
} else {
- unsigned long avail = root->end - root->start;
+ unsigned long avail = resource_size(root);
int i;
size = BRIDGE_MEM_MAX;
- if (size > avail/8) {
- size = (avail+1)/8;
+ if (size > (avail - 1) / 8) {
+ size = avail / 8;
/* round size down to next power of 2 */
i = 0;
while ((size /= 2) != 0)
@@ -1452,4 +1452,5 @@ static struct pci_driver yenta_cardbus_driver = {
module_pci_driver(yenta_cardbus_driver);
+MODULE_DESCRIPTION("Driver for CardBus yenta-compatible bridges");
MODULE_LICENSE("GPL");
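In yenta_search_one_res() the open-coded root->end - root->start is replaced by resource_size(), which returns end - start + 1, and the two divisions are shifted by one so the computed window size is unchanged: the old avail was one less than the true size, so avail/8 and (avail+1)/8 become (avail-1)/8 and avail/8 once avail holds the true size. A throwaway userspace check of that equivalence:

#include <assert.h>

int main(void)
{
	unsigned long start = 0x100, end = 0x1ff;
	unsigned long avail_old = end - start;		/* old: size minus one */
	unsigned long avail_new = end - start + 1;	/* new: true size (resource_size) */

	assert(avail_old / 8 == (avail_new - 1) / 8);	/* same threshold test   */
	assert((avail_old + 1) / 8 == avail_new / 8);	/* same rounded-down size */
	return 0;
}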
diff --git a/drivers/peci/core.c b/drivers/peci/core.c
index 8ff3e5d225ae..25e46579dd9c 100644
--- a/drivers/peci/core.c
+++ b/drivers/peci/core.c
@@ -172,10 +172,10 @@ peci_bus_match_device_id(const struct peci_device_id *id, struct peci_device *de
return NULL;
}
-static int peci_bus_device_match(struct device *dev, struct device_driver *drv)
+static int peci_bus_device_match(struct device *dev, const struct device_driver *drv)
{
struct peci_device *device = to_peci_device(dev);
- struct peci_driver *peci_drv = to_peci_driver(drv);
+ const struct peci_driver *peci_drv = to_peci_driver(drv);
if (dev->type != &peci_device_type)
return 0;
diff --git a/drivers/peci/internal.h b/drivers/peci/internal.h
index 7a4f6eae2f90..99924a118c8c 100644
--- a/drivers/peci/internal.h
+++ b/drivers/peci/internal.h
@@ -96,10 +96,7 @@ struct peci_driver {
const struct peci_device_id *id_table;
};
-static inline struct peci_driver *to_peci_driver(struct device_driver *d)
-{
- return container_of(d, struct peci_driver, driver);
-}
+#define to_peci_driver(__drv) container_of_const(__drv, struct peci_driver, driver)
int __peci_driver_register(struct peci_driver *driver, struct module *owner,
const char *mod_name);
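to_peci_driver() moves from a static inline to container_of_const(), which preserves the caller's constness: the const struct device_driver * handed to the bus .match() callback above now yields a const struct peci_driver * without any cast. Assuming the internal.h definitions from this hunk, an illustrative use:

/* const in, const out; a non-const pointer would still give a non-const result */
static const struct peci_driver *
example_lookup_peci_driver(const struct device_driver *drv)
{
	return to_peci_driver(drv);
}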
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index cf0430c266a6..d246840797b6 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -1257,7 +1257,7 @@ static void armv8pmu_disable_user_access_ipi(void *unused)
armv8pmu_disable_user_access();
}
-static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
+static int armv8pmu_proc_user_access_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 4e842dcedfba..44d3951d009f 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -25,6 +25,8 @@
#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/andes.h>
#define ALT_SBI_PMU_OVERFLOW(__ovl) \
asm volatile(ALTERNATIVE_2( \
@@ -33,7 +35,8 @@ asm volatile(ALTERNATIVE_2( \
THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
CONFIG_ERRATA_THEAD_PMU, \
"csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \
- 0, RISCV_ISA_EXT_XANDESPMU, \
+ ANDES_VENDOR_ID, \
+ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \
CONFIG_ANDES_CUSTOM_PMU) \
: "=r" (__ovl) : \
: "memory")
@@ -42,7 +45,8 @@ asm volatile(ALTERNATIVE_2( \
asm volatile(ALTERNATIVE( \
"csrc " __stringify(CSR_IP) ", %0\n\t", \
"csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \
- 0, RISCV_ISA_EXT_XANDESPMU, \
+ ANDES_VENDOR_ID, \
+ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \
CONFIG_ANDES_CUSTOM_PMU) \
: : "r"(__irq_mask) \
: "memory")
@@ -1095,7 +1099,8 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
riscv_cached_mimpid(0) == 0) {
riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
riscv_pmu_use_irq = true;
- } else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
+ } else if (riscv_has_vendor_extension_unlikely(ANDES_VENDOR_ID,
+ RISCV_ISA_VENDOR_EXT_XANDESPMU) &&
IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
riscv_pmu_use_irq = true;
@@ -1277,7 +1282,7 @@ static void riscv_pmu_update_counter_access(void *info)
csr_write(CSR_SCOUNTEREN, 0x2);
}
-static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
+static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 787354b849c7..dfab1c66b3e5 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -72,6 +72,16 @@ config PHY_CAN_TRANSCEIVER
functional modes using gpios and sets the attribute max link
rate, for CAN drivers.
+config PHY_AIROHA_PCIE
+ tristate "Airoha PCIe-PHY Driver"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Say Y here to add support for the Airoha PCIe PHY driver.
+ This driver creates the basic PHY instance and provides the
+ initialization callback for the PCIe GEN3 port.
+
source "drivers/phy/allwinner/Kconfig"
source "drivers/phy/amlogic/Kconfig"
source "drivers/phy/broadcom/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 868a220ed0f6..5fcbce5f9ab1 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o
+obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o
obj-y += allwinner/ \
amlogic/ \
broadcom/ \
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
index 269564bdf687..5213c75b6da6 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
@@ -162,4 +162,5 @@ static struct platform_driver bcm_ns_usb2_driver = {
};
module_platform_driver(bcm_ns_usb2_driver);
+MODULE_DESCRIPTION("Broadcom Northstar USB 2.0 PHY Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index 2c8b1b7dda5b..9f995e156f75 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -240,5 +240,6 @@ static struct mdio_driver bcm_ns_usb3_mdio_driver = {
mdio_module_driver(bcm_ns_usb3_mdio_driver);
+MODULE_DESCRIPTION("Broadcom Northstar USB 3.0 PHY Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table);
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 95924a09960c..56ce82a47f88 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -360,6 +360,7 @@ struct cdns_torrent_phy {
enum cdns_torrent_ref_clk ref_clk1_rate;
struct cdns_torrent_inst phys[MAX_NUM_LANES];
int nsubnodes;
+ int already_configured;
const struct cdns_torrent_data *init_data;
struct regmap *regmap_common_cdb;
struct regmap *regmap_phy_pcs_common_cdb;
@@ -1156,6 +1157,9 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
read_val, (read_val & mask) == value, 0,
POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
ndelay(100);
@@ -1594,6 +1598,9 @@ static int cdns_torrent_dp_configure(struct phy *phy,
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
int ret;
+ if (cdns_phy->already_configured)
+ return 0;
+
ret = cdns_torrent_dp_verify_config(inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "invalid params for phy configure\n");
@@ -1629,6 +1636,12 @@ static int cdns_torrent_phy_on(struct phy *phy)
u32 read_val;
int ret;
+ if (cdns_phy->already_configured) {
+ /* Give 5ms to 10ms delay for the PIPE clock to be stable */
+ usleep_range(5000, 10000);
+ return 0;
+ }
+
if (cdns_phy->nsubnodes == 1) {
/* Take the PHY lane group out of reset */
reset_control_deassert(inst->lnk_rst);
@@ -2307,6 +2320,9 @@ static int cdns_torrent_phy_init(struct phy *phy)
u32 num_regs;
int i, j;
+ if (cdns_phy->already_configured)
+ return 0;
+
if (cdns_phy->nsubnodes > 1) {
if (phy_type == TYPE_DP)
return cdns_torrent_dp_multilink_init(cdns_phy, inst, phy);
@@ -2444,19 +2460,6 @@ static const struct phy_ops cdns_torrent_phy_ops = {
.owner = THIS_MODULE,
};
-static int cdns_torrent_noop_phy_on(struct phy *phy)
-{
- /* Give 5ms to 10ms delay for the PIPE clock to be stable */
- usleep_range(5000, 10000);
-
- return 0;
-}
-
-static const struct phy_ops noop_ops = {
- .power_on = cdns_torrent_noop_phy_on,
- .owner = THIS_MODULE,
-};
-
static
int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
{
@@ -2678,7 +2681,7 @@ static int cdns_torrent_clk_register(struct cdns_torrent_phy *cdns_phy)
return 0;
}
-static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
+static int cdns_torrent_of_get_reset(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
@@ -2699,20 +2702,29 @@ static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
return 0;
}
+static int cdns_torrent_of_get_clk(struct cdns_torrent_phy *cdns_phy)
+{
+ /* refclk: Input reference clock for PLL0 */
+ cdns_phy->clk = devm_clk_get(cdns_phy->dev, "refclk");
+ if (IS_ERR(cdns_phy->clk))
+ return dev_err_probe(cdns_phy->dev, PTR_ERR(cdns_phy->clk),
+ "phy ref clock not found\n");
+
+ /* refclk1: Input reference clock for PLL1 */
+ cdns_phy->clk1 = devm_clk_get_optional(cdns_phy->dev, "pll1_refclk");
+ if (IS_ERR(cdns_phy->clk1))
+ return dev_err_probe(cdns_phy->dev, PTR_ERR(cdns_phy->clk1),
+ "phy PLL1 ref clock not found\n");
+
+ return 0;
+}
+
static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
{
- struct device *dev = cdns_phy->dev;
unsigned long ref_clk1_rate;
unsigned long ref_clk_rate;
int ret;
- /* refclk: Input reference clock for PLL0 */
- cdns_phy->clk = devm_clk_get(dev, "refclk");
- if (IS_ERR(cdns_phy->clk)) {
- dev_err(dev, "phy ref clock not found\n");
- return PTR_ERR(cdns_phy->clk);
- }
-
ret = clk_prepare_enable(cdns_phy->clk);
if (ret) {
dev_err(cdns_phy->dev, "Failed to prepare ref clock: %d\n", ret);
@@ -2745,14 +2757,6 @@ static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
goto disable_clk;
}
- /* refclk1: Input reference clock for PLL1 */
- cdns_phy->clk1 = devm_clk_get_optional(dev, "pll1_refclk");
- if (IS_ERR(cdns_phy->clk1)) {
- dev_err(dev, "phy PLL1 ref clock not found\n");
- ret = PTR_ERR(cdns_phy->clk1);
- goto disable_clk;
- }
-
if (cdns_phy->clk1) {
ret = clk_prepare_enable(cdns_phy->clk1);
if (ret) {
@@ -2807,7 +2811,6 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
struct device_node *child;
int ret, subnodes, node = 0, i;
u32 total_num_lanes = 0;
- int already_configured;
u8 init_dp_regmap = 0;
u32 phy_type;
@@ -2846,13 +2849,17 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
if (ret)
return ret;
- regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured);
+ ret = cdns_torrent_of_get_reset(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
- if (!already_configured) {
- ret = cdns_torrent_reset(cdns_phy);
- if (ret)
- goto clk_cleanup;
+ ret = cdns_torrent_of_get_clk(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
+
+ regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &cdns_phy->already_configured);
+ if (!cdns_phy->already_configured) {
ret = cdns_torrent_clk(cdns_phy);
if (ret)
goto clk_cleanup;
@@ -2932,10 +2939,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
of_property_read_u32(child, "cdns,ssc-mode",
&cdns_phy->phys[node].ssc_mode);
- if (!already_configured)
- gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
- else
- gphy = devm_phy_create(dev, child, &noop_ops);
+ gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
goto put_child;
@@ -3018,7 +3022,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
goto put_lnk_rst;
}
- if (cdns_phy->nsubnodes > 1 && !already_configured) {
+ if (cdns_phy->nsubnodes > 1 && !cdns_phy->already_configured) {
ret = cdns_torrent_phy_configure_multilink(cdns_phy);
if (ret)
goto put_lnk_rst;
@@ -3074,6 +3078,82 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev)
cdns_torrent_clk_cleanup(cdns_phy);
}
+/* SGMII and QSGMII link configuration */
+static struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG}
+};
+
+static struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = {
+ {0x0003, XCVR_DIAG_HSCLK_DIV},
+ {0x0113, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = {
+ .reg_pairs = sgmii_qsgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sgmii_qsgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sgmii_qsgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_qsgmii_xcvr_diag_ln_regs),
+};
+
+static int cdns_torrent_phy_suspend_noirq(struct device *dev)
+{
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(dev);
+ int i;
+
+ reset_control_assert(cdns_phy->phy_rst);
+ reset_control_assert(cdns_phy->apb_rst);
+ for (i = 0; i < cdns_phy->nsubnodes; i++)
+ reset_control_assert(cdns_phy->phys[i].lnk_rst);
+
+ if (cdns_phy->already_configured)
+ cdns_phy->already_configured = 0;
+ else {
+ clk_disable_unprepare(cdns_phy->clk1);
+ clk_disable_unprepare(cdns_phy->clk);
+ }
+
+ return 0;
+}
+
+static int cdns_torrent_phy_resume_noirq(struct device *dev)
+{
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(dev);
+ int node = cdns_phy->nsubnodes;
+ int ret, i;
+
+ ret = cdns_torrent_clk(cdns_phy);
+ if (ret)
+ return ret;
+
+ /* Enable APB */
+ reset_control_deassert(cdns_phy->apb_rst);
+
+ if (cdns_phy->nsubnodes > 1) {
+ ret = cdns_torrent_phy_configure_multilink(cdns_phy);
+ if (ret)
+ goto put_lnk_rst;
+ }
+
+ return 0;
+
+put_lnk_rst:
+ for (i = 0; i < node; i++)
+ reset_control_assert(cdns_phy->phys[i].lnk_rst);
+ reset_control_assert(cdns_phy->apb_rst);
+
+ clk_disable_unprepare(cdns_phy->clk1);
+ clk_disable_unprepare(cdns_phy->clk);
+
+ return ret;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(cdns_torrent_phy_pm_ops,
+ cdns_torrent_phy_suspend_noirq,
+ cdns_torrent_phy_resume_noirq);
+
/* USB and DP link configuration */
static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
@@ -4043,7 +4123,8 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
- {0x00B3, DRV_DIAG_TX_DRV}
+ {0x00B3, DRV_DIAG_TX_DRV},
+ {0x0002, XCVR_DIAG_PSC_OVRD}
};
static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
@@ -4052,7 +4133,8 @@ static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x00B3, DRV_DIAG_TX_DRV},
- {0x4000, XCVR_DIAG_RXCLK_CTRL},
+ {0x0002, XCVR_DIAG_PSC_OVRD},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL}
};
static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
@@ -4219,7 +4301,8 @@ static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x0011, TX_TXCC_MGNFS_MULT_100},
- {0x0003, DRV_DIAG_TX_DRV}
+ {0x0003, DRV_DIAG_TX_DRV},
+ {0x0002, XCVR_DIAG_PSC_OVRD}
};
static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
@@ -4229,7 +4312,8 @@ static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x0011, TX_TXCC_MGNFS_MULT_100},
{0x0003, DRV_DIAG_TX_DRV},
- {0x4000, XCVR_DIAG_RXCLK_CTRL},
+ {0x0002, XCVR_DIAG_PSC_OVRD},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL}
};
static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
@@ -4541,11 +4625,13 @@ static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_SGMII), &sgmii_qsgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals},
@@ -4575,11 +4661,13 @@ static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_SGMII), &sgmii_qsgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals},
@@ -4635,6 +4723,8 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
@@ -4645,6 +4735,8 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
@@ -4713,6 +4805,8 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
@@ -4723,6 +4817,8 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
@@ -4791,6 +4887,8 @@ static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
@@ -4801,6 +4899,8 @@ static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
@@ -4905,6 +5005,8 @@ static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
@@ -4915,6 +5017,8 @@ static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
@@ -5017,6 +5121,8 @@ static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
@@ -5027,6 +5133,8 @@ static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
@@ -5095,6 +5203,8 @@ static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
@@ -5105,6 +5215,8 @@ static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
@@ -5173,6 +5285,8 @@ static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
@@ -5183,6 +5297,8 @@ static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
@@ -5275,6 +5391,7 @@ static struct platform_driver cdns_torrent_phy_driver = {
.driver = {
.name = "cdns-torrent-phy",
.of_match_table = cdns_torrent_phy_of_match,
+ .pm = pm_sleep_ptr(&cdns_torrent_phy_pm_ops),
}
};
module_platform_driver(cdns_torrent_phy_driver);
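The Torrent changes fold the old noop_ops PHY into an already_configured flag on the regular ops, split reset and clock lookup into cdns_torrent_of_get_reset()/cdns_torrent_of_get_clk() so device-tree resources are fetched even when the PHY comes up pre-configured, and add system-sleep support: the suspend_noirq callback asserts the resets and drops the clocks, the resume_noirq callback re-enables the clocks, releases the APB reset and redoes multilink configuration. The PM wiring itself is the standard noirq pattern; a reduced sketch under those assumptions, with an invented "example" driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend_noirq(struct device *dev)
{
	/* assert resets, disable clocks (details elided) */
	return 0;
}

static int example_resume_noirq(struct device *dev)
{
	/* re-enable clocks, redo one-time link configuration */
	return 0;
}

static DEFINE_NOIRQ_DEV_PM_OPS(example_pm_ops,
			       example_suspend_noirq,
			       example_resume_noirq);

static struct platform_driver example_phy_driver = {
	.driver = {
		.name = "example-phy",
		.pm   = pm_sleep_ptr(&example_pm_ops),	/* NULL when CONFIG_PM_SLEEP is off */
	},
};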
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index 45aaaea14fb4..dcd9acff6d01 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -35,12 +35,19 @@ config PHY_FSL_IMX8M_PCIE
Enable this to add support for the PCIE PHY as found on
i.MX8M family of SOCs.
+config PHY_FSL_IMX8QM_HSIO
+ tristate "Freescale i.MX8QM HSIO PHY"
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to add support for the HSIO PHY as found on
+ i.MX8QM family of SOCs.
+
config PHY_FSL_SAMSUNG_HDMI_PHY
tristate "Samsung HDMI PHY support"
depends on OF && HAS_IOMEM && COMMON_CLK
help
Enable this to add support for the Samsung HDMI PHY in i.MX8MP.
-
endif
config PHY_FSL_LYNX_28G
diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile
index c4386bfdb853..658eac7d0a62 100644
--- a/drivers/phy/freescale/Makefile
+++ b/drivers/phy/freescale/Makefile
@@ -3,5 +3,6 @@ obj-$(CONFIG_PHY_FSL_IMX8MQ_USB) += phy-fsl-imx8mq-usb.o
obj-$(CONFIG_PHY_MIXEL_LVDS_PHY) += phy-fsl-imx8qm-lvds-phy.o
obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o
obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o
+obj-$(CONFIG_PHY_FSL_IMX8QM_HSIO) += phy-fsl-imx8qm-hsio.o
obj-$(CONFIG_PHY_FSL_LYNX_28G) += phy-fsl-lynx-28g.o
obj-$(CONFIG_PHY_FSL_SAMSUNG_HDMI_PHY) += phy-fsl-samsung-hdmi.o
diff --git a/drivers/phy/freescale/phy-fsl-imx8qm-hsio.c b/drivers/phy/freescale/phy-fsl-imx8qm-hsio.c
new file mode 100644
index 000000000000..5dca93cd325c
--- /dev/null
+++ b/drivers/phy/freescale/phy-fsl-imx8qm-hsio.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci_regs.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/pcie.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+
+#define MAX_NUM_LANE 3
+#define LANE_NUM_CLKS 5
+
+/* Parameters for waiting for the PCIe PHY PLL to lock */
+#define PHY_INIT_WAIT_USLEEP_MAX 10
+#define PHY_INIT_WAIT_TIMEOUT (1000 * PHY_INIT_WAIT_USLEEP_MAX)
+
+/* i.MX8Q HSIO registers */
+#define HSIO_CTRL0 0x0
+#define HSIO_APB_RSTN_0 BIT(0)
+#define HSIO_APB_RSTN_1 BIT(1)
+#define HSIO_PIPE_RSTN_0_MASK GENMASK(25, 24)
+#define HSIO_PIPE_RSTN_1_MASK GENMASK(27, 26)
+#define HSIO_MODE_MASK GENMASK(20, 17)
+#define HSIO_MODE_PCIE 0x0
+#define HSIO_MODE_SATA 0x4
+#define HSIO_DEVICE_TYPE_MASK GENMASK(27, 24)
+#define HSIO_EPCS_TXDEEMP BIT(5)
+#define HSIO_EPCS_TXDEEMP_SEL BIT(6)
+#define HSIO_EPCS_PHYRESET_N BIT(7)
+#define HSIO_RESET_N BIT(12)
+
+#define HSIO_IOB_RXENA BIT(0)
+#define HSIO_IOB_TXENA BIT(1)
+#define HSIO_IOB_A_0_TXOE BIT(2)
+#define HSIO_IOB_A_0_M1M0_2 BIT(4)
+#define HSIO_IOB_A_0_M1M0_MASK GENMASK(4, 3)
+#define HSIO_PHYX1_EPCS_SEL BIT(12)
+#define HSIO_PCIE_AB_SELECT BIT(13)
+
+#define HSIO_PHY_STS0 0x4
+#define HSIO_LANE0_TX_PLL_LOCK BIT(4)
+#define HSIO_LANE1_TX_PLL_LOCK BIT(12)
+
+#define HSIO_CTRL2 0x8
+#define HSIO_LTSSM_ENABLE BIT(4)
+#define HSIO_BUTTON_RST_N BIT(21)
+#define HSIO_PERST_N BIT(22)
+#define HSIO_POWER_UP_RST_N BIT(23)
+
+#define HSIO_PCIE_STS0 0xc
+#define HSIO_PM_REQ_CORE_RST BIT(19)
+
+#define HSIO_REG48_PMA_STATUS 0x30
+#define HSIO_REG48_PMA_RDY BIT(7)
+
+struct imx_hsio_drvdata {
+ int lane_num;
+};
+
+struct imx_hsio_lane {
+ u32 ctrl_index;
+ u32 ctrl_off;
+ u32 idx;
+ u32 phy_off;
+ u32 phy_type;
+ const char * const *clk_names;
+ struct clk_bulk_data clks[LANE_NUM_CLKS];
+ struct imx_hsio_priv *priv;
+ struct phy *phy;
+ enum phy_mode phy_mode;
+};
+
+struct imx_hsio_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct mutex lock;
+ const char *hsio_cfg;
+ const char *refclk_pad;
+ u32 open_cnt;
+ struct regmap *phy;
+ struct regmap *ctrl;
+ struct regmap *misc;
+ const struct imx_hsio_drvdata *drvdata;
+ struct imx_hsio_lane lane[MAX_NUM_LANE];
+};
+
+static const char * const lan0_pcie_clks[] = {"apb_pclk0", "pclk0", "ctl0_crr",
+ "phy0_crr", "misc_crr"};
+static const char * const lan1_pciea_clks[] = {"apb_pclk1", "pclk1", "ctl0_crr",
+ "phy0_crr", "misc_crr"};
+static const char * const lan1_pcieb_clks[] = {"apb_pclk1", "pclk1", "ctl1_crr",
+ "phy0_crr", "misc_crr"};
+static const char * const lan2_pcieb_clks[] = {"apb_pclk2", "pclk2", "ctl1_crr",
+ "phy1_crr", "misc_crr"};
+static const char * const lan2_sata_clks[] = {"pclk2", "epcs_tx", "epcs_rx",
+ "phy1_crr", "misc_crr"};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int imx_hsio_init(struct phy *phy)
+{
+ int ret, i;
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+ struct device *dev = priv->dev;
+
+ /* Assign the clocks that correspond to the selected PHY mode */
+ switch (lane->phy_type) {
+ case PHY_TYPE_PCIE:
+ lane->phy_mode = PHY_MODE_PCIE;
+ if (lane->ctrl_index == 0) { /* PCIEA */
+ lane->ctrl_off = 0;
+ lane->phy_off = 0;
+
+ for (i = 0; i < LANE_NUM_CLKS; i++) {
+ if (lane->idx == 0)
+ lane->clks[i].id = lan0_pcie_clks[i];
+ else
+ lane->clks[i].id = lan1_pciea_clks[i];
+ }
+ } else { /* PCIEB */
+ if (lane->idx == 0) { /* i.MX8QXP */
+ lane->ctrl_off = 0;
+ lane->phy_off = 0;
+ } else {
+ /*
+ * On i.MX8QM, only the second or third lane can
+ * be bound to PCIEB.
+ */
+ lane->ctrl_off = SZ_64K;
+ if (lane->idx == 1)
+ lane->phy_off = 0;
+ else /* the third lane is bound to PCIEB */
+ lane->phy_off = SZ_64K;
+ }
+
+ for (i = 0; i < LANE_NUM_CLKS; i++) {
+ if (lane->idx == 1)
+ lane->clks[i].id = lan1_pcieb_clks[i];
+ else if (lane->idx == 2)
+ lane->clks[i].id = lan2_pcieb_clks[i];
+ else /* i.MX8QXP only has PCIEB, idx is 0 */
+ lane->clks[i].id = lan0_pcie_clks[i];
+ }
+ }
+ break;
+ case PHY_TYPE_SATA:
+ /* On i.MX8QM, only the third lane can be bound to SATA */
+ lane->phy_mode = PHY_MODE_SATA;
+ lane->ctrl_off = SZ_128K;
+ lane->phy_off = SZ_64K;
+
+ for (i = 0; i < LANE_NUM_CLKS; i++)
+ lane->clks[i].id = lan2_sata_clks[i];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Fetch clocks and enable them */
+ ret = devm_clk_bulk_get(dev, LANE_NUM_CLKS, lane->clks);
+ if (ret)
+ return ret;
+ ret = clk_bulk_prepare_enable(LANE_NUM_CLKS, lane->clks);
+ if (ret)
+ return ret;
+
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+ return 0;
+}
+
+static int imx_hsio_exit(struct phy *phy)
+{
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+
+ clk_bulk_disable_unprepare(LANE_NUM_CLKS, lane->clks);
+
+ return 0;
+}
+
+static void imx_hsio_pcie_phy_resets(struct phy *phy)
+{
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
+ regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2,
+ HSIO_BUTTON_RST_N);
+ regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2,
+ HSIO_PERST_N);
+ regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2,
+ HSIO_POWER_UP_RST_N);
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2,
+ HSIO_BUTTON_RST_N);
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2,
+ HSIO_PERST_N);
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2,
+ HSIO_POWER_UP_RST_N);
+
+ if (lane->idx == 1) {
+ regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0,
+ HSIO_APB_RSTN_1);
+ regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0,
+ HSIO_PIPE_RSTN_1_MASK);
+ } else {
+ regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0,
+ HSIO_APB_RSTN_0);
+ regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0,
+ HSIO_PIPE_RSTN_0_MASK);
+ }
+}
+
+static void imx_hsio_sata_phy_resets(struct phy *phy)
+{
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
+ /* clear PHY RST, then set it */
+ regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0,
+ HSIO_EPCS_PHYRESET_N);
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0,
+ HSIO_EPCS_PHYRESET_N);
+
+ /* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, HSIO_RESET_N);
+ udelay(1);
+ regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0,
+ HSIO_RESET_N);
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, HSIO_RESET_N);
+}
+
+static void imx_hsio_configure_clk_pad(struct phy *phy)
+{
+ bool pll = false;
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
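+ /*
+ * When the reference clock pad is configured as an output, the
+ * internal PLL drives the pad (TX buffer enabled, RX buffer
+ * disabled); otherwise the pad is treated as a clock input.
+ */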
+ if (priv->refclk_pad && strncmp(priv->refclk_pad, "output", 6) == 0) {
+ pll = true;
+ regmap_update_bits(priv->misc, HSIO_CTRL0,
+ HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_MASK,
+ HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_2);
+ } else {
+ regmap_update_bits(priv->misc, HSIO_CTRL0,
+ HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_MASK,
+ 0);
+ }
+
+ regmap_update_bits(priv->misc, HSIO_CTRL0, HSIO_IOB_RXENA,
+ pll ? 0 : HSIO_IOB_RXENA);
+ regmap_update_bits(priv->misc, HSIO_CTRL0, HSIO_IOB_TXENA,
+ pll ? HSIO_IOB_TXENA : 0);
+}
+
+static void imx_hsio_pre_set(struct phy *phy)
+{
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
+ if (strncmp(priv->hsio_cfg, "pciea-x2-pcieb", 14) == 0) {
+ regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PCIE_AB_SELECT);
+ } else if (strncmp(priv->hsio_cfg, "pciea-x2-sata", 13) == 0) {
+ regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PHYX1_EPCS_SEL);
+ } else if (strncmp(priv->hsio_cfg, "pciea-pcieb-sata", 16) == 0) {
+ regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PCIE_AB_SELECT);
+ regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PHYX1_EPCS_SEL);
+ }
+
+ imx_hsio_configure_clk_pad(phy);
+}
+
+static int imx_hsio_pcie_power_on(struct phy *phy)
+{
+ int ret;
+ u32 val, addr, cond;
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
+ imx_hsio_pcie_phy_resets(phy);
+
+ /* Toggle apb_pclk to make sure PM_REQ_CORE_RST is cleared. */
+ clk_disable_unprepare(lane->clks[0].clk);
+ mdelay(1);
+ ret = clk_prepare_enable(lane->clks[0].clk);
+ if (ret) {
+ dev_err(priv->dev, "unable to enable phy apb_pclk\n");
+ return ret;
+ }
+
+ addr = lane->ctrl_off + HSIO_PCIE_STS0;
+ cond = HSIO_PM_REQ_CORE_RST;
+ ret = regmap_read_poll_timeout(priv->ctrl, addr, val,
+ (val & cond) == 0,
+ PHY_INIT_WAIT_USLEEP_MAX,
+ PHY_INIT_WAIT_TIMEOUT);
+ if (ret)
+ dev_err(priv->dev, "HSIO_PM_REQ_CORE_RST is set\n");
+ return ret;
+}
+
+static int imx_hsio_sata_power_on(struct phy *phy)
+{
+ int ret;
+ u32 val, cond;
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
+ regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, HSIO_APB_RSTN_0);
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0,
+ HSIO_EPCS_TXDEEMP);
+ regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0,
+ HSIO_EPCS_TXDEEMP_SEL);
+
+ imx_hsio_sata_phy_resets(phy);
+
+ cond = HSIO_REG48_PMA_RDY;
+ ret = read_poll_timeout(readb, val, ((val & cond) == cond),
+ PHY_INIT_WAIT_USLEEP_MAX,
+ PHY_INIT_WAIT_TIMEOUT, false,
+ priv->base + HSIO_REG48_PMA_STATUS);
+ if (ret)
+ dev_err(priv->dev, "PHY calibration is timeout\n");
+ else
+ dev_dbg(priv->dev, "PHY calibration is done\n");
+
+ return ret;
+}
+
+static int imx_hsio_power_on(struct phy *phy)
+{
+ int ret;
+ u32 val, cond;
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
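+ /*
+ * The shared HSIO configuration bits are programmed only by the
+ * first lane that is powered on; open_cnt tracks the active lanes.
+ */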
+ scoped_guard(mutex, &priv->lock) {
+ if (!priv->open_cnt)
+ imx_hsio_pre_set(phy);
+ priv->open_cnt++;
+ }
+
+ if (lane->phy_mode == PHY_MODE_PCIE)
+ ret = imx_hsio_pcie_power_on(phy);
+ else /* SATA */
+ ret = imx_hsio_sata_power_on(phy);
+ if (ret)
+ return ret;
+
+ /* Poll to check whether the PHY is ready. */
+ if (lane->idx == 1)
+ cond = HSIO_LANE1_TX_PLL_LOCK;
+ else
+ /*
+ * Except for phy_off, lane2 uses the same bit offsets as lane0,
+ * so the lane0 and lane2 bit operations are merged.
+ */
+ cond = HSIO_LANE0_TX_PLL_LOCK;
+
+ ret = regmap_read_poll_timeout(priv->phy, lane->phy_off + HSIO_PHY_STS0,
+ val, ((val & cond) == cond),
+ PHY_INIT_WAIT_USLEEP_MAX,
+ PHY_INIT_WAIT_TIMEOUT);
+ if (ret) {
+ dev_err(priv->dev, "IMX8Q PHY%d PLL lock timeout\n", lane->idx);
+ return ret;
+ }
+ dev_dbg(priv->dev, "IMX8Q PHY%d PLL is locked\n", lane->idx);
+
+ return ret;
+}
+
+static int imx_hsio_power_off(struct phy *phy)
+{
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
+ scoped_guard(mutex, &priv->lock) {
+ priv->open_cnt--;
+ if (priv->open_cnt == 0) {
+ regmap_clear_bits(priv->misc, HSIO_CTRL0,
+ HSIO_PCIE_AB_SELECT);
+ regmap_clear_bits(priv->misc, HSIO_CTRL0,
+ HSIO_PHYX1_EPCS_SEL);
+
+ if (lane->phy_mode == PHY_MODE_PCIE) {
+ regmap_clear_bits(priv->ctrl,
+ lane->ctrl_off + HSIO_CTRL2,
+ HSIO_BUTTON_RST_N);
+ regmap_clear_bits(priv->ctrl,
+ lane->ctrl_off + HSIO_CTRL2,
+ HSIO_PERST_N);
+ regmap_clear_bits(priv->ctrl,
+ lane->ctrl_off + HSIO_CTRL2,
+ HSIO_POWER_UP_RST_N);
+ } else {
+ regmap_clear_bits(priv->ctrl,
+ lane->ctrl_off + HSIO_CTRL0,
+ HSIO_EPCS_TXDEEMP);
+ regmap_clear_bits(priv->ctrl,
+ lane->ctrl_off + HSIO_CTRL0,
+ HSIO_EPCS_TXDEEMP_SEL);
+ regmap_clear_bits(priv->ctrl,
+ lane->ctrl_off + HSIO_CTRL0,
+ HSIO_RESET_N);
+ }
+
+ if (lane->idx == 1) {
+ regmap_clear_bits(priv->phy,
+ lane->phy_off + HSIO_CTRL0,
+ HSIO_APB_RSTN_1);
+ regmap_clear_bits(priv->phy,
+ lane->phy_off + HSIO_CTRL0,
+ HSIO_PIPE_RSTN_1_MASK);
+ } else {
+ /*
+ * Except for phy_off, lane2 uses the same bit
+ * offsets as lane0, so the lane0 and lane2 bit
+ * operations are merged.
+ */
+ regmap_clear_bits(priv->phy,
+ lane->phy_off + HSIO_CTRL0,
+ HSIO_APB_RSTN_0);
+ regmap_clear_bits(priv->phy,
+ lane->phy_off + HSIO_CTRL0,
+ HSIO_PIPE_RSTN_0_MASK);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int imx_hsio_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ u32 val;
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
+ if (lane->phy_mode != mode)
+ return -EINVAL;
+
+ val = (mode == PHY_MODE_PCIE) ? HSIO_MODE_PCIE : HSIO_MODE_SATA;
+ val = FIELD_PREP(HSIO_MODE_MASK, val);
+ regmap_update_bits(priv->phy, lane->phy_off + HSIO_CTRL0,
+ HSIO_MODE_MASK, val);
+
+ switch (submode) {
+ case PHY_MODE_PCIE_RC:
+ val = FIELD_PREP(HSIO_DEVICE_TYPE_MASK, PCI_EXP_TYPE_ROOT_PORT);
+ break;
+ case PHY_MODE_PCIE_EP:
+ val = FIELD_PREP(HSIO_DEVICE_TYPE_MASK, PCI_EXP_TYPE_ENDPOINT);
+ break;
+ default: /* Only PCIe RC and EP submodes are supported for now. */
+ return 0;
+ }
+ if (submode)
+ regmap_update_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0,
+ HSIO_DEVICE_TYPE_MASK, val);
+
+ return 0;
+}
+
+static int imx_hsio_set_speed(struct phy *phy, int speed)
+{
+ struct imx_hsio_lane *lane = phy_get_drvdata(phy);
+ struct imx_hsio_priv *priv = lane->priv;
+
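+ /* A non-zero speed starts the LTSSM, zero stops it. */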
+ regmap_update_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2,
+ HSIO_LTSSM_ENABLE,
+ speed ? HSIO_LTSSM_ENABLE : 0);
+ return 0;
+}
+
+static const struct phy_ops imx_hsio_ops = {
+ .init = imx_hsio_init,
+ .exit = imx_hsio_exit,
+ .power_on = imx_hsio_power_on,
+ .power_off = imx_hsio_power_off,
+ .set_mode = imx_hsio_set_mode,
+ .set_speed = imx_hsio_set_speed,
+ .owner = THIS_MODULE,
+};
+
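+/*
+ * The i.MX8QXP HSIO block provides a single SerDes lane, while the
+ * i.MX8QM one provides three lanes whose mapping to PCIEA/PCIEB/SATA
+ * depends on the selected "fsl,hsio-cfg" mode.
+ */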
+static const struct imx_hsio_drvdata imx8qxp_hsio_drvdata = {
+ .lane_num = 0x1,
+};
+
+static const struct imx_hsio_drvdata imx8qm_hsio_drvdata = {
+ .lane_num = 0x3,
+};
+
+static const struct of_device_id imx_hsio_of_match[] = {
+ {.compatible = "fsl,imx8qm-hsio", .data = &imx8qm_hsio_drvdata},
+ {.compatible = "fsl,imx8qxp-hsio", .data = &imx8qxp_hsio_drvdata},
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_hsio_of_match);
+
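+/*
+ * The of_xlate cells are the lane index, the PHY type and the controller
+ * index; an illustrative (hypothetical) consumer reference would look like
+ * "phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;". See the DT binding for the
+ * authoritative cell layout.
+ */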
+static struct phy *imx_hsio_xlate(struct device *dev,
+ const struct of_phandle_args *args)
+{
+ struct imx_hsio_priv *priv = dev_get_drvdata(dev);
+ int idx = args->args[0];
+ int phy_type = args->args[1];
+ int ctrl_index = args->args[2];
+
+ if (idx < 0 || idx >= priv->drvdata->lane_num)
+ return ERR_PTR(-EINVAL);
+ priv->lane[idx].idx = idx;
+ priv->lane[idx].phy_type = phy_type;
+ priv->lane[idx].ctrl_index = ctrl_index;
+
+ return priv->lane[idx].phy;
+}
+
+static int imx_hsio_probe(struct platform_device *pdev)
+{
+ int i;
+ void __iomem *off;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx_hsio_priv *priv;
+ struct phy_provider *provider;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = &pdev->dev;
+ priv->drvdata = of_device_get_match_data(dev);
+
+ /* Get HSIO configuration mode */
+ if (of_property_read_string(np, "fsl,hsio-cfg", &priv->hsio_cfg))
+ priv->hsio_cfg = "pciea-pcieb-sata";
+ /* Get PHY refclk pad mode */
+ if (of_property_read_string(np, "fsl,refclk-pad-mode",
+ &priv->refclk_pad))
+ priv->refclk_pad = NULL;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ off = devm_platform_ioremap_resource_byname(pdev, "phy");
+ priv->phy = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(priv->phy))
+ return dev_err_probe(dev, PTR_ERR(priv->phy),
+ "unable to find phy csr registers\n");
+
+ off = devm_platform_ioremap_resource_byname(pdev, "ctrl");
+ priv->ctrl = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(priv->ctrl))
+ return dev_err_probe(dev, PTR_ERR(priv->ctrl),
+ "unable to find ctrl csr registers\n");
+
+ off = devm_platform_ioremap_resource_byname(pdev, "misc");
+ priv->misc = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(priv->misc))
+ return dev_err_probe(dev, PTR_ERR(priv->misc),
+ "unable to find misc csr registers\n");
+
+ for (i = 0; i < priv->drvdata->lane_num; i++) {
+ struct imx_hsio_lane *lane = &priv->lane[i];
+ struct phy *phy;
+
+ phy = devm_phy_create(&pdev->dev, NULL, &imx_hsio_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ lane->priv = priv;
+ lane->phy = phy;
+ lane->idx = i;
+ phy_set_drvdata(phy, lane);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ provider = devm_of_phy_provider_register(&pdev->dev, imx_hsio_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static struct platform_driver imx_hsio_driver = {
+ .probe = imx_hsio_probe,
+ .driver = {
+ .name = "imx8qm-hsio-phy",
+ .of_match_table = imx_hsio_of_match,
+ }
+};
+module_platform_driver(imx_hsio_driver);
+
+MODULE_DESCRIPTION("FSL IMX8QM HSIO SERDES PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-airoha-pcie-regs.h b/drivers/phy/phy-airoha-pcie-regs.h
new file mode 100644
index 000000000000..bb1f679ca1df
--- /dev/null
+++ b/drivers/phy/phy-airoha-pcie-regs.h
@@ -0,0 +1,494 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef _PHY_AIROHA_PCIE_H
+#define _PHY_AIROHA_PCIE_H
+
+/* CSR_2L */
+#define REG_CSR_2L_CMN 0x0000
+#define CSR_2L_PXP_CMN_LANE_EN BIT(0)
+#define CSR_2L_PXP_CMN_TRIM_MASK GENMASK(28, 24)
+
+#define REG_CSR_2L_JCPLL_IB_EXT 0x0004
+#define REG_CSR_2L_JCPLL_LPF_SHCK_EN BIT(8)
+#define CSR_2L_PXP_JCPLL_CHP_IBIAS GENMASK(21, 16)
+#define CSR_2L_PXP_JCPLL_CHP_IOFST GENMASK(29, 24)
+
+#define REG_CSR_2L_JCPLL_LPF_BR 0x0008
+#define CSR_2L_PXP_JCPLL_LPF_BR GENMASK(4, 0)
+#define CSR_2L_PXP_JCPLL_LPF_BC GENMASK(12, 8)
+#define CSR_2L_PXP_JCPLL_LPF_BP GENMASK(20, 16)
+#define CSR_2L_PXP_JCPLL_LPF_BWR GENMASK(28, 24)
+
+#define REG_CSR_2L_JCPLL_LPF_BWC 0x000c
+#define CSR_2L_PXP_JCPLL_LPF_BWC GENMASK(4, 0)
+#define CSR_2L_PXP_JCPLL_KBAND_CODE GENMASK(23, 16)
+#define CSR_2L_PXP_JCPLL_KBAND_DIV GENMASK(26, 24)
+
+#define REG_CSR_2L_JCPLL_KBAND_KFC 0x0010
+#define CSR_2L_PXP_JCPLL_KBAND_KFC GENMASK(1, 0)
+#define CSR_2L_PXP_JCPLL_KBAND_KF GENMASK(9, 8)
+#define CSR_2L_PXP_JCPLL_KBAND_KS GENMASK(17, 16)
+#define CSR_2L_PXP_JCPLL_POSTDIV_EN BIT(24)
+
+#define REG_CSR_2L_JCPLL_MMD_PREDIV_MODE 0x0014
+#define CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE GENMASK(1, 0)
+#define CSR_2L_PXP_JCPLL_POSTDIV_D2 BIT(16)
+#define CSR_2L_PXP_JCPLL_POSTDIV_D5 BIT(24)
+
+#define CSR_2L_PXP_JCPLL_MONCK 0x0018
+#define CSR_2L_PXP_JCPLL_REFIN_DIV GENMASK(25, 24)
+
+#define REG_CSR_2L_JCPLL_RST_DLY 0x001c
+#define CSR_2L_PXP_JCPLL_RST_DLY GENMASK(2, 0)
+#define CSR_2L_PXP_JCPLL_RST BIT(8)
+#define CSR_2L_PXP_JCPLL_SDM_DI_EN BIT(16)
+#define CSR_2L_PXP_JCPLL_SDM_DI_LS GENMASK(25, 24)
+
+#define REG_CSR_2L_JCPLL_SDM_IFM 0x0020
+#define CSR_2L_PXP_JCPLL_SDM_IFM BIT(0)
+
+#define REG_CSR_2L_JCPLL_SDM_HREN 0x0024
+#define CSR_2L_PXP_JCPLL_SDM_HREN BIT(0)
+#define CSR_2L_PXP_JCPLL_TCL_AMP_EN BIT(8)
+#define CSR_2L_PXP_JCPLL_TCL_AMP_GAIN GENMASK(18, 16)
+#define CSR_2L_PXP_JCPLL_TCL_AMP_VREF GENMASK(28, 24)
+
+#define REG_CSR_2L_JCPLL_TCL_CMP 0x0028
+#define CSR_2L_PXP_JCPLL_TCL_LPF_EN BIT(16)
+#define CSR_2L_PXP_JCPLL_TCL_LPF_BW GENMASK(26, 24)
+
+#define REG_CSR_2L_JCPLL_VCODIV 0x002c
+#define CSR_2L_PXP_JCPLL_VCO_CFIX GENMASK(9, 8)
+#define CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN BIT(16)
+#define CSR_2L_PXP_JCPLL_VCO_SCAPWR GENMASK(26, 24)
+
+#define REG_CSR_2L_JCPLL_VCO_TCLVAR 0x0030
+#define CSR_2L_PXP_JCPLL_VCO_TCLVAR GENMASK(2, 0)
+
+#define REG_CSR_2L_JCPLL_SSC 0x0038
+#define CSR_2L_PXP_JCPLL_SSC_EN BIT(0)
+#define CSR_2L_PXP_JCPLL_SSC_PHASE_INI BIT(8)
+#define CSR_2L_PXP_JCPLL_SSC_TRI_EN BIT(16)
+
+#define REG_CSR_2L_JCPLL_SSC_DELTA1 0x003c
+#define CSR_2L_PXP_JCPLL_SSC_DELTA1 GENMASK(15, 0)
+#define CSR_2L_PXP_JCPLL_SSC_DELTA GENMASK(31, 16)
+
+#define REG_CSR_2L_JCPLL_SSC_PERIOD 0x0040
+#define CSR_2L_PXP_JCPLL_SSC_PERIOD GENMASK(15, 0)
+
+#define REG_CSR_2L_JCPLL_TCL_VTP_EN 0x004c
+#define CSR_2L_PXP_JCPLL_SPARE_LOW GENMASK(31, 24)
+
+#define REG_CSR_2L_JCPLL_TCL_KBAND_VREF 0x0050
+#define CSR_2L_PXP_JCPLL_TCL_KBAND_VREF GENMASK(4, 0)
+#define CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN BIT(24)
+
+#define REG_CSR_2L_750M_SYS_CK 0x0054
+#define CSR_2L_PXP_TXPLL_LPF_SHCK_EN BIT(16)
+#define CSR_2L_PXP_TXPLL_CHP_IBIAS GENMASK(29, 24)
+
+#define REG_CSR_2L_TXPLL_CHP_IOFST 0x0058
+#define CSR_2L_PXP_TXPLL_CHP_IOFST GENMASK(5, 0)
+#define CSR_2L_PXP_TXPLL_LPF_BR GENMASK(12, 8)
+#define CSR_2L_PXP_TXPLL_LPF_BC GENMASK(20, 16)
+#define CSR_2L_PXP_TXPLL_LPF_BP GENMASK(28, 24)
+
+#define REG_CSR_2L_TXPLL_LPF_BWR 0x005c
+#define CSR_2L_PXP_TXPLL_LPF_BWR GENMASK(4, 0)
+#define CSR_2L_PXP_TXPLL_LPF_BWC GENMASK(12, 8)
+#define CSR_2L_PXP_TXPLL_KBAND_CODE GENMASK(31, 24)
+
+#define REG_CSR_2L_TXPLL_KBAND_DIV 0x0060
+#define CSR_2L_PXP_TXPLL_KBAND_DIV GENMASK(2, 0)
+#define CSR_2L_PXP_TXPLL_KBAND_KFC GENMASK(9, 8)
+#define CSR_2L_PXP_TXPLL_KBAND_KF GENMASK(17, 16)
+#define CSR_2L_PXP_txpll_KBAND_KS GENMASK(25, 24)
+
+#define REG_CSR_2L_TXPLL_POSTDIV 0x0064
+#define CSR_2L_PXP_TXPLL_POSTDIV_EN BIT(0)
+#define CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE GENMASK(9, 8)
+#define CSR_2L_PXP_TXPLL_PHY_CK1_EN BIT(24)
+
+#define REG_CSR_2L_TXPLL_PHY_CK2 0x0068
+#define CSR_2L_PXP_TXPLL_REFIN_INTERNAL BIT(24)
+
+#define REG_CSR_2L_TXPLL_REFIN_DIV 0x006c
+#define CSR_2L_PXP_TXPLL_REFIN_DIV GENMASK(1, 0)
+#define CSR_2L_PXP_TXPLL_RST_DLY GENMASK(10, 8)
+#define CSR_2L_PXP_TXPLL_PLL_RSTB BIT(16)
+
+#define REG_CSR_2L_TXPLL_SDM_DI_LS 0x0070
+#define CSR_2L_PXP_TXPLL_SDM_DI_LS GENMASK(1, 0)
+#define CSR_2L_PXP_TXPLL_SDM_IFM BIT(8)
+#define CSR_2L_PXP_TXPLL_SDM_ORD GENMASK(25, 24)
+
+#define REG_CSR_2L_TXPLL_SDM_OUT 0x0074
+#define CSR_2L_PXP_TXPLL_TCL_AMP_EN BIT(16)
+#define CSR_2L_PXP_TXPLL_TCL_AMP_GAIN GENMASK(26, 24)
+
+#define REG_CSR_2L_TXPLL_TCL_AMP_VREF 0x0078
+#define CSR_2L_PXP_TXPLL_TCL_AMP_VREF GENMASK(4, 0)
+#define CSR_2L_PXP_TXPLL_TCL_LPF_EN BIT(24)
+
+#define REG_CSR_2L_TXPLL_TCL_LPF_BW 0x007c
+#define CSR_2L_PXP_TXPLL_TCL_LPF_BW GENMASK(2, 0)
+#define CSR_2L_PXP_TXPLL_VCO_CFIX GENMASK(17, 16)
+#define CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN BIT(24)
+
+#define REG_CSR_2L_TXPLL_VCO_SCAPWR 0x0080
+#define CSR_2L_PXP_TXPLL_VCO_SCAPWR GENMASK(2, 0)
+
+#define REG_CSR_2L_TXPLL_SSC 0x0084
+#define CSR_2L_PXP_TXPLL_SSC_EN BIT(0)
+#define CSR_2L_PXP_TXPLL_SSC_PHASE_INI BIT(8)
+
+#define REG_CSR_2L_TXPLL_SSC_DELTA1 0x0088
+#define CSR_2L_PXP_TXPLL_SSC_DELTA1 GENMASK(15, 0)
+#define CSR_2L_PXP_TXPLL_SSC_DELTA GENMASK(31, 16)
+
+#define REG_CSR_2L_TXPLL_SSC_PERIOD 0x008c
+#define CSR_2L_PXP_txpll_SSC_PERIOD GENMASK(15, 0)
+
+#define REG_CSR_2L_TXPLL_VTP 0x0090
+#define CSR_2L_PXP_TXPLL_VTP_EN BIT(0)
+
+#define REG_CSR_2L_TXPLL_TCL_VTP 0x0098
+#define CSR_2L_PXP_TXPLL_SPARE_L GENMASK(31, 24)
+
+#define REG_CSR_2L_TXPLL_TCL_KBAND_VREF 0x009c
+#define CSR_2L_PXP_TXPLL_TCL_KBAND_VREF GENMASK(4, 0)
+#define CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN BIT(24)
+
+#define REG_CSR_2L_TXPLL_POSTDIV_D256 0x00a0
+#define CSR_2L_PXP_CLKTX0_AMP GENMASK(10, 8)
+#define CSR_2L_PXP_CLKTX0_OFFSET GENMASK(17, 16)
+#define CSR_2L_PXP_CLKTX0_SR GENMASK(25, 24)
+
+#define REG_CSR_2L_CLKTX0_FORCE_OUT1 0x00a4
+#define CSR_2L_PXP_CLKTX0_HZ BIT(8)
+#define CSR_2L_PXP_CLKTX0_IMP_SEL GENMASK(20, 16)
+#define CSR_2L_PXP_CLKTX1_AMP GENMASK(26, 24)
+
+#define REG_CSR_2L_CLKTX1_OFFSET 0x00a8
+#define CSR_2L_PXP_CLKTX1_OFFSET GENMASK(1, 0)
+#define CSR_2L_PXP_CLKTX1_SR GENMASK(9, 8)
+#define CSR_2L_PXP_CLKTX1_HZ BIT(24)
+
+#define REG_CSR_2L_CLKTX1_IMP_SEL 0x00ac
+#define CSR_2L_PXP_CLKTX1_IMP_SEL GENMASK(4, 0)
+
+#define REG_CSR_2L_PLL_CMN_RESERVE0 0x00b0
+#define CSR_2L_PXP_PLL_RESERVE_MASK GENMASK(15, 0)
+
+#define REG_CSR_2L_TX0_CKLDO 0x00cc
+#define CSR_2L_PXP_TX0_CKLDO_EN BIT(0)
+#define CSR_2L_PXP_TX0_DMEDGEGEN_EN BIT(24)
+
+#define REG_CSR_2L_TX1_CKLDO 0x00e8
+#define CSR_2L_PXP_TX1_CKLDO_EN BIT(0)
+#define CSR_2L_PXP_TX1_DMEDGEGEN_EN BIT(24)
+
+#define REG_CSR_2L_TX1_MULTLANE 0x00ec
+#define CSR_2L_PXP_TX1_MULTLANE_EN BIT(0)
+
+#define REG_CSR_2L_RX0_REV0 0x00fc
+#define CSR_2L_PXP_VOS_PNINV GENMASK(3, 2)
+#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(6, 4)
+#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(10, 8)
+
+#define REG_CSR_2L_RX0_PHYCK_DIV 0x0100
+#define CSR_2L_PXP_RX0_PHYCK_SEL GENMASK(9, 8)
+#define CSR_2L_PXP_RX0_PHYCK_RSTB BIT(16)
+#define CSR_2L_PXP_RX0_TDC_CK_SEL BIT(24)
+
+#define REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV 0x0104
+#define CSR_2L_PXP_CDR0_PD_EDGE_DISABLE BIT(8)
+
+#define REG_CSR_2L_CDR0_LPF_RATIO 0x0110
+#define CSR_2L_PXP_CDR0_LPF_TOP_LIM GENMASK(26, 8)
+
+#define REG_CSR_2L_CDR0_PR_INJ_MODE 0x011c
+#define CSR_2L_PXP_CDR0_INJ_FORCE_OFF BIT(24)
+
+#define REG_CSR_2L_CDR0_PR_BETA_DAC 0x0120
+#define CSR_2L_PXP_CDR0_PR_BETA_SEL GENMASK(19, 16)
+#define CSR_2L_PXP_CDR0_PR_KBAND_DIV GENMASK(26, 24)
+
+#define REG_CSR_2L_CDR0_PR_VREG_IBAND 0x0124
+#define CSR_2L_PXP_CDR0_PR_VREG_IBAND GENMASK(2, 0)
+#define CSR_2L_PXP_CDR0_PR_VREG_CKBUF GENMASK(10, 8)
+
+#define REG_CSR_2L_CDR0_PR_CKREF_DIV 0x0128
+#define CSR_2L_PXP_CDR0_PR_CKREF_DIV GENMASK(1, 0)
+
+#define REG_CSR_2L_CDR0_PR_MONCK 0x012c
+#define CSR_2L_PXP_CDR0_PR_MONCK_ENABLE BIT(0)
+#define CSR_2L_PXP_CDR0_PR_RESERVE0 GENMASK(19, 16)
+
+#define REG_CSR_2L_CDR0_PR_COR_HBW 0x0130
+#define CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON BIT(8)
+#define CSR_2L_PXP_CDR0_PR_CKREF_DIV1 GENMASK(17, 16)
+
+#define REG_CSR_2L_CDR0_PR_MONPI 0x0134
+#define CSR_2L_PXP_CDR0_PR_XFICK_EN BIT(8)
+
+#define REG_CSR_2L_RX0_SIGDET_DCTEST 0x0140
+#define CSR_2L_PXP_RX0_SIGDET_LPF_CTRL GENMASK(9, 8)
+#define CSR_2L_PXP_RX0_SIGDET_PEAK GENMASK(25, 24)
+
+#define REG_CSR_2L_RX0_SIGDET_VTH_SEL 0x0144
+#define CSR_2L_PXP_RX0_SIGDET_VTH_SEL GENMASK(4, 0)
+#define CSR_2L_PXP_RX0_FE_VB_EQ1_EN BIT(24)
+
+#define REG_CSR_2L_PXP_RX0_FE_VB_EQ2 0x0148
+#define CSR_2L_PXP_RX0_FE_VB_EQ2_EN BIT(0)
+#define CSR_2L_PXP_RX0_FE_VB_EQ3_EN BIT(8)
+#define CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB BIT(16)
+
+#define REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS 0x0158
+#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS GENMASK(29, 24)
+
+#define REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS 0x015c
+#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS GENMASK(5, 0)
+#define CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS GENMASK(13, 8)
+
+#define REG_CSR_2L_RX1_REV0 0x01b4
+
+#define REG_CSR_2L_RX1_PHYCK_DIV 0x01b8
+#define CSR_2L_PXP_RX1_PHYCK_SEL GENMASK(9, 8)
+#define CSR_2L_PXP_RX1_PHYCK_RSTB BIT(16)
+#define CSR_2L_PXP_RX1_TDC_CK_SEL BIT(24)
+
+#define REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV 0x01bc
+#define CSR_2L_PXP_CDR1_PD_EDGE_DISABLE BIT(8)
+
+#define REG_CSR_2L_CDR1_PR_BETA_DAC 0x01d8
+#define CSR_2L_PXP_CDR1_PR_BETA_SEL GENMASK(19, 16)
+#define CSR_2L_PXP_CDR1_PR_KBAND_DIV GENMASK(26, 24)
+
+#define REG_CSR_2L_CDR1_PR_MONCK 0x01e4
+#define CSR_2L_PXP_CDR1_PR_MONCK_ENABLE BIT(0)
+#define CSR_2L_PXP_CDR1_PR_RESERVE0 GENMASK(19, 16)
+
+#define REG_CSR_2L_CDR1_LPF_RATIO 0x01c8
+#define CSR_2L_PXP_CDR1_LPF_TOP_LIM GENMASK(26, 8)
+
+#define REG_CSR_2L_CDR1_PR_INJ_MODE 0x01d4
+#define CSR_2L_PXP_CDR1_INJ_FORCE_OFF BIT(24)
+
+#define REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL 0x01dc
+#define CSR_2L_PXP_CDR1_PR_VREG_IBAND GENMASK(2, 0)
+#define CSR_2L_PXP_CDR1_PR_VREG_CKBUF GENMASK(10, 8)
+
+#define REG_CSR_2L_CDR1_PR_CKREF_DIV 0x01e0
+#define CSR_2L_PXP_CDR1_PR_CKREF_DIV GENMASK(1, 0)
+
+#define REG_CSR_2L_CDR1_PR_COR_HBW 0x01e8
+#define CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON BIT(8)
+#define CSR_2L_PXP_CDR1_PR_CKREF_DIV1 GENMASK(17, 16)
+
+#define REG_CSR_2L_CDR1_PR_MONPI 0x01ec
+#define CSR_2L_PXP_CDR1_PR_XFICK_EN BIT(8)
+
+#define REG_CSR_2L_RX1_DAC_RANGE_EYE 0x01f4
+#define CSR_2L_PXP_RX1_SIGDET_LPF_CTRL GENMASK(25, 24)
+
+#define REG_CSR_2L_RX1_SIGDET_NOVTH 0x01f8
+#define CSR_2L_PXP_RX1_SIGDET_PEAK GENMASK(9, 8)
+#define CSR_2L_PXP_RX1_SIGDET_VTH_SEL GENMASK(20, 16)
+
+#define REG_CSR_2L_RX1_FE_VB_EQ1 0x0200
+#define CSR_2L_PXP_RX1_FE_VB_EQ1_EN BIT(0)
+#define CSR_2L_PXP_RX1_FE_VB_EQ2_EN BIT(8)
+#define CSR_2L_PXP_RX1_FE_VB_EQ3_EN BIT(16)
+#define CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB BIT(24)
+
+#define REG_CSR_2L_RX1_OSCAL_VGA1IOS 0x0214
+#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS GENMASK(5, 0)
+#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS GENMASK(13, 8)
+#define CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS GENMASK(21, 16)
+
+/* PMA */
+#define REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1 0x0004
+#define PCIE_LCPLL_MAN_PWDB BIT(0)
+
+#define REG_PCIE_PMA_SEQUENCE_DISB_CTRL1 0x010c
+#define PCIE_DISB_RX_SDCAL_EN BIT(0)
+
+#define REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1 0x0114
+#define PCIE_FORCE_RX_SDCAL_EN BIT(0)
+
+#define REG_PCIE_PMA_SS_RX_FREQ_DET1 0x014c
+#define PCIE_PLL_FT_LOCK_CYCLECNT GENMASK(15, 0)
+#define PCIE_PLL_FT_UNLOCK_CYCLECNT GENMASK(31, 16)
+
+#define REG_PCIE_PMA_SS_RX_FREQ_DET2 0x0150
+#define PCIE_LOCK_TARGET_BEG GENMASK(15, 0)
+#define PCIE_LOCK_TARGET_END GENMASK(31, 16)
+
+#define REG_PCIE_PMA_SS_RX_FREQ_DET3 0x0154
+#define PCIE_UNLOCK_TARGET_BEG GENMASK(15, 0)
+#define PCIE_UNLOCK_TARGET_END GENMASK(31, 16)
+
+#define REG_PCIE_PMA_SS_RX_FREQ_DET4 0x0158
+#define PCIE_FREQLOCK_DET_EN GENMASK(2, 0)
+#define PCIE_LOCK_LOCKTH GENMASK(11, 8)
+#define PCIE_UNLOCK_LOCKTH GENMASK(15, 12)
+
+#define REG_PCIE_PMA_SS_RX_CAL1 0x0160
+#define REG_PCIE_PMA_SS_RX_CAL2 0x0164
+#define PCIE_CAL_OUT_OS GENMASK(11, 8)
+
+#define REG_PCIE_PMA_SS_RX_SIGDET0 0x0168
+#define PCIE_SIGDET_WIN_NONVLD_TIMES GENMASK(28, 24)
+
+#define REG_PCIE_PMA_TX_RESET 0x0260
+#define PCIE_TX_TOP_RST BIT(0)
+#define PCIE_TX_CAL_RST BIT(8)
+
+#define REG_PCIE_PMA_RX_FORCE_MODE0 0x0294
+#define PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL GENMASK(1, 0)
+
+#define REG_PCIE_PMA_SS_DA_XPON_PWDB0 0x034c
+#define PCIE_DA_XPON_CDR_PR_PWDB BIT(8)
+
+#define REG_PCIE_PMA_SW_RESET 0x0460
+#define PCIE_SW_RX_FIFO_RST BIT(0)
+#define PCIE_SW_RX_RST BIT(1)
+#define PCIE_SW_TX_RST BIT(2)
+#define PCIE_SW_PMA_RST BIT(3)
+#define PCIE_SW_ALLPCS_RST BIT(4)
+#define PCIE_SW_REF_RST BIT(5)
+#define PCIE_SW_TX_FIFO_RST BIT(6)
+#define PCIE_SW_XFI_TXPCS_RST BIT(7)
+#define PCIE_SW_XFI_RXPCS_RST BIT(8)
+#define PCIE_SW_XFI_RXPCS_BIST_RST BIT(9)
+#define PCIE_SW_HSG_TXPCS_RST BIT(10)
+#define PCIE_SW_HSG_RXPCS_RST BIT(11)
+#define PCIE_PMA_SW_RST (PCIE_SW_RX_FIFO_RST | \
+ PCIE_SW_RX_RST | \
+ PCIE_SW_TX_RST | \
+ PCIE_SW_PMA_RST | \
+ PCIE_SW_ALLPCS_RST | \
+ PCIE_SW_REF_RST | \
+ PCIE_SW_TX_FIFO_RST | \
+ PCIE_SW_XFI_TXPCS_RST | \
+ PCIE_SW_XFI_RXPCS_RST | \
+ PCIE_SW_XFI_RXPCS_BIST_RST | \
+ PCIE_SW_HSG_TXPCS_RST | \
+ PCIE_SW_HSG_RXPCS_RST)
+
+#define REG_PCIE_PMA_RO_RX_FREQDET 0x0530
+#define PCIE_RO_FBCK_LOCK BIT(0)
+#define PCIE_RO_FL_OUT GENMASK(31, 16)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC 0x0794
+#define PCIE_FORCE_DA_PXP_CDR_PR_IDAC GENMASK(10, 0)
+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC BIT(16)
+#define PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW BIT(24)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW 0x0798
+#define PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW GENMASK(30, 0)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS 0x079c
+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW BIT(16)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW 0x0800
+#define PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW GENMASK(30, 0)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB 0x081c
+#define PCIE_FORCE_DA_PXP_CDR_PD_PWDB BIT(0)
+#define PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB BIT(8)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C 0x0820
+#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN BIT(0)
+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN BIT(8)
+#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN BIT(16)
+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN BIT(24)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB 0x0824
+#define PCIE_FORCE_DA_PXP_CDR_PR_PWDB BIT(16)
+#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB BIT(24)
+
+#define REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT 0x0828
+#define PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN BIT(0)
+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN BIT(8)
+#define PCIE_FORCE_DA_PXP_JCPLL_EN BIT(16)
+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_EN BIT(24)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST 0x084c
+#define PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB BIT(16)
+#define PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB BIT(24)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT 0x0854
+#define PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN BIT(0)
+#define PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN BIT(8)
+#define PCIE_FORCE_DA_PXP_TXPLL_EN BIT(16)
+#define PCIE_FORCE_SEL_DA_PXP_TXPLL_EN BIT(24)
+
+#define REG_PCIE_PMA_SCAN_MODE 0x0884
+#define PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN BIT(0)
+#define PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN BIT(8)
+
+#define REG_PCIE_PMA_DIG_RESERVE_13 0x08bc
+#define PCIE_FLL_IDAC_PCIEG1 GENMASK(10, 0)
+#define PCIE_FLL_IDAC_PCIEG2 GENMASK(26, 16)
+
+#define REG_PCIE_PMA_DIG_RESERVE_14 0x08c0
+#define PCIE_FLL_IDAC_PCIEG3 GENMASK(10, 0)
+#define PCIE_FLL_LOAD_EN BIT(16)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL 0x088c
+#define PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL GENMASK(1, 0)
+#define PCIE_FORCE_SEL_DA_PXP_RX_FE_GAIN_CTRL BIT(8)
+
+#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB 0x0894
+#define PCIE_FORCE_DA_PXP_RX_FE_PWDB BIT(0)
+#define PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB BIT(8)
+
+#define REG_PCIE_PMA_DIG_RESERVE_12 0x08b8
+#define PCIE_FORCE_PMA_RX_SPEED GENMASK(7, 4)
+#define PCIE_FORCE_SEL_PMA_RX_SPEED BIT(7)
+
+#define REG_PCIE_PMA_DIG_RESERVE_17 0x08e0
+
+#define REG_PCIE_PMA_DIG_RESERVE_18 0x08e4
+#define PCIE_PXP_RX_VTH_SEL_PCIE_G1 GENMASK(4, 0)
+#define PCIE_PXP_RX_VTH_SEL_PCIE_G2 GENMASK(12, 8)
+#define PCIE_PXP_RX_VTH_SEL_PCIE_G3 GENMASK(20, 16)
+
+#define REG_PCIE_PMA_DIG_RESERVE_19 0x08e8
+#define PCIE_PCP_RX_REV0_PCIE_GEN1 GENMASK(31, 16)
+
+#define REG_PCIE_PMA_DIG_RESERVE_20 0x08ec
+#define PCIE_PCP_RX_REV0_PCIE_GEN2 GENMASK(15, 0)
+#define PCIE_PCP_RX_REV0_PCIE_GEN3 GENMASK(31, 16)
+
+#define REG_PCIE_PMA_DIG_RESERVE_21 0x08f0
+#define REG_PCIE_PMA_DIG_RESERVE_22 0x08f4
+#define REG_PCIE_PMA_DIG_RESERVE_27 0x0908
+#define REG_PCIE_PMA_DIG_RESERVE_30 0x0914
+
+/* DTIME */
+#define REG_PCIE_PEXTP_DIG_GLB44 0x00
+#define PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL GENMASK(7, 0)
+#define PCIE_XTP_RXDET_EN_STB_T_SEL GENMASK(15, 8)
+#define PCIE_XTP_RXDET_FINISH_STB_T_SEL GENMASK(23, 16)
+#define PCIE_XTP_TXPD_TX_DATA_EN_DLY GENMASK(27, 24)
+#define PCIE_XTP_TXPD_RXDET_DONE_CDT BIT(28)
+#define PCIE_XTP_RXDET_LATCH_STB_T_SEL GENMASK(31, 29)
+
+/* RX AEQ */
+#define REG_PCIE_PEXTP_DIG_LN_RX30_P0 0x0000
+#define PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT GENMASK(7, 0)
+#define PCIE_XTP_LN_RX_PDOWN_T2RLB_DIG_EN BIT(8)
+#define PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT GENMASK(31, 16)
+
+#define REG_PCIE_PEXTP_DIG_LN_RX30_P1 0x0100
+
+#endif /* _PHY_AIROHA_PCIE_H */
diff --git a/drivers/phy/phy-airoha-pcie.c b/drivers/phy/phy-airoha-pcie.c
new file mode 100644
index 000000000000..bd3edaa986c8
--- /dev/null
+++ b/drivers/phy/phy-airoha-pcie.c
@@ -0,0 +1,1286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "phy-airoha-pcie-regs.h"
+
+#define LEQ_LEN_CTRL_MAX_VAL 7
+#define FREQ_LOCK_MAX_ATTEMPT 10
+
+enum airoha_pcie_port_gen {
+ PCIE_PORT_GEN1 = 1,
+ PCIE_PORT_GEN2,
+ PCIE_PORT_GEN3,
+};
+
+/**
+ * struct airoha_pcie_phy - PCIe phy driver main structure
+ * @dev: pointer to device
+ * @phy: pointer to generic phy
+ * @csr_2l: Analog lane IO mapped register base address
+ * @pma0: IO mapped register base address of PMA0-PCIe
+ * @pma1: IO mapped register base address of PMA1-PCIe
+ * @p0_xr_dtime: IO mapped register base address of port0 Tx-Rx detection time
+ * @p1_xr_dtime: IO mapped register base address of port1 Tx-Rx detection time
+ * @rx_aeq: IO mapped register base address of Rx AEQ training
+ */
+struct airoha_pcie_phy {
+ struct device *dev;
+ struct phy *phy;
+ void __iomem *csr_2l;
+ void __iomem *pma0;
+ void __iomem *pma1;
+ void __iomem *p0_xr_dtime;
+ void __iomem *p1_xr_dtime;
+ void __iomem *rx_aeq;
+};
+
+static void airoha_phy_clear_bits(void __iomem *reg, u32 mask)
+{
+ u32 val = readl(reg) & ~mask;
+
+ writel(val, reg);
+}
+
+static void airoha_phy_set_bits(void __iomem *reg, u32 mask)
+{
+ u32 val = readl(reg) | mask;
+
+ writel(val, reg);
+}
+
+static void airoha_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp = readl(reg);
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+ writel(tmp, reg);
+}
+
+#define airoha_phy_update_field(reg, mask, val) \
+ do { \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p((mask)), \
+ "mask is not constant"); \
+ airoha_phy_update_bits((reg), (mask), \
+ FIELD_PREP((mask), (val))); \
+ } while (0)
+
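+/*
+ * Per-block wrappers: e.g. airoha_phy_pma0_update_field(phy, reg, mask, val)
+ * read-modify-writes only the masked field of a PMA0 register; the mask must
+ * be a compile-time constant so that FIELD_PREP() can be used.
+ */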
+#define airoha_phy_csr_2l_clear_bits(pcie_phy, reg, mask) \
+ airoha_phy_clear_bits((pcie_phy)->csr_2l + (reg), (mask))
+#define airoha_phy_csr_2l_set_bits(pcie_phy, reg, mask) \
+ airoha_phy_set_bits((pcie_phy)->csr_2l + (reg), (mask))
+#define airoha_phy_csr_2l_update_field(pcie_phy, reg, mask, val) \
+ airoha_phy_update_field((pcie_phy)->csr_2l + (reg), (mask), (val))
+#define airoha_phy_pma0_clear_bits(pcie_phy, reg, mask) \
+ airoha_phy_clear_bits((pcie_phy)->pma0 + (reg), (mask))
+#define airoha_phy_pma1_clear_bits(pcie_phy, reg, mask) \
+ airoha_phy_clear_bits((pcie_phy)->pma1 + (reg), (mask))
+#define airoha_phy_pma0_set_bits(pcie_phy, reg, mask) \
+ airoha_phy_set_bits((pcie_phy)->pma0 + (reg), (mask))
+#define airoha_phy_pma1_set_bits(pcie_phy, reg, mask) \
+ airoha_phy_set_bits((pcie_phy)->pma1 + (reg), (mask))
+#define airoha_phy_pma0_update_field(pcie_phy, reg, mask, val) \
+ airoha_phy_update_field((pcie_phy)->pma0 + (reg), (mask), (val))
+#define airoha_phy_pma1_update_field(pcie_phy, reg, mask, val) \
+ airoha_phy_update_field((pcie_phy)->pma1 + (reg), (mask), (val))
+
+static void
+airoha_phy_init_lane0_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy,
+ enum airoha_pcie_port_gen gen)
+{
+ u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941;
+ u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 26000 : 32767;
+ u32 pr_idac, val, cdr_pr_idac_tmp = 0;
+ int i;
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1,
+ PCIE_LCPLL_MAN_PWDB);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
+ PCIE_LOCK_TARGET_BEG,
+ fl_out_target - 100);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
+ PCIE_LOCK_TARGET_END,
+ fl_out_target + 100);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
+ PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_LOCK_LOCKTH, 0x3);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
+ PCIE_UNLOCK_TARGET_BEG,
+ fl_out_target - 100);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
+ PCIE_UNLOCK_TARGET_END,
+ fl_out_target + 100);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
+ PCIE_PLL_FT_UNLOCK_CYCLECNT,
+ lock_cyclecnt);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_UNLOCK_LOCKTH, 0x3);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE,
+ CSR_2L_PXP_CDR0_INJ_FORCE_OFF);
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
+
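+ /*
+ * Calibrate the CDR PR IDAC against the frequency-lock target: sweep
+ * the coarse (upper) bits first and keep the last value whose measured
+ * frequency exceeds the target, then refine the lower bits one at a
+ * time in a successive-approximation fashion.
+ */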
+ for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) {
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN, 0x3);
+
+ usleep_range(10000, 15000);
+
+ val = FIELD_GET(PCIE_RO_FL_OUT,
+ readl(pcie_phy->pma0 +
+ REG_PCIE_PMA_RO_RX_FREQDET));
+ if (val > fl_out_target)
+ cdr_pr_idac_tmp = i << 8;
+ }
+
+ for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) {
+ pr_idac = cdr_pr_idac_tmp | (0x1 << i);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN, 0x3);
+
+ usleep_range(10000, 15000);
+
+ val = FIELD_GET(PCIE_RO_FL_OUT,
+ readl(pcie_phy->pma0 +
+ REG_PCIE_PMA_RO_RX_FREQDET));
+ if (val < fl_out_target)
+ pr_idac &= ~(0x1 << i);
+
+ cdr_pr_idac_tmp = pr_idac;
+ }
+
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_DA_PXP_CDR_PR_IDAC,
+ cdr_pr_idac_tmp);
+
+ for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) {
+ u32 val;
+
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN, 0x3);
+
+ usleep_range(10000, 15000);
+
+ val = readl(pcie_phy->pma0 + REG_PCIE_PMA_RO_RX_FREQDET);
+ if (val & PCIE_RO_FBCK_LOCK)
+ break;
+ }
+
+ /* turn off force mode and update band values */
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE,
+ CSR_2L_PXP_CDR0_INJ_FORCE_OFF);
+
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
+ if (gen == PCIE_PORT_GEN3) {
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_DIG_RESERVE_14,
+ PCIE_FLL_IDAC_PCIEG3,
+ cdr_pr_idac_tmp);
+ } else {
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_DIG_RESERVE_13,
+ PCIE_FLL_IDAC_PCIEG1,
+ cdr_pr_idac_tmp);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_DIG_RESERVE_13,
+ PCIE_FLL_IDAC_PCIEG2,
+ cdr_pr_idac_tmp);
+ }
+}
+
+static void
+airoha_phy_init_lane1_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy,
+ enum airoha_pcie_port_gen gen)
+{
+ u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941;
+ u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 26000 : 32767;
+ u32 pr_idac, val, cdr_pr_idac_tmp = 0;
+ int i;
+
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1,
+ PCIE_LCPLL_MAN_PWDB);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
+ PCIE_LOCK_TARGET_BEG,
+ fl_out_target - 100);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
+ PCIE_LOCK_TARGET_END,
+ fl_out_target + 100);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
+ PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_LOCK_LOCKTH, 0x3);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
+ PCIE_UNLOCK_TARGET_BEG,
+ fl_out_target - 100);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
+ PCIE_UNLOCK_TARGET_END,
+ fl_out_target + 100);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
+ PCIE_PLL_FT_UNLOCK_CYCLECNT,
+ lock_cyclecnt);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_UNLOCK_LOCKTH, 0x3);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE,
+ CSR_2L_PXP_CDR1_INJ_FORCE_OFF);
+
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
+
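+ /* Same coarse sweep and successive approximation as lane 0, on PMA1. */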
+ for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) {
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN, 0x3);
+
+ usleep_range(10000, 15000);
+
+ val = FIELD_GET(PCIE_RO_FL_OUT,
+ readl(pcie_phy->pma1 +
+ REG_PCIE_PMA_RO_RX_FREQDET));
+ if (val > fl_out_target)
+ cdr_pr_idac_tmp = i << 8;
+ }
+
+ for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) {
+ pr_idac = cdr_pr_idac_tmp | (0x1 << i);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN, 0x3);
+
+ usleep_range(10000, 15000);
+
+ val = FIELD_GET(PCIE_RO_FL_OUT,
+ readl(pcie_phy->pma1 +
+ REG_PCIE_PMA_RO_RX_FREQDET));
+ if (val < fl_out_target)
+ pr_idac &= ~(0x1 << i);
+
+ cdr_pr_idac_tmp = pr_idac;
+ }
+
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_DA_PXP_CDR_PR_IDAC,
+ cdr_pr_idac_tmp);
+
+ for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) {
+ u32 val;
+
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_SS_RX_FREQ_DET4,
+ PCIE_FREQLOCK_DET_EN, 0x3);
+
+ usleep_range(10000, 15000);
+
+ val = readl(pcie_phy->pma1 + REG_PCIE_PMA_RO_RX_FREQDET);
+ if (val & PCIE_RO_FBCK_LOCK)
+ break;
+ }
+
+ /* turn off force mode and update band values */
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE,
+ CSR_2L_PXP_CDR1_INJ_FORCE_OFF);
+
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
+ if (gen == PCIE_PORT_GEN3) {
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_DIG_RESERVE_14,
+ PCIE_FLL_IDAC_PCIEG3,
+ cdr_pr_idac_tmp);
+ } else {
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_DIG_RESERVE_13,
+ PCIE_FLL_IDAC_PCIEG1,
+ cdr_pr_idac_tmp);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_DIG_RESERVE_13,
+ PCIE_FLL_IDAC_PCIEG2,
+ cdr_pr_idac_tmp);
+ }
+}
+
+static void airoha_pcie_phy_init_default(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CMN,
+ CSR_2L_PXP_CMN_TRIM_MASK, 0x10);
+ writel(0xcccbcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_21);
+ writel(0xcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_22);
+ writel(0xcccbcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_21);
+ writel(0xcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_22);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CMN,
+ CSR_2L_PXP_CMN_LANE_EN);
+}
+
+static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_TXPLL_POSTDIV_D256,
+ CSR_2L_PXP_CLKTX0_AMP, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_CLKTX0_FORCE_OUT1,
+ CSR_2L_PXP_CLKTX1_AMP, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_TXPLL_POSTDIV_D256,
+ CSR_2L_PXP_CLKTX0_OFFSET, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
+ CSR_2L_PXP_CLKTX1_OFFSET, 0x2);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX0_FORCE_OUT1,
+ CSR_2L_PXP_CLKTX0_HZ);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
+ CSR_2L_PXP_CLKTX1_HZ);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_CLKTX0_FORCE_OUT1,
+ CSR_2L_PXP_CLKTX0_IMP_SEL, 0x12);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_IMP_SEL,
+ CSR_2L_PXP_CLKTX1_IMP_SEL, 0x12);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV_D256,
+ CSR_2L_PXP_CLKTX0_SR);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
+ CSR_2L_PXP_CLKTX1_SR);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0,
+ CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd);
+}
+
+static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
+ PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
+ PCIE_SW_RX_RST);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
+ PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
+ PCIE_SW_RX_RST);
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
+ PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
+ PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
+}
+
+static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy)
+{
+ writel(0x2a00090b, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_17);
+ writel(0x2a00090b, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_17);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONPI,
+ CSR_2L_PXP_CDR0_PR_XFICK_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONPI,
+ CSR_2L_PXP_CDR1_PR_XFICK_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy,
+ REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV,
+ CSR_2L_PXP_CDR0_PD_EDGE_DISABLE);
+ airoha_phy_csr_2l_clear_bits(pcie_phy,
+ REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV,
+ CSR_2L_PXP_CDR1_PD_EDGE_DISABLE);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
+ CSR_2L_PXP_RX0_PHYCK_SEL, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
+ CSR_2L_PXP_RX1_PHYCK_SEL, 0x1);
+}
+
+static void airoha_pcie_phy_init_jcpll(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_JCPLL_EN);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_JCPLL_EN);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_VTP_EN,
+ CSR_2L_PXP_JCPLL_SPARE_LOW, 0x20);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
+ CSR_2L_PXP_JCPLL_RST);
+ writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_JCPLL_SSC_DELTA1);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
+ CSR_2L_PXP_JCPLL_SSC_PERIOD);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
+ CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
+ CSR_2L_PXP_JCPLL_SSC_TRI_EN);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
+ CSR_2L_PXP_JCPLL_LPF_BR, 0xa);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
+ CSR_2L_PXP_JCPLL_LPF_BP, 0xc);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
+ CSR_2L_PXP_JCPLL_LPF_BC, 0x1f);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
+ CSR_2L_PXP_JCPLL_LPF_BWC, 0x1e);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
+ CSR_2L_PXP_JCPLL_LPF_BWR, 0xa);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
+ CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
+ 0x1);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, CSR_2L_PXP_JCPLL_MONCK,
+ CSR_2L_PXP_JCPLL_REFIN_DIV);
+
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
+ PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
+ 0x50000000);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
+ PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
+ 0x50000000);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy,
+ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
+ CSR_2L_PXP_JCPLL_POSTDIV_D5);
+ airoha_phy_csr_2l_set_bits(pcie_phy,
+ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
+ CSR_2L_PXP_JCPLL_POSTDIV_D2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
+ CSR_2L_PXP_JCPLL_RST_DLY, 0x4);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
+ CSR_2L_PXP_JCPLL_SDM_DI_LS);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
+ CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
+ CSR_2L_PXP_JCPLL_CHP_IOFST);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
+ CSR_2L_PXP_JCPLL_CHP_IBIAS, 0xc);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
+ CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
+ 0x1);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
+ CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
+ CSR_2L_PXP_JCPLL_VCO_CFIX, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
+ CSR_2L_PXP_JCPLL_VCO_SCAPWR, 0x4);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
+ REG_CSR_2L_JCPLL_LPF_SHCK_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
+ CSR_2L_PXP_JCPLL_POSTDIV_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
+ CSR_2L_PXP_JCPLL_KBAND_KFC);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
+ CSR_2L_PXP_JCPLL_KBAND_KF, 0x3);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
+ CSR_2L_PXP_JCPLL_KBAND_KS);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
+ CSR_2L_PXP_JCPLL_KBAND_DIV, 0x1);
+
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN);
+ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
+ PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN);
+
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
+ CSR_2L_PXP_JCPLL_KBAND_CODE, 0xe4);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
+ CSR_2L_PXP_JCPLL_TCL_AMP_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
+ CSR_2L_PXP_JCPLL_TCL_LPF_EN);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
+ CSR_2L_PXP_JCPLL_TCL_KBAND_VREF, 0xf);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
+ CSR_2L_PXP_JCPLL_TCL_AMP_GAIN, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
+ CSR_2L_PXP_JCPLL_TCL_AMP_VREF, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
+ CSR_2L_PXP_JCPLL_TCL_LPF_BW, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCO_TCLVAR,
+ CSR_2L_PXP_JCPLL_VCO_TCLVAR, 0x3);
+
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_JCPLL_EN);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_JCPLL_EN);
+}
+
+static void airoha_pcie_phy_txpll(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_TXPLL_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_TXPLL_EN);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
+ CSR_2L_PXP_TXPLL_PLL_RSTB);
+ writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC_PERIOD,
+ CSR_2L_PXP_txpll_SSC_PERIOD);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
+ CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
+ CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
+ CSR_2L_PXP_TXPLL_REFIN_DIV);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
+ CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);
+
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
+ PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
+ 0xc800000);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
+ PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
+ 0xc800000);
+
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
+ CSR_2L_PXP_TXPLL_SDM_IFM);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
+ CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
+ CSR_2L_PXP_TXPLL_RST_DLY, 0x4);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
+ CSR_2L_PXP_TXPLL_SDM_DI_LS);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
+ CSR_2L_PXP_TXPLL_SDM_ORD, 0x3);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
+ CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
+ writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
+ CSR_2L_PXP_TXPLL_LPF_BP, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
+ CSR_2L_PXP_TXPLL_LPF_BC, 0x18);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
+ CSR_2L_PXP_TXPLL_LPF_BR, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
+ CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
+ CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_VTP,
+ CSR_2L_PXP_TXPLL_SPARE_L, 0x1);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
+ CSR_2L_PXP_TXPLL_LPF_BWC);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
+ CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
+ CSR_2L_PXP_TXPLL_REFIN_DIV);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
+ CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_VCO_SCAPWR,
+ CSR_2L_PXP_TXPLL_VCO_SCAPWR, 0x7);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
+ CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
+
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
+ CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
+ CSR_2L_PXP_TXPLL_LPF_BWR);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
+ CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
+ CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_VTP,
+ CSR_2L_PXP_TXPLL_VTP_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
+ CSR_2L_PXP_TXPLL_PHY_CK1_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
+ CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
+ CSR_2L_PXP_TXPLL_SSC_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_750M_SYS_CK,
+ CSR_2L_PXP_TXPLL_LPF_SHCK_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
+ CSR_2L_PXP_TXPLL_POSTDIV_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
+ CSR_2L_PXP_TXPLL_KBAND_KFC);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
+ CSR_2L_PXP_TXPLL_KBAND_KF, 0x3);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
+ CSR_2L_PXP_txpll_KBAND_KS, 0x1);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
+ CSR_2L_PXP_TXPLL_KBAND_DIV, 0x4);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
+ CSR_2L_PXP_TXPLL_KBAND_CODE, 0xe4);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
+ CSR_2L_PXP_TXPLL_TCL_AMP_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_AMP_VREF,
+ CSR_2L_PXP_TXPLL_TCL_LPF_EN);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
+ CSR_2L_PXP_TXPLL_TCL_KBAND_VREF, 0xf);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
+ CSR_2L_PXP_TXPLL_TCL_AMP_GAIN, 0x3);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_TXPLL_TCL_AMP_VREF,
+ CSR_2L_PXP_TXPLL_TCL_AMP_VREF, 0xb);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
+ CSR_2L_PXP_TXPLL_TCL_LPF_BW, 0x3);
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_TXPLL_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
+ PCIE_FORCE_DA_PXP_TXPLL_EN);
+}
+
+static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
+ CSR_2L_PXP_JCPLL_SSC_DELTA1, 0x106);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
+ CSR_2L_PXP_JCPLL_SSC_DELTA, 0x106);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
+ CSR_2L_PXP_JCPLL_SSC_PERIOD, 0x31b);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
+ CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
+ CSR_2L_PXP_JCPLL_SSC_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM,
+ CSR_2L_PXP_JCPLL_SDM_IFM);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
+ REG_CSR_2L_JCPLL_SDM_HREN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
+ CSR_2L_PXP_JCPLL_SDM_DI_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
+ CSR_2L_PXP_JCPLL_SSC_TRI_EN);
+}
+
+static void
+airoha_pcie_phy_set_rxlan0_signal_detect(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
+ CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON);
+
+ usleep_range(100, 200);
+
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
+ PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
+ PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
+ PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);
+
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
+ CSR_2L_PXP_RX0_SIGDET_PEAK, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
+ CSR_2L_PXP_RX0_SIGDET_VTH_SEL, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
+ CSR_2L_PXP_VOS_PNINV, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
+ CSR_2L_PXP_RX0_SIGDET_LPF_CTRL, 0x1);
+
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
+ PCIE_CAL_OUT_OS, 0x0);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
+ CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB);
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
+ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
+ airoha_phy_pma0_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
+ PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
+ PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
+ PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
+ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
+ PCIE_DISB_RX_SDCAL_EN);
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
+ PCIE_FORCE_RX_SDCAL_EN);
+ usleep_range(150, 200);
+ airoha_phy_pma0_clear_bits(pcie_phy,
+ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
+ PCIE_FORCE_RX_SDCAL_EN);
+}
+
+static void
+airoha_pcie_phy_set_rxlan1_signal_detect(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
+ CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON);
+
+ usleep_range(100, 200);
+
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
+ PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
+ PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
+ PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);
+
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
+ CSR_2L_PXP_RX1_SIGDET_PEAK, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
+ CSR_2L_PXP_RX1_SIGDET_VTH_SEL, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
+ CSR_2L_PXP_VOS_PNINV, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_DAC_RANGE_EYE,
+ CSR_2L_PXP_RX1_SIGDET_LPF_CTRL, 0x1);
+
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
+ PCIE_CAL_OUT_OS, 0x0);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
+ CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB);
+
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
+ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
+ airoha_phy_pma1_update_field(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
+ PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
+ PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
+ PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
+ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
+ PCIE_DISB_RX_SDCAL_EN);
+
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
+ PCIE_FORCE_RX_SDCAL_EN);
+ usleep_range(150, 200);
+ airoha_phy_pma1_clear_bits(pcie_phy,
+ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
+ PCIE_FORCE_RX_SDCAL_EN);
+}
+
+static void airoha_pcie_phy_set_rxflow(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
+ PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
+ PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
+ PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
+ PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);
+
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
+ PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
+ PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
+ airoha_phy_pma0_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
+ PCIE_FORCE_DA_PXP_RX_FE_PWDB |
+ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
+ PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
+ PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
+ airoha_phy_pma1_set_bits(pcie_phy,
+ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
+ PCIE_FORCE_DA_PXP_RX_FE_PWDB |
+ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
+ CSR_2L_PXP_RX0_PHYCK_RSTB |
+ CSR_2L_PXP_RX0_TDC_CK_SEL);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
+ CSR_2L_PXP_RX1_PHYCK_RSTB |
+ CSR_2L_PXP_RX1_TDC_CK_SEL);
+
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
+ PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
+ PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
+ PCIE_SW_TX_FIFO_RST);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
+ PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
+ PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
+ PCIE_SW_TX_FIFO_RST);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
+ CSR_2L_PXP_RX0_FE_VB_EQ2_EN |
+ CSR_2L_PXP_RX0_FE_VB_EQ3_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
+ CSR_2L_PXP_RX0_FE_VB_EQ1_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
+ CSR_2L_PXP_RX1_FE_VB_EQ1_EN |
+ CSR_2L_PXP_RX1_FE_VB_EQ2_EN |
+ CSR_2L_PXP_RX1_FE_VB_EQ3_EN);
+
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
+ CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
+ CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
+ CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
+ CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
+}
+
+static void airoha_pcie_phy_set_pr(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
+ CSR_2L_PXP_CDR0_PR_VREG_IBAND, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
+ CSR_2L_PXP_CDR0_PR_VREG_CKBUF, 0x5);
+
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_CKREF_DIV,
+ CSR_2L_PXP_CDR0_PR_CKREF_DIV);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
+ CSR_2L_PXP_CDR0_PR_CKREF_DIV1);
+
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
+ CSR_2L_PXP_CDR1_PR_VREG_IBAND, 0x5);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
+ CSR_2L_PXP_CDR1_PR_VREG_CKBUF, 0x5);
+
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_CKREF_DIV,
+ CSR_2L_PXP_CDR1_PR_CKREF_DIV);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
+ CSR_2L_PXP_CDR1_PR_CKREF_DIV1);
+
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_LPF_RATIO,
+ CSR_2L_PXP_CDR0_LPF_TOP_LIM, 0x20000);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_LPF_RATIO,
+ CSR_2L_PXP_CDR1_LPF_TOP_LIM, 0x20000);
+
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
+ CSR_2L_PXP_CDR0_PR_BETA_SEL, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
+ CSR_2L_PXP_CDR1_PR_BETA_SEL, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
+ CSR_2L_PXP_CDR0_PR_KBAND_DIV, 0x4);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
+ CSR_2L_PXP_CDR1_PR_KBAND_DIV, 0x4);
+}
+
+static void airoha_pcie_phy_set_txflow(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
+ CSR_2L_PXP_TX0_CKLDO_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
+ CSR_2L_PXP_TX1_CKLDO_EN);
+
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
+ CSR_2L_PXP_TX0_DMEDGEGEN_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
+ CSR_2L_PXP_TX1_DMEDGEGEN_EN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TX1_MULTLANE,
+ CSR_2L_PXP_TX1_MULTLANE_EN);
+}
+
+static void airoha_pcie_phy_set_rx_mode(struct airoha_pcie_phy *pcie_phy)
+{
+ writel(0x804000, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_27);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
+ PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
+ PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
+ PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
+ 0x77700);
+
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
+ CSR_2L_PXP_CDR0_PR_MONCK_ENABLE);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
+ CSR_2L_PXP_CDR0_PR_RESERVE0, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS,
+ CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS, 0x19);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
+ CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS, 0x19);
+ airoha_phy_csr_2l_update_field(pcie_phy,
+ REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
+ CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS, 0x14);
+
+ writel(0x804000, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_27);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
+ PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
+ PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
+ PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);
+
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
+ 0x77700);
+
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
+ CSR_2L_PXP_CDR1_PR_MONCK_ENABLE);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
+ CSR_2L_PXP_CDR1_PR_RESERVE0, 0x2);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
+ CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS, 0x19);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
+ CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS, 0x19);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
+ CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS, 0x14);
+}
+
+static void airoha_pcie_phy_load_kflow(struct airoha_pcie_phy *pcie_phy)
+{
+ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
+ PCIE_FORCE_PMA_RX_SPEED, 0xa);
+ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
+ PCIE_FORCE_PMA_RX_SPEED, 0xa);
+ airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);
+ airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);
+
+ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
+ PCIE_FORCE_PMA_RX_SPEED);
+ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
+ PCIE_FORCE_PMA_RX_SPEED);
+ usleep_range(100, 200);
+
+ airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
+ airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
+}
+
+/**
+ * airoha_pcie_phy_init() - Initialize the phy
+ * @phy: the phy to be initialized
+ *
+ * Initialize the phy registers.
+ * The hardware settings are reset during suspend, so the PHY must be
+ * reinitialized when the consumer calls phy_init() again on resume.
+ */
+static int airoha_pcie_phy_init(struct phy *phy)
+{
+ struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
+ u32 val;
+
+ /* Setup Tx-Rx detection time */
+ val = FIELD_PREP(PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL, 0x33) |
+ FIELD_PREP(PCIE_XTP_RXDET_EN_STB_T_SEL, 0x1) |
+ FIELD_PREP(PCIE_XTP_RXDET_FINISH_STB_T_SEL, 0x2) |
+ FIELD_PREP(PCIE_XTP_TXPD_TX_DATA_EN_DLY, 0x3) |
+ FIELD_PREP(PCIE_XTP_RXDET_LATCH_STB_T_SEL, 0x1);
+ writel(val, pcie_phy->p0_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
+ writel(val, pcie_phy->p1_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
+ /* Setup Rx AEQ training time */
+ val = FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT, 0x32) |
+ FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT, 0x5050);
+ writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P0);
+ writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P1);
+
+ /* enable load FLL-K flow */
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
+ PCIE_FLL_LOAD_EN);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
+ PCIE_FLL_LOAD_EN);
+
+ airoha_pcie_phy_init_default(pcie_phy);
+ airoha_pcie_phy_init_clk_out(pcie_phy);
+ airoha_pcie_phy_init_csr_2l(pcie_phy);
+
+ usleep_range(100, 200);
+
+ airoha_pcie_phy_init_rx(pcie_phy);
+ /* phase 1, no SSC for K TXPLL */
+ airoha_pcie_phy_init_jcpll(pcie_phy);
+
+ usleep_range(500, 600);
+
+ /* TX PLL settings */
+ airoha_pcie_phy_txpll(pcie_phy);
+
+ usleep_range(200, 300);
+
+ /* SSC JCPLL setting */
+ airoha_pcie_phy_init_ssc_jcpll(pcie_phy);
+
+ usleep_range(100, 200);
+
+ /* Rx lane0 signal detect */
+ airoha_pcie_phy_set_rxlan0_signal_detect(pcie_phy);
+ /* Rx lane1 signal detect */
+ airoha_pcie_phy_set_rxlan1_signal_detect(pcie_phy);
+ /* RX FLOW */
+ airoha_pcie_phy_set_rxflow(pcie_phy);
+
+ usleep_range(100, 200);
+
+ airoha_pcie_phy_set_pr(pcie_phy);
+ /* TX FLOW */
+ airoha_pcie_phy_set_txflow(pcie_phy);
+
+ usleep_range(100, 200);
+ /* RX mode setting */
+ airoha_pcie_phy_set_rx_mode(pcie_phy);
+ /* Load K-Flow */
+ airoha_pcie_phy_load_kflow(pcie_phy);
+ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
+ PCIE_DA_XPON_CDR_PR_PWDB);
+ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
+ PCIE_DA_XPON_CDR_PR_PWDB);
+
+ usleep_range(100, 200);
+
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
+ PCIE_DA_XPON_CDR_PR_PWDB);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
+ PCIE_DA_XPON_CDR_PR_PWDB);
+
+ usleep_range(100, 200);
+
+ return 0;
+}
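For readers following the init sequence above, a hedged sketch of the consumer side: a PCIe controller driver obtains this PHY through the generic PHY framework, and on resume it calls phy_init() again, which is why the register programming above must be fully repeatable. The helper name and the "pcie-phy" lookup name are placeholders, not taken from this series.

#include <linux/phy/phy.h>

/* Illustrative consumer pattern; error handling kept minimal. */
static int example_pcie_port_power_up(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "pcie-phy");	/* placeholder con_id */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* ends up in airoha_pcie_phy_init() */
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);		/* undone via airoha_pcie_phy_exit() */

	return ret;
}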
+
+static int airoha_pcie_phy_exit(struct phy *phy)
+{
+ struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
+
+ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
+ PCIE_PMA_SW_RST);
+ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
+ PCIE_PMA_SW_RST);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
+ CSR_2L_PXP_JCPLL_SSC_PHASE_INI |
+ CSR_2L_PXP_JCPLL_SSC_TRI_EN |
+ CSR_2L_PXP_JCPLL_SSC_EN);
+
+ return 0;
+}
+
+static const struct phy_ops airoha_pcie_phy_ops = {
+ .init = airoha_pcie_phy_init,
+ .exit = airoha_pcie_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int airoha_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct airoha_pcie_phy *pcie_phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+
+ pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL);
+ if (!pcie_phy)
+ return -ENOMEM;
+
+ pcie_phy->csr_2l = devm_platform_ioremap_resource_byname(pdev, "csr-2l");
+ if (IS_ERR(pcie_phy->csr_2l))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->csr_2l),
+ "Failed to map phy-csr-2l base\n");
+
+ pcie_phy->pma0 = devm_platform_ioremap_resource_byname(pdev, "pma0");
+ if (IS_ERR(pcie_phy->pma0))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->pma0),
+ "Failed to map phy-pma0 base\n");
+
+ pcie_phy->pma1 = devm_platform_ioremap_resource_byname(pdev, "pma1");
+ if (IS_ERR(pcie_phy->pma1))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->pma1),
+ "Failed to map phy-pma1 base\n");
+
+ pcie_phy->phy = devm_phy_create(dev, dev->of_node, &airoha_pcie_phy_ops);
+ if (IS_ERR(pcie_phy->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
+ "Failed to create PCIe phy\n");
+
+ pcie_phy->p0_xr_dtime =
+ devm_platform_ioremap_resource_byname(pdev, "p0-xr-dtime");
+ if (IS_ERR(pcie_phy->p0_xr_dtime))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->p0_xr_dtime),
+ "Failed to map P0 Tx-Rx dtime base\n");
+
+ pcie_phy->p1_xr_dtime =
+ devm_platform_ioremap_resource_byname(pdev, "p1-xr-dtime");
+ if (IS_ERR(pcie_phy->p1_xr_dtime))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->p1_xr_dtime),
+ "Failed to map P1 Tx-Rx dtime base\n");
+
+ pcie_phy->rx_aeq = devm_platform_ioremap_resource_byname(pdev, "rx-aeq");
+ if (IS_ERR(pcie_phy->rx_aeq))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->rx_aeq),
+ "Failed to map Rx AEQ base\n");
+
+ pcie_phy->dev = dev;
+ phy_set_drvdata(pcie_phy->phy, pcie_phy);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider))
+ return dev_err_probe(dev, PTR_ERR(provider),
+ "PCIe phy probe failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id airoha_pcie_phy_of_match[] = {
+ { .compatible = "airoha,en7581-pcie-phy" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, airoha_pcie_phy_of_match);
+
+static struct platform_driver airoha_pcie_phy_driver = {
+ .probe = airoha_pcie_phy_probe,
+ .driver = {
+ .name = "airoha-pcie-phy",
+ .of_match_table = airoha_pcie_phy_of_match,
+ },
+};
+module_platform_driver(airoha_pcie_phy_driver);
+
+MODULE_DESCRIPTION("Airoha PCIe PHY driver");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index bf6a07590321..f053b525ccff 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -664,7 +664,7 @@ out_unlock:
*
* Returns the phy driver, after getting a refcount to it; or
* -ENODEV if there is no such phy. The caller is responsible for
- * calling phy_put() to release that count.
+ * calling of_phy_put() to release that count.
*/
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
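A minimal sketch of the get/put pairing this comment fix documents; the device-node argument and the helper name are hypothetical.

#include <linux/of.h>
#include <linux/phy/phy.h>

/* np: a consumer node whose "phys" property points at a registered provider. */
static int example_use_of_phy(struct device_node *np)
{
	struct phy *phy = of_phy_get(np, NULL);

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* ... phy_init()/phy_power_on() as needed ... */

	of_phy_put(phy);	/* drops the refcount taken by of_phy_get() */
	return 0;
}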
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 6c796723c8f5..5b36cc7ac78b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -489,6 +489,243 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
};
+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x61),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x73),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x14),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x06),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x14),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_PRE, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_POST, 0x58),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4, 0x19),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x49),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x2a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -2535,6 +2772,16 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = {
.rx2 = 0x1800,
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_ipq9574 = {
+ .serdes = 0,
+ .pcs = 0x1000,
+ .pcs_misc = 0x1400,
+ .tx = 0x0200,
+ .rx = 0x0400,
+ .tx2 = 0x0600,
+ .rx2 = 0x0800,
+};
+
static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
@@ -2647,6 +2894,62 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
.phy_status = PHYSTATUS,
};
+static const struct qmp_phy_cfg ipq9574_gen3x1_pciephy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_pcie_offsets_v4x1,
+
+ .tbls = {
+ .serdes = ipq9574_gen3x1_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_serdes_tbl),
+ .tx = ipq8074_pcie_gen3_tx_tbl,
+ .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
+ .rx = ipq9574_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl),
+ .pcs = ipq9574_gen3x1_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_tbl),
+ .pcs_misc = ipq9574_gen3x1_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_misc_tbl),
+ },
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = pciephy_v4_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+ .pipe_clock_rate = 250000000,
+};
+
+static const struct qmp_phy_cfg ipq9574_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_ipq9574,
+
+ .tbls = {
+ .serdes = ipq9574_gen3x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_serdes_tbl),
+ .tx = ipq8074_pcie_gen3_tx_tbl,
+ .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
+ .rx = ipq9574_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl),
+ .pcs = ipq9574_gen3x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_tbl),
+ .pcs_misc = ipq9574_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_misc_tbl),
+ },
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+ .pipe_clock_rate = 250000000,
+};
+
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.lanes = 1,
@@ -3730,14 +4033,11 @@ static int phy_aux_clk_register(struct qmp_pcie *qmp, struct device_node *np)
{
struct clk_fixed_rate *fixed = &qmp->aux_clk_fixed;
struct clk_init_data init = { };
- int ret;
+ char name[64];
- ret = of_property_read_string_index(np, "clock-output-names", 1, &init.name);
- if (ret) {
- dev_err(qmp->dev, "%pOFn: No clock-output-names index 1\n", np);
- return ret;
- }
+ snprintf(name, sizeof(name), "%s::phy_aux_clk", dev_name(qmp->dev));
+ init.name = name;
init.ops = &clk_fixed_rate_ops;
fixed->fixed_rate = qmp->cfg->aux_clock_rate;
@@ -4031,6 +4331,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,ipq8074-qmp-pcie-phy",
.data = &ipq8074_pciephy_cfg,
}, {
+ .compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy",
+ .data = &ipq9574_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy",
+ .data = &ipq9574_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,msm8998-qmp-pcie-phy",
.data = &msm8998_pciephy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
index a469ae2a10a1..fa15a03055de 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
@@ -11,8 +11,22 @@
#define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2 0x0c
#define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4 0x14
#define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
+#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x44
+#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x48
+#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x4c
+#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x50
#define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1 0x5c
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2 0x60
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4 0x68
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x7c
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x84
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x88
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6 0x8c
#define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+#define QPHY_V5_PCS_PCIE_EQ_CONFIG1 0xa4
#define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8
+#define QPHY_V5_PCS_PCIE_PRESET_P10_PRE 0xc0
+#define QPHY_V5_PCS_PCIE_PRESET_P10_POST 0xe4
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
index ad326e301a3a..231e59364e31 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
@@ -8,6 +8,9 @@
/* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */
#define QSERDES_PLL_BG_TIMER 0x00c
+#define QSERDES_PLL_SSC_EN_CENTER 0x010
+#define QSERDES_PLL_SSC_ADJ_PER1 0x014
+#define QSERDES_PLL_SSC_ADJ_PER2 0x018
#define QSERDES_PLL_SSC_PER1 0x01c
#define QSERDES_PLL_SSC_PER2 0x020
#define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index c174463c58a3..9b0eb87b1680 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -2253,6 +2253,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
.data = &sa8775p_usb3_uniphy_cfg,
}, {
+ .compatible = "qcom,sc8180x-qmp-usb3-uni-phy",
+ .data = &sm8150_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sc8280xp-qmp-usb3-uni-phy",
.data = &sc8280xp_usb3_uniphy_cfg,
}, {
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 08b0f4345760..490263375057 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -86,7 +86,9 @@ config PHY_ROCKCHIP_PCIE
config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ depends on HAS_IOMEM
select GENERIC_PHY
+ select MFD_SYSCON
select RATIONAL
help
Enable this to support the Rockchip HDMI/eDP Combo PHY
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 04171eed5b16..df52b78a120b 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -8,6 +8,7 @@
* Author: Vivek Gautam <gautam.vivek@samsung.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -30,18 +31,16 @@
#define EXYNOS5_FSEL_19MHZ2 0x3
#define EXYNOS5_FSEL_20MHZ 0x4
#define EXYNOS5_FSEL_24MHZ 0x5
-#define EXYNOS5_FSEL_26MHZ 0x82
+#define EXYNOS5_FSEL_26MHZ 0x6
#define EXYNOS5_FSEL_50MHZ 0x7
/* Exynos5: USB 3.0 DRD PHY registers */
#define EXYNOS5_DRD_LINKSYSTEM 0x04
-
+#define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27)
#define LINKSYSTEM_FLADJ_MASK (0x3f << 1)
#define LINKSYSTEM_FLADJ(_x) ((_x) << 1)
-#define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27)
#define EXYNOS5_DRD_PHYUTMI 0x08
-
#define PHYUTMI_OTGDISABLE BIT(6)
#define PHYUTMI_FORCESUSPEND BIT(1)
#define PHYUTMI_FORCESLEEP BIT(0)
@@ -49,40 +48,31 @@
#define EXYNOS5_DRD_PHYPIPE 0x0c
#define EXYNOS5_DRD_PHYCLKRST 0x10
-
#define PHYCLKRST_EN_UTMISUSPEND BIT(31)
-
#define PHYCLKRST_SSC_REFCLKSEL_MASK (0xff << 23)
#define PHYCLKRST_SSC_REFCLKSEL(_x) ((_x) << 23)
-
#define PHYCLKRST_SSC_RANGE_MASK (0x03 << 21)
#define PHYCLKRST_SSC_RANGE(_x) ((_x) << 21)
-
#define PHYCLKRST_SSC_EN BIT(20)
#define PHYCLKRST_REF_SSP_EN BIT(19)
#define PHYCLKRST_REF_CLKDIV2 BIT(18)
-
#define PHYCLKRST_MPLL_MULTIPLIER_MASK (0x7f << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_100MHZ_REF (0x19 << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_50M_REF (0x32 << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF (0x68 << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF (0x7d << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF (0x02 << 11)
-
-#define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5)
#define PHYCLKRST_FSEL_PIPE_MASK (0x7 << 8)
+#define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5)
#define PHYCLKRST_FSEL(_x) ((_x) << 5)
#define PHYCLKRST_FSEL_PAD_100MHZ (0x27 << 5)
#define PHYCLKRST_FSEL_PAD_24MHZ (0x2a << 5)
#define PHYCLKRST_FSEL_PAD_20MHZ (0x31 << 5)
#define PHYCLKRST_FSEL_PAD_19_2MHZ (0x38 << 5)
-
#define PHYCLKRST_RETENABLEN BIT(4)
-
#define PHYCLKRST_REFCLKSEL_MASK (0x03 << 2)
#define PHYCLKRST_REFCLKSEL_PAD_REFCLK (0x2 << 2)
#define PHYCLKRST_REFCLKSEL_EXT_REFCLK (0x3 << 2)
-
#define PHYCLKRST_PORTRESET BIT(1)
#define PHYCLKRST_COMMONONN BIT(0)
@@ -100,30 +90,27 @@
#define PHYREG1_CR_ACK BIT(0)
#define EXYNOS5_DRD_PHYPARAM0 0x1c
-
#define PHYPARAM0_REF_USE_PAD BIT(31)
#define PHYPARAM0_REF_LOSLEVEL_MASK (0x1f << 26)
#define PHYPARAM0_REF_LOSLEVEL (0x9 << 26)
#define EXYNOS5_DRD_PHYPARAM1 0x20
-
#define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0)
#define PHYPARAM1_PCS_TXDEEMPH (0x1c)
#define EXYNOS5_DRD_PHYTERM 0x24
#define EXYNOS5_DRD_PHYTEST 0x28
-
#define PHYTEST_POWERDOWN_SSP BIT(3)
#define PHYTEST_POWERDOWN_HSP BIT(2)
#define EXYNOS5_DRD_PHYADP 0x2c
#define EXYNOS5_DRD_PHYUTMICLKSEL 0x30
-
#define PHYUTMICLKSEL_UTMI_CLKSEL BIT(2)
#define EXYNOS5_DRD_PHYRESUME 0x34
+
#define EXYNOS5_DRD_LINKPORT 0x44
/* USB 3.0 DRD PHY SS Function Control Reg; accessed by CR_PORT */
@@ -147,35 +134,215 @@
/* Exynos850: USB DRD PHY registers */
#define EXYNOS850_DRD_LINKCTRL 0x04
-#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4)
+#define LINKCTRL_FORCE_RXELECIDLE BIT(18)
+#define LINKCTRL_FORCE_PHYSTATUS BIT(17)
+#define LINKCTRL_FORCE_PIPE_EN BIT(16)
#define LINKCTRL_FORCE_QACT BIT(8)
+#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4)
+
+#define EXYNOS850_DRD_LINKPORT 0x08
+#define LINKPORT_HOST_NUM_U3 GENMASK(19, 16)
+#define LINKPORT_HOST_NUM_U2 GENMASK(15, 12)
#define EXYNOS850_DRD_CLKRST 0x20
-#define CLKRST_LINK_SW_RST BIT(0)
-#define CLKRST_PORT_RST BIT(1)
+/*
+ * On versions without SS ports (like E850), bit 3 is for the 2.0 phy (HS),
+ * while on versions with SS ports (like gs101), bits 2 and 3 are for the 3.0 phy (SS)
+ * and bits 12 & 13 for the 2.0 phy.
+ */
+#define CLKRST_PHY20_SW_POR BIT(13)
+#define CLKRST_PHY20_SW_POR_SEL BIT(12)
+#define CLKRST_LINK_PCLK_SEL BIT(7)
#define CLKRST_PHY_SW_RST BIT(3)
+#define CLKRST_PHY_RESET_SEL BIT(2)
+#define CLKRST_PORT_RST BIT(1)
+#define CLKRST_LINK_SW_RST BIT(0)
+
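A hedged illustration of the layout difference described in the comment above: which bit(s) a variant-specific init path would set to assert the HS PHY power-on reset. The helper, the bool selector and the choice to set the _SEL bit together with the POR bit are assumptions, not code from this series.

/* Hypothetical helper; real drivers do this inside their per-variant init. */
static void example_hs_phy_por_assert(void __iomem *regs_base, bool has_ss_ports)
{
	u32 reg = readl(regs_base + EXYNOS850_DRD_CLKRST);

	if (has_ss_ports)	/* gs101-style layout: bits 12/13 */
		reg |= CLKRST_PHY20_SW_POR_SEL | CLKRST_PHY20_SW_POR;
	else			/* E850-style layout: bit 3 */
		reg |= CLKRST_PHY_SW_RST;
	writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
}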
+#define EXYNOS850_DRD_SSPPLLCTL 0x30
+#define SSPPLLCTL_FSEL GENMASK(2, 0)
#define EXYNOS850_DRD_UTMI 0x50
-#define UTMI_FORCE_SLEEP BIT(0)
-#define UTMI_FORCE_SUSPEND BIT(1)
-#define UTMI_DM_PULLDOWN BIT(2)
-#define UTMI_DP_PULLDOWN BIT(3)
-#define UTMI_FORCE_BVALID BIT(4)
#define UTMI_FORCE_VBUSVALID BIT(5)
+#define UTMI_FORCE_BVALID BIT(4)
+#define UTMI_DP_PULLDOWN BIT(3)
+#define UTMI_DM_PULLDOWN BIT(2)
+#define UTMI_FORCE_SUSPEND BIT(1)
+#define UTMI_FORCE_SLEEP BIT(0)
#define EXYNOS850_DRD_HSP 0x54
-#define HSP_COMMONONN BIT(8)
-#define HSP_EN_UTMISUSPEND BIT(9)
-#define HSP_VBUSVLDEXT BIT(12)
-#define HSP_VBUSVLDEXTSEL BIT(13)
#define HSP_FSV_OUT_EN BIT(24)
+#define HSP_VBUSVLDEXTSEL BIT(13)
+#define HSP_VBUSVLDEXT BIT(12)
+#define HSP_EN_UTMISUSPEND BIT(9)
+#define HSP_COMMONONN BIT(8)
+
+#define EXYNOS850_DRD_HSPPARACON 0x58
+#define HSPPARACON_TXVREF GENMASK(31, 28)
+#define HSPPARACON_TXRISE GENMASK(25, 24)
+#define HSPPARACON_TXRES GENMASK(22, 21)
+#define HSPPARACON_TXPREEMPPULSE BIT(20)
+#define HSPPARACON_TXPREEMPAMP GENMASK(19, 18)
+#define HSPPARACON_TXHSXV GENMASK(17, 16)
+#define HSPPARACON_TXFSLS GENMASK(15, 12)
+#define HSPPARACON_SQRX GENMASK(10, 8)
+#define HSPPARACON_OTG GENMASK(6, 4)
+#define HSPPARACON_COMPDIS GENMASK(2, 0)
#define EXYNOS850_DRD_HSP_TEST 0x5c
#define HSP_TEST_SIDDQ BIT(24)
+/* Exynos9 - GS101 */
+#define EXYNOS850_DRD_SECPMACTL 0x48
+#define SECPMACTL_PMA_ROPLL_REF_CLK_SEL GENMASK(13, 12)
+#define SECPMACTL_PMA_LCPLL_REF_CLK_SEL GENMASK(11, 10)
+#define SECPMACTL_PMA_REF_FREQ_SEL GENMASK(9, 8)
+#define SECPMACTL_PMA_LOW_PWR BIT(4)
+#define SECPMACTL_PMA_TRSV_SW_RST BIT(3)
+#define SECPMACTL_PMA_CMN_SW_RST BIT(2)
+#define SECPMACTL_PMA_INIT_SW_RST BIT(1)
+#define SECPMACTL_PMA_APB_SW_RST BIT(0)
+
+/* PMA registers */
+#define EXYNOS9_PMA_USBDP_CMN_REG0008 0x0020
+#define CMN_REG0008_OVRD_AUX_EN BIT(3)
+#define CMN_REG0008_AUX_EN BIT(2)
+
+#define EXYNOS9_PMA_USBDP_CMN_REG00B8 0x02e0
+#define CMN_REG00B8_LANE_MUX_SEL_DP GENMASK(3, 0)
+
+#define EXYNOS9_PMA_USBDP_CMN_REG01C0 0x0700
+#define CMN_REG01C0_ANA_LCPLL_LOCK_DONE BIT(7)
+#define CMN_REG01C0_ANA_LCPLL_AFC_DONE BIT(6)
+
+/* these have similar register layout, for lanes 0 and 2 */
+#define EXYNOS9_PMA_USBDP_TRSV_REG03C3 0x0f0c
+#define EXYNOS9_PMA_USBDP_TRSV_REG07C3 0x1f0c
+#define TRSV_REG03C3_LN0_MON_RX_CDR_AFC_DONE BIT(3)
+#define TRSV_REG03C3_LN0_MON_RX_CDR_CAL_DONE BIT(2)
+#define TRSV_REG03C3_LN0_MON_RX_CDR_FLD_PLL_MODE_DONE BIT(1)
+#define TRSV_REG03C3_LN0_MON_RX_CDR_LOCK_DONE BIT(0)
+
+/* TRSV_REG0413 and TRSV_REG0813 have similar register layout */
+#define EXYNOS9_PMA_USBDP_TRSV_REG0413 0x104c
+#define TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN BIT(7)
+#define TRSV_REG0413_OVRD_LN1_TX_RXD_EN BIT(5)
+
+#define EXYNOS9_PMA_USBDP_TRSV_REG0813 0x204c
+#define TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN BIT(7)
+#define TRSV_REG0813_OVRD_LN3_TX_RXD_EN BIT(5)
+
+/* PCS registers */
+#define EXYNOS9_PCS_NS_VEC_PS1_N1 0x010c
+#define EXYNOS9_PCS_NS_VEC_PS2_N0 0x0110
+#define EXYNOS9_PCS_NS_VEC_PS3_N0 0x0118
+#define NS_VEC_NS_REQ GENMASK(31, 24)
+#define NS_VEC_ENABLE_TIMER BIT(22)
+#define NS_VEC_SEL_TIMEOUT GENMASK(21, 20)
+#define NS_VEC_INV_MASK GENMASK(19, 16)
+#define NS_VEC_COND_MASK GENMASK(11, 8)
+#define NS_VEC_EXP_COND GENMASK(3, 0)
+
+#define EXYNOS9_PCS_OUT_VEC_2 0x014c
+#define EXYNOS9_PCS_OUT_VEC_3 0x0150
+#define PCS_OUT_VEC_B9_DYNAMIC BIT(19)
+#define PCS_OUT_VEC_B9_SEL_OUT BIT(18)
+#define PCS_OUT_VEC_B8_DYNAMIC BIT(17)
+#define PCS_OUT_VEC_B8_SEL_OUT BIT(16)
+#define PCS_OUT_VEC_B7_DYNAMIC BIT(15)
+#define PCS_OUT_VEC_B7_SEL_OUT BIT(14)
+#define PCS_OUT_VEC_B6_DYNAMIC BIT(13)
+#define PCS_OUT_VEC_B6_SEL_OUT BIT(12)
+#define PCS_OUT_VEC_B5_DYNAMIC BIT(11)
+#define PCS_OUT_VEC_B5_SEL_OUT BIT(10)
+#define PCS_OUT_VEC_B4_DYNAMIC BIT(9)
+#define PCS_OUT_VEC_B4_SEL_OUT BIT(8)
+#define PCS_OUT_VEC_B3_DYNAMIC BIT(7)
+#define PCS_OUT_VEC_B3_SEL_OUT BIT(6)
+#define PCS_OUT_VEC_B2_DYNAMIC BIT(5)
+#define PCS_OUT_VEC_B2_SEL_OUT BIT(4)
+#define PCS_OUT_VEC_B1_DYNAMIC BIT(3)
+#define PCS_OUT_VEC_B1_SEL_OUT BIT(2)
+#define PCS_OUT_VEC_B0_DYNAMIC BIT(1)
+#define PCS_OUT_VEC_B0_SEL_OUT BIT(0)
+
+#define EXYNOS9_PCS_TIMEOUT_0 0x0170
+
+#define EXYNOS9_PCS_TIMEOUT_3 0x017c
+
+#define EXYNOS9_PCS_EBUF_PARAM 0x0304
+#define EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE GENMASK(29, 24)
+
+#define EXYNOS9_PCS_BACK_END_MODE_VEC 0x030c
+#define BACK_END_MODE_VEC_FORCE_EBUF_EMPTY_MODE BIT(1)
+#define BACK_END_MODE_VEC_DISABLE_DATA_MASK BIT(0)
+
+#define EXYNOS9_PCS_RX_CONTROL 0x03f0
+#define RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B BIT(22)
+
+#define EXYNOS9_PCS_RX_CONTROL_DEBUG 0x03f4
+#define RX_CONTROL_DEBUG_EN_TS_CHECK BIT(5)
+#define RX_CONTROL_DEBUG_NUM_COM_FOUND GENMASK(3, 0)
+
+#define EXYNOS9_PCS_LOCAL_COEF 0x040c
+#define LOCAL_COEF_PMA_CENTER_COEF GENMASK(21, 16)
+#define LOCAL_COEF_LF GENMASK(13, 8)
+#define LOCAL_COEF_FS GENMASK(5, 0)
+
+#define EXYNOS9_PCS_HS_TX_COEF_MAP_0 0x0410
+#define HS_TX_COEF_MAP_0_SSTX_DEEMP GENMASK(17, 12)
+#define HS_TX_COEF_MAP_0_SSTX_LEVEL GENMASK(11, 6)
+#define HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT GENMASK(5, 0)
+
+
#define KHZ 1000
#define MHZ (KHZ * KHZ)
+#define PHY_TUNING_ENTRY_PHY(o, m, v) { \
+ .off = (o), \
+ .mask = (m), \
+ .val = (v), \
+ .region = PTR_PHY \
+ }
+
+#define PHY_TUNING_ENTRY_PCS(o, m, v) { \
+ .off = (o), \
+ .mask = (m), \
+ .val = (v), \
+ .region = PTR_PCS \
+ }
+
+#define PHY_TUNING_ENTRY_PMA(o, m, v) { \
+ .off = (o), \
+ .mask = (m), \
+ .val = (v), \
+ .region = PTR_PMA, \
+ }
+
+#define PHY_TUNING_ENTRY_LAST { .region = PTR_INVALID }
+
+#define for_each_phy_tune(tune) \
+ for (; (tune)->region != PTR_INVALID; ++(tune))
+
+struct exynos5_usbdrd_phy_tuning {
+ u32 off;
+ u32 mask;
+ u32 val;
+ char region;
+#define PTR_INVALID 0
+#define PTR_PHY 1
+#define PTR_PCS 2
+#define PTR_PMA 3
+};
+
+enum exynos5_usbdrd_phy_tuning_state {
+ PTS_UTMI_POSTINIT,
+ PTS_PIPE3_PREINIT,
+ PTS_PIPE3_INIT,
+ PTS_PIPE3_POSTINIT,
+ PTS_PIPE3_POSTLOCK,
+ PTS_MAX,
+};
+
enum exynos5_usbdrd_phy_id {
EXYNOS5_DRDPHY_UTMI,
EXYNOS5_DRDPHY_PIPE3,
@@ -187,44 +354,48 @@ struct exynos5_usbdrd_phy;
struct exynos5_usbdrd_phy_config {
u32 id;
- void (*phy_isol)(struct phy_usb_instance *inst, u32 on);
+ void (*phy_isol)(struct phy_usb_instance *inst, bool isolate);
void (*phy_init)(struct exynos5_usbdrd_phy *phy_drd);
unsigned int (*set_refclk)(struct phy_usb_instance *inst);
};
struct exynos5_usbdrd_phy_drvdata {
const struct exynos5_usbdrd_phy_config *phy_cfg;
+ const struct exynos5_usbdrd_phy_tuning **phy_tunes;
const struct phy_ops *phy_ops;
+ const char * const *clk_names;
+ int n_clks;
+ const char * const *core_clk_names;
+ int n_core_clks;
+ const char * const *regulator_names;
+ int n_regulators;
u32 pmu_offset_usbdrd0_phy;
+ u32 pmu_offset_usbdrd0_phy_ss;
u32 pmu_offset_usbdrd1_phy;
- bool has_common_clk_gate;
};
/**
* struct exynos5_usbdrd_phy - driver data for USB 3.0 PHY
* @dev: pointer to device instance of this platform device
* @reg_phy: usb phy controller register memory base
- * @clk: phy clock for register access
- * @pipeclk: clock for pipe3 phy
- * @utmiclk: clock for utmi+ phy
- * @itpclk: clock for ITP generation
+ * @reg_pcs: usb phy physical coding sublayer register memory base
+ * @reg_pma: usb phy physical media attachment register memory base
+ * @clks: clocks for register access
+ * @core_clks: core clocks for phy (ref, pipe3, utmi+, ITP, etc. as required)
* @drv_data: pointer to SoC level driver data structure
* @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
* instances each with its 'phy' and 'phy_cfg'.
* @extrefclk: frequency select settings when using 'separate
* reference clocks' for SS and HS operations
- * @ref_clk: reference clock to PHY block from which PHY's
- * operational clocks are derived
- * @vbus: VBUS regulator for phy
- * @vbus_boost: Boost regulator for VBUS present on few Exynos boards
+ * @regulators: regulators for phy
*/
struct exynos5_usbdrd_phy {
struct device *dev;
void __iomem *reg_phy;
- struct clk *clk;
- struct clk *pipeclk;
- struct clk *utmiclk;
- struct clk *itpclk;
+ void __iomem *reg_pcs;
+ void __iomem *reg_pma;
+ struct clk_bulk_data *clks;
+ struct clk_bulk_data *core_clks;
const struct exynos5_usbdrd_phy_drvdata *drv_data;
struct phy_usb_instance {
struct phy *phy;
@@ -234,9 +405,7 @@ struct exynos5_usbdrd_phy {
const struct exynos5_usbdrd_phy_config *phy_cfg;
} phys[EXYNOS5_DRDPHYS_NUM];
u32 extrefclk;
- struct clk *ref_clk;
- struct regulator *vbus;
- struct regulator *vbus_boost;
+ struct regulator_bulk_data *regulators;
};
static inline
@@ -287,14 +456,14 @@ static unsigned int exynos5_rate_to_clk(unsigned long rate, u32 *reg)
}
static void exynos5_usbdrd_phy_isol(struct phy_usb_instance *inst,
- unsigned int on)
+ bool isolate)
{
unsigned int val;
if (!inst->reg_pmu)
return;
- val = on ? 0 : EXYNOS4_PHY_ENABLE;
+ val = isolate ? 0 : EXYNOS4_PHY_ENABLE;
regmap_update_bits(inst->reg_pmu, inst->pmu_offset,
EXYNOS4_PHY_ENABLE, val);
@@ -371,6 +540,45 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
return reg;
}
+static void
+exynos5_usbdrd_apply_phy_tunes(struct exynos5_usbdrd_phy *phy_drd,
+ enum exynos5_usbdrd_phy_tuning_state state)
+{
+ const struct exynos5_usbdrd_phy_tuning *tune;
+
+ tune = phy_drd->drv_data->phy_tunes[state];
+ if (!tune)
+ return;
+
+ for_each_phy_tune(tune) {
+ void __iomem *reg_base;
+ u32 reg = 0;
+
+ switch (tune->region) {
+ case PTR_PHY:
+ reg_base = phy_drd->reg_phy;
+ break;
+ case PTR_PCS:
+ reg_base = phy_drd->reg_pcs;
+ break;
+ case PTR_PMA:
+ reg_base = phy_drd->reg_pma;
+ break;
+ default:
+ dev_warn_once(phy_drd->dev,
+ "unknown phy region %d\n", tune->region);
+ continue;
+ }
+
+ if (~tune->mask) {
+ reg = readl(reg_base + tune->off);
+ reg &= ~tune->mask;
+ }
+ reg |= tune->val;
+ writel(reg, reg_base + tune->off);
+ }
+}
+
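The tuning machinery above is table-driven per SoC; below is a minimal sketch of such a table built only from macros and register/field names introduced in this patch. The table names, the chosen registers and the values are placeholders, not settings for any real SoC.

/* Placeholder tuning table; offsets/masks exist in this patch, values are illustrative. */
static const struct exynos5_usbdrd_phy_tuning example_pipe3_preinit_tunes[] = {
	PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_RX_CONTROL,
			     RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B,
			     RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B),
	PHY_TUNING_ENTRY_PMA(EXYNOS9_PMA_USBDP_CMN_REG0008,
			     CMN_REG0008_OVRD_AUX_EN | CMN_REG0008_AUX_EN,
			     CMN_REG0008_OVRD_AUX_EN),
	PHY_TUNING_ENTRY_LAST
};

static const struct exynos5_usbdrd_phy_tuning *example_phy_tunes[PTS_MAX] = {
	[PTS_PIPE3_PREINIT] = example_pipe3_preinit_tunes,
};

A drvdata would point its .phy_tunes member at an array like example_phy_tunes; exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_PREINIT) then walks the table with for_each_phy_tune(), doing a read-modify-write for partial masks (the ~tune->mask test skips the read when the mask covers the whole 32-bit register).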
static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd)
{
u32 reg;
@@ -386,6 +594,129 @@ static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd)
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
}
+static void
+exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(struct exynos5_usbdrd_phy *phy_drd)
+{
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+
+ /* link pipe_clock selection to pclk of PMA */
+ reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ reg |= CLKRST_LINK_PCLK_SEL;
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+
+ reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL);
+ reg &= ~SECPMACTL_PMA_REF_FREQ_SEL;
+ reg |= FIELD_PREP_CONST(SECPMACTL_PMA_REF_FREQ_SEL, 1);
+ /* SFR reset */
+ reg |= (SECPMACTL_PMA_LOW_PWR | SECPMACTL_PMA_APB_SW_RST);
+ reg &= ~(SECPMACTL_PMA_ROPLL_REF_CLK_SEL |
+ SECPMACTL_PMA_LCPLL_REF_CLK_SEL);
+ /* PMA power off */
+ reg |= (SECPMACTL_PMA_TRSV_SW_RST | SECPMACTL_PMA_CMN_SW_RST |
+ SECPMACTL_PMA_INIT_SW_RST);
+ writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL);
+
+ udelay(1);
+
+ reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL);
+ reg &= ~SECPMACTL_PMA_LOW_PWR;
+ writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL);
+
+ udelay(1);
+
+ /* release override */
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg &= ~LINKCTRL_FORCE_PIPE_EN;
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ udelay(1);
+
+ /* APB enable */
+ reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL);
+ reg &= ~SECPMACTL_PMA_APB_SW_RST;
+ writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL);
+}
+
+static void
+exynos5_usbdrd_usbdp_g2_v4_pma_lane_mux_sel(struct exynos5_usbdrd_phy *phy_drd)
+{
+ void __iomem *regs_base = phy_drd->reg_pma;
+ u32 reg;
+
+ /* lane configuration: USB on all lanes */
+ reg = readl(regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8);
+ reg &= ~CMN_REG00B8_LANE_MUX_SEL_DP;
+ writel(reg, regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8);
+
+ /*
+ * FIXME: below code supports one connector orientation only. It needs
+ * updating once we can receive connector events.
+ */
+ /* override of TX receiver detector and comparator: lane 1 */
+ reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413);
+ reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN;
+ reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_EN;
+ writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413);
+
+ /* lane 3 */
+ reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813);
+ reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN;
+ reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_EN;
+ writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813);
+}
+
+static int
+exynos5_usbdrd_usbdp_g2_v4_pma_check_pll_lock(struct exynos5_usbdrd_phy *phy_drd)
+{
+ static const unsigned int timeout_us = 40000;
+ static const unsigned int sleep_us = 40;
+ static const u32 locked = (CMN_REG01C0_ANA_LCPLL_LOCK_DONE |
+ CMN_REG01C0_ANA_LCPLL_AFC_DONE);
+ u32 reg;
+ int err;
+
+ err = readl_poll_timeout(
+ phy_drd->reg_pma + EXYNOS9_PMA_USBDP_CMN_REG01C0,
+ reg, (reg & locked) == locked, sleep_us, timeout_us);
+ if (err)
+ dev_err(phy_drd->dev,
+ "timed out waiting for PLL lock: %#.8x\n", reg);
+
+ return err;
+}
+
+static void
+exynos5_usbdrd_usbdp_g2_v4_pma_check_cdr_lock(struct exynos5_usbdrd_phy *phy_drd)
+{
+ static const unsigned int timeout_us = 40000;
+ static const unsigned int sleep_us = 40;
+ static const u32 locked =
+ (TRSV_REG03C3_LN0_MON_RX_CDR_AFC_DONE
+ | TRSV_REG03C3_LN0_MON_RX_CDR_CAL_DONE
+ | TRSV_REG03C3_LN0_MON_RX_CDR_FLD_PLL_MODE_DONE
+ | TRSV_REG03C3_LN0_MON_RX_CDR_LOCK_DONE);
+ u32 reg;
+ int err;
+
+ err = readl_poll_timeout(
+ phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG03C3,
+ reg, (reg & locked) == locked, sleep_us, timeout_us);
+ if (!err)
+ return;
+
+ dev_err(phy_drd->dev,
+ "timed out waiting for CDR lock (l0): %#.8x, retrying\n", reg);
+
+ /* based on cable orientation, this might be on the other phy port */
+ err = readl_poll_timeout(
+ phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG07C3,
+ reg, (reg & locked) == locked, sleep_us, timeout_us);
+ if (err)
+ dev_err(phy_drd->dev,
+ "timed out waiting for CDR lock (l2): %#.8x\n", reg);
+}
+
static void exynos5_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
{
u32 reg;
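The PLL and CDR lock checks added above follow the generic readl_poll_timeout() helper from <linux/iopoll.h>. A minimal sketch of that polling idiom, using hypothetical register offsets and bit names rather than anything from this patch:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Hypothetical example: poll a status register until two done bits are set. */
#define EXAMPLE_STATUS_REG      0x1c0
#define EXAMPLE_DONE_BITS       (BIT(0) | BIT(1))

static int example_wait_ready(struct device *dev, void __iomem *base)
{
        u32 reg;
        int err;

        /* sample every 40 us, give up after 40 ms */
        err = readl_poll_timeout(base + EXAMPLE_STATUS_REG, reg,
                                 (reg & EXAMPLE_DONE_BITS) == EXAMPLE_DONE_BITS,
                                 40, 40000);
        if (err)
                dev_err(dev, "timed out waiting for ready: %#.8x\n", reg);

        return err;
}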
@@ -417,7 +748,7 @@ static int exynos5_usbdrd_phy_init(struct phy *phy)
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
- ret = clk_prepare_enable(phy_drd->clk);
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
if (ret)
return ret;
@@ -462,12 +793,12 @@ static int exynos5_usbdrd_phy_init(struct phy *phy)
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
- udelay(10);
+ fsleep(10);
reg &= ~PHYCLKRST_PORTRESET;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
- clk_disable_unprepare(phy_drd->clk);
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
return 0;
}
@@ -479,7 +810,7 @@ static int exynos5_usbdrd_phy_exit(struct phy *phy)
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
- ret = clk_prepare_enable(phy_drd->clk);
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
if (ret)
return ret;
@@ -501,7 +832,7 @@ static int exynos5_usbdrd_phy_exit(struct phy *phy)
PHYTEST_POWERDOWN_HSP;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
- clk_disable_unprepare(phy_drd->clk);
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
return 0;
}
@@ -514,47 +845,27 @@ static int exynos5_usbdrd_phy_power_on(struct phy *phy)
dev_dbg(phy_drd->dev, "Request to power_on usbdrd_phy phy\n");
- clk_prepare_enable(phy_drd->ref_clk);
- if (!phy_drd->drv_data->has_common_clk_gate) {
- clk_prepare_enable(phy_drd->pipeclk);
- clk_prepare_enable(phy_drd->utmiclk);
- clk_prepare_enable(phy_drd->itpclk);
- }
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_core_clks,
+ phy_drd->core_clks);
+ if (ret)
+ return ret;
/* Enable VBUS supply */
- if (phy_drd->vbus_boost) {
- ret = regulator_enable(phy_drd->vbus_boost);
- if (ret) {
- dev_err(phy_drd->dev,
- "Failed to enable VBUS boost supply\n");
- goto fail_vbus;
- }
- }
-
- if (phy_drd->vbus) {
- ret = regulator_enable(phy_drd->vbus);
- if (ret) {
- dev_err(phy_drd->dev, "Failed to enable VBUS supply\n");
- goto fail_vbus_boost;
- }
+ ret = regulator_bulk_enable(phy_drd->drv_data->n_regulators,
+ phy_drd->regulators);
+ if (ret) {
+ dev_err(phy_drd->dev, "Failed to enable PHY regulator(s)\n");
+ goto fail_vbus;
}
- /* Power-on PHY*/
- inst->phy_cfg->phy_isol(inst, 0);
+ /* Power-on PHY */
+ inst->phy_cfg->phy_isol(inst, false);
return 0;
-fail_vbus_boost:
- if (phy_drd->vbus_boost)
- regulator_disable(phy_drd->vbus_boost);
-
fail_vbus:
- clk_disable_unprepare(phy_drd->ref_clk);
- if (!phy_drd->drv_data->has_common_clk_gate) {
- clk_disable_unprepare(phy_drd->itpclk);
- clk_disable_unprepare(phy_drd->utmiclk);
- clk_disable_unprepare(phy_drd->pipeclk);
- }
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_core_clks,
+ phy_drd->core_clks);
return ret;
}
@@ -567,20 +878,14 @@ static int exynos5_usbdrd_phy_power_off(struct phy *phy)
dev_dbg(phy_drd->dev, "Request to power_off usbdrd_phy phy\n");
/* Power-off the PHY */
- inst->phy_cfg->phy_isol(inst, 1);
+ inst->phy_cfg->phy_isol(inst, true);
/* Disable VBUS supply */
- if (phy_drd->vbus)
- regulator_disable(phy_drd->vbus);
- if (phy_drd->vbus_boost)
- regulator_disable(phy_drd->vbus_boost);
-
- clk_disable_unprepare(phy_drd->ref_clk);
- if (!phy_drd->drv_data->has_common_clk_gate) {
- clk_disable_unprepare(phy_drd->itpclk);
- clk_disable_unprepare(phy_drd->pipeclk);
- clk_disable_unprepare(phy_drd->utmiclk);
- }
+ regulator_bulk_disable(phy_drd->drv_data->n_regulators,
+ phy_drd->regulators);
+
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_core_clks,
+ phy_drd->core_clks);
return 0;
}
@@ -744,10 +1049,29 @@ static const struct phy_ops exynos5_usbdrd_phy_ops = {
.owner = THIS_MODULE,
};
+static void
+exynos5_usbdrd_usb_v3p1_pipe_override(struct exynos5_usbdrd_phy *phy_drd)
+{
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+
+ /* force pipe3 signal for link */
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg &= ~LINKCTRL_FORCE_PHYSTATUS;
+ reg |= LINKCTRL_FORCE_PIPE_EN | LINKCTRL_FORCE_RXELECIDLE;
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ /* PMA disable */
+ reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL);
+ reg |= SECPMACTL_PMA_LOW_PWR;
+ writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL);
+}
+
static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
{
void __iomem *regs_base = phy_drd->reg_phy;
u32 reg;
+ u32 ss_ports;
/*
* Disable HWACG (hardware auto clock gating control). This will force
@@ -758,8 +1082,16 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
reg |= LINKCTRL_FORCE_QACT;
writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg = readl(regs_base + EXYNOS850_DRD_LINKPORT);
+ ss_ports = FIELD_GET(LINKPORT_HOST_NUM_U3, reg);
+
/* Start PHY Reset (POR=high) */
reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ if (ss_ports) {
+ reg |= CLKRST_PHY20_SW_POR;
+ reg |= CLKRST_PHY20_SW_POR_SEL;
+ reg |= CLKRST_PHY_RESET_SEL;
+ }
reg |= CLKRST_PHY_SW_RST;
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
@@ -787,22 +1119,58 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL;
writel(reg, regs_base + EXYNOS850_DRD_HSP);
+ reg = readl(regs_base + EXYNOS850_DRD_SSPPLLCTL);
+ reg &= ~SSPPLLCTL_FSEL;
+ switch (phy_drd->extrefclk) {
+ case EXYNOS5_FSEL_50MHZ:
+ reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 7);
+ break;
+ case EXYNOS5_FSEL_26MHZ:
+ reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 6);
+ break;
+ case EXYNOS5_FSEL_24MHZ:
+ reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 2);
+ break;
+ case EXYNOS5_FSEL_20MHZ:
+ reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 1);
+ break;
+ case EXYNOS5_FSEL_19MHZ2:
+ reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 0);
+ break;
+ default:
+ dev_warn(phy_drd->dev, "unsupported ref clk: %#.2x\n",
+ phy_drd->extrefclk);
+ break;
+ }
+ writel(reg, regs_base + EXYNOS850_DRD_SSPPLLCTL);
+
+ if (phy_drd->drv_data->phy_tunes)
+ exynos5_usbdrd_apply_phy_tunes(phy_drd,
+ PTS_UTMI_POSTINIT);
+
/* Power up PHY analog blocks */
reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST);
reg &= ~HSP_TEST_SIDDQ;
writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST);
/* Finish PHY reset (POR=low) */
- udelay(10); /* required before doing POR=low */
+ fsleep(10); /* required before doing POR=low */
reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ if (ss_ports) {
+ reg |= CLKRST_PHY20_SW_POR_SEL;
+ reg &= ~CLKRST_PHY20_SW_POR;
+ }
reg &= ~(CLKRST_PHY_SW_RST | CLKRST_PORT_RST);
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
- udelay(75); /* required after POR=low for guaranteed PHY clock */
+ fsleep(75); /* required after POR=low for guaranteed PHY clock */
/* Disable single ended signal out */
reg = readl(regs_base + EXYNOS850_DRD_HSP);
reg &= ~HSP_FSV_OUT_EN;
writel(reg, regs_base + EXYNOS850_DRD_HSP);
+
+ if (ss_ports)
+ exynos5_usbdrd_usb_v3p1_pipe_override(phy_drd);
}
static int exynos850_usbdrd_phy_init(struct phy *phy)
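The register updates above lean on the <linux/bitfield.h> helpers (FIELD_GET(), FIELD_PREP(), and FIELD_PREP_CONST() for constant initializers). A short sketch with a made-up 3-bit field illustrates the idiom; the names below are not from this patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 3-bit frequency-select field occupying bits [6:4]. */
#define EXAMPLE_FSEL    GENMASK(6, 4)

static u32 example_set_fsel(u32 reg, u32 fsel)
{
        reg &= ~EXAMPLE_FSEL;                   /* clear the field */
        reg |= FIELD_PREP(EXAMPLE_FSEL, fsel);  /* shift the value into place */
        return reg;
}

static u32 example_get_fsel(u32 reg)
{
        return FIELD_GET(EXAMPLE_FSEL, reg);    /* extract and right-align */
}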
@@ -811,14 +1179,14 @@ static int exynos850_usbdrd_phy_init(struct phy *phy)
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
int ret;
- ret = clk_prepare_enable(phy_drd->clk);
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
if (ret)
return ret;
/* UTMI or PIPE3 specific init */
inst->phy_cfg->phy_init(phy_drd);
- clk_disable_unprepare(phy_drd->clk);
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
return 0;
}
@@ -831,7 +1199,7 @@ static int exynos850_usbdrd_phy_exit(struct phy *phy)
u32 reg;
int ret;
- ret = clk_prepare_enable(phy_drd->clk);
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
if (ret)
return ret;
@@ -850,11 +1218,11 @@ static int exynos850_usbdrd_phy_exit(struct phy *phy)
reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
reg |= CLKRST_LINK_SW_RST;
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
- udelay(10); /* required before doing POR=low */
+ fsleep(10); /* required before doing POR=low */
reg &= ~CLKRST_LINK_SW_RST;
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
- clk_disable_unprepare(phy_drd->clk);
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
return 0;
}
@@ -867,53 +1235,138 @@ static const struct phy_ops exynos850_usbdrd_phy_ops = {
.owner = THIS_MODULE,
};
-static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
+static void exynos5_usbdrd_gs101_pipe3_init(struct exynos5_usbdrd_phy *phy_drd)
{
- unsigned long ref_rate;
+ void __iomem *regs_pma = phy_drd->reg_pma;
+ void __iomem *regs_phy = phy_drd->reg_phy;
+ u32 reg;
+
+ exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(phy_drd);
+
+ /* force aux off */
+ reg = readl(regs_pma + EXYNOS9_PMA_USBDP_CMN_REG0008);
+ reg &= ~CMN_REG0008_AUX_EN;
+ reg |= CMN_REG0008_OVRD_AUX_EN;
+ writel(reg, regs_pma + EXYNOS9_PMA_USBDP_CMN_REG0008);
+
+ exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_PREINIT);
+ exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_INIT);
+ exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_POSTINIT);
+
+ exynos5_usbdrd_usbdp_g2_v4_pma_lane_mux_sel(phy_drd);
+
+ /* reset release from port */
+ reg = readl(regs_phy + EXYNOS850_DRD_SECPMACTL);
+ reg &= ~(SECPMACTL_PMA_TRSV_SW_RST | SECPMACTL_PMA_CMN_SW_RST |
+ SECPMACTL_PMA_INIT_SW_RST);
+ writel(reg, regs_phy + EXYNOS850_DRD_SECPMACTL);
+
+ if (!exynos5_usbdrd_usbdp_g2_v4_pma_check_pll_lock(phy_drd))
+ exynos5_usbdrd_usbdp_g2_v4_pma_check_cdr_lock(phy_drd);
+}
+
+static int exynos5_usbdrd_gs101_phy_init(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
int ret;
- phy_drd->clk = devm_clk_get(phy_drd->dev, "phy");
- if (IS_ERR(phy_drd->clk)) {
- dev_err(phy_drd->dev, "Failed to get phy clock\n");
- return PTR_ERR(phy_drd->clk);
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
+ /* Power-on PHY ... */
+ ret = regulator_bulk_enable(phy_drd->drv_data->n_regulators,
+ phy_drd->regulators);
+ if (ret) {
+ dev_err(phy_drd->dev,
+ "Failed to enable PHY regulator(s)\n");
+ return ret;
+ }
}
+ /*
+ * ... and ungate power via PMU. Without this here, we get an SError
+ * trying to access PMA registers
+ */
+ exynos5_usbdrd_phy_isol(inst, false);
- phy_drd->ref_clk = devm_clk_get(phy_drd->dev, "ref");
- if (IS_ERR(phy_drd->ref_clk)) {
- dev_err(phy_drd->dev, "Failed to get phy reference clock\n");
- return PTR_ERR(phy_drd->ref_clk);
- }
- ref_rate = clk_get_rate(phy_drd->ref_clk);
+ return exynos850_usbdrd_phy_init(phy);
+}
- ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk);
- if (ret) {
- dev_err(phy_drd->dev, "Clock rate (%ld) not supported\n",
- ref_rate);
+static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ int ret;
+
+ if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI)
+ return 0;
+
+ ret = exynos850_usbdrd_phy_exit(phy);
+ if (ret)
return ret;
- }
- if (!phy_drd->drv_data->has_common_clk_gate) {
- phy_drd->pipeclk = devm_clk_get(phy_drd->dev, "phy_pipe");
- if (IS_ERR(phy_drd->pipeclk)) {
- dev_info(phy_drd->dev,
- "PIPE3 phy operational clock not specified\n");
- phy_drd->pipeclk = NULL;
- }
+ exynos5_usbdrd_phy_isol(inst, true);
+ return regulator_bulk_disable(phy_drd->drv_data->n_regulators,
+ phy_drd->regulators);
+}
- phy_drd->utmiclk = devm_clk_get(phy_drd->dev, "phy_utmi");
- if (IS_ERR(phy_drd->utmiclk)) {
- dev_info(phy_drd->dev,
- "UTMI phy operational clock not specified\n");
- phy_drd->utmiclk = NULL;
- }
+static const struct phy_ops gs101_usbdrd_phy_ops = {
+ .init = exynos5_usbdrd_gs101_phy_init,
+ .exit = exynos5_usbdrd_gs101_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
+{
+ int ret;
+ struct clk *ref_clk;
+ unsigned long ref_rate;
+
+ phy_drd->clks = devm_kcalloc(phy_drd->dev, phy_drd->drv_data->n_clks,
+ sizeof(*phy_drd->clks), GFP_KERNEL);
+ if (!phy_drd->clks)
+ return -ENOMEM;
+
+ for (int i = 0; i < phy_drd->drv_data->n_clks; ++i)
+ phy_drd->clks[i].id = phy_drd->drv_data->clk_names[i];
+
+ ret = devm_clk_bulk_get(phy_drd->dev, phy_drd->drv_data->n_clks,
+ phy_drd->clks);
+ if (ret)
+ return dev_err_probe(phy_drd->dev, ret,
+ "failed to get phy clock(s)\n");
+
+ phy_drd->core_clks = devm_kcalloc(phy_drd->dev,
+ phy_drd->drv_data->n_core_clks,
+ sizeof(*phy_drd->core_clks),
+ GFP_KERNEL);
+ if (!phy_drd->core_clks)
+ return -ENOMEM;
- phy_drd->itpclk = devm_clk_get(phy_drd->dev, "itp");
- if (IS_ERR(phy_drd->itpclk)) {
- dev_info(phy_drd->dev,
- "ITP clock from main OSC not specified\n");
- phy_drd->itpclk = NULL;
+ for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i)
+ phy_drd->core_clks[i].id = phy_drd->drv_data->core_clk_names[i];
+
+ ret = devm_clk_bulk_get(phy_drd->dev, phy_drd->drv_data->n_core_clks,
+ phy_drd->core_clks);
+ if (ret)
+ return dev_err_probe(phy_drd->dev, ret,
+ "failed to get phy core clock(s)\n");
+
+ ref_clk = NULL;
+ for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i) {
+ if (!strcmp(phy_drd->core_clks[i].id, "ref")) {
+ ref_clk = phy_drd->core_clks[i].clk;
+ break;
}
}
+ if (!ref_clk)
+ return dev_err_probe(phy_drd->dev, -ENODEV,
+ "failed to find phy reference clock\n");
+
+ ref_rate = clk_get_rate(ref_clk);
+ ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk);
+ if (ret)
+ return dev_err_probe(phy_drd->dev, ret,
+ "clock rate (%ld) not supported\n",
+ ref_rate);
return 0;
}
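The clock handling above is built on the clk_bulk consumer API (devm_clk_bulk_get(), clk_bulk_prepare_enable(), clk_bulk_disable_unprepare()). A minimal sketch of that pattern, with made-up clock names; a real driver would keep the bulk array in its private data so it can disable the clocks later:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static const char * const example_clk_names[] = { "phy", "ref" };

/* The caller provides a clk_bulk_data array sized for example_clk_names. */
static int example_enable_clocks(struct device *dev, struct clk_bulk_data *clks)
{
        int ret;
        int i;

        for (i = 0; i < ARRAY_SIZE(example_clk_names); i++)
                clks[i].id = example_clk_names[i];

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clk_names), clks);
        if (ret)
                return dev_err_probe(dev, ret, "failed to get clocks\n");

        return clk_bulk_prepare_enable(ARRAY_SIZE(example_clk_names), clks);
}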
@@ -941,19 +1394,45 @@ static const struct exynos5_usbdrd_phy_config phy_cfg_exynos850[] = {
},
};
+static const char * const exynos5_clk_names[] = {
+ "phy",
+};
+
+static const char * const exynos5_core_clk_names[] = {
+ "ref",
+};
+
+static const char * const exynos5433_core_clk_names[] = {
+ "ref", "phy_pipe", "phy_utmi", "itp",
+};
+
+static const char * const exynos5_regulator_names[] = {
+ "vbus", "vbus-boost",
+};
+
static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL,
- .has_common_clk_gate = true,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ .core_clk_names = exynos5_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names),
+ .regulator_names = exynos5_regulator_names,
+ .n_regulators = ARRAY_SIZE(exynos5_regulator_names),
};
static const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
- .has_common_clk_gate = true,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ .core_clk_names = exynos5_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names),
+ .regulator_names = exynos5_regulator_names,
+ .n_regulators = ARRAY_SIZE(exynos5_regulator_names),
};
static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = {
@@ -961,25 +1440,218 @@ static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = {
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5433_USBHOST30_PHY_CONTROL,
- .has_common_clk_gate = false,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ .core_clk_names = exynos5433_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5433_core_clk_names),
+ .regulator_names = exynos5_regulator_names,
+ .n_regulators = ARRAY_SIZE(exynos5_regulator_names),
};
static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
- .has_common_clk_gate = false,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ .core_clk_names = exynos5433_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5433_core_clk_names),
+ .regulator_names = exynos5_regulator_names,
+ .n_regulators = ARRAY_SIZE(exynos5_regulator_names),
};
static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos850,
.phy_ops = &exynos850_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
- .has_common_clk_gate = true,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ .core_clk_names = exynos5_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names),
+ .regulator_names = exynos5_regulator_names,
+ .n_regulators = ARRAY_SIZE(exynos5_regulator_names),
+};
+
+static const struct exynos5_usbdrd_phy_config phy_cfg_gs101[] = {
+ {
+ .id = EXYNOS5_DRDPHY_UTMI,
+ .phy_isol = exynos5_usbdrd_phy_isol,
+ .phy_init = exynos850_usbdrd_utmi_init,
+ },
+ {
+ .id = EXYNOS5_DRDPHY_PIPE3,
+ .phy_isol = exynos5_usbdrd_phy_isol,
+ .phy_init = exynos5_usbdrd_gs101_pipe3_init,
+ },
+};
+
+static const struct exynos5_usbdrd_phy_tuning gs101_tunes_utmi_postinit[] = {
+ PHY_TUNING_ENTRY_PHY(EXYNOS850_DRD_HSPPARACON,
+ (HSPPARACON_TXVREF | HSPPARACON_TXRES |
+ HSPPARACON_TXPREEMPAMP | HSPPARACON_SQRX |
+ HSPPARACON_COMPDIS),
+ (FIELD_PREP_CONST(HSPPARACON_TXVREF, 6) |
+ FIELD_PREP_CONST(HSPPARACON_TXRES, 1) |
+ FIELD_PREP_CONST(HSPPARACON_TXPREEMPAMP, 3) |
+ FIELD_PREP_CONST(HSPPARACON_SQRX, 5) |
+ FIELD_PREP_CONST(HSPPARACON_COMPDIS, 7))),
+ PHY_TUNING_ENTRY_LAST
+};
+
+static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_preinit[] = {
+ /* preinit */
+ /* CDR data mode exit GEN1 ON / GEN2 OFF */
+ PHY_TUNING_ENTRY_PMA(0x0c8c, -1, 0xff),
+ PHY_TUNING_ENTRY_PMA(0x1c8c, -1, 0xff),
+ PHY_TUNING_ENTRY_PMA(0x0c9c, -1, 0x7d),
+ PHY_TUNING_ENTRY_PMA(0x1c9c, -1, 0x7d),
+ /* improve EDS distribution */
+ PHY_TUNING_ENTRY_PMA(0x0e7c, -1, 0x06),
+ PHY_TUNING_ENTRY_PMA(0x09e0, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x09e4, -1, 0x36),
+ PHY_TUNING_ENTRY_PMA(0x1e7c, -1, 0x06),
+ PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x36),
+ /* improve LVCC */
+ PHY_TUNING_ENTRY_PMA(0x08f0, -1, 0x30),
+ PHY_TUNING_ENTRY_PMA(0x18f0, -1, 0x30),
+ /* LFPS RX VIH shmoo hole */
+ PHY_TUNING_ENTRY_PMA(0x0a08, -1, 0x0c),
+ PHY_TUNING_ENTRY_PMA(0x1a08, -1, 0x0c),
+ /* remove unrelated option for v4 phy */
+ PHY_TUNING_ENTRY_PMA(0x0a0c, -1, 0x05),
+ PHY_TUNING_ENTRY_PMA(0x1a0c, -1, 0x05),
+ /* improve Gen2 LVCC */
+ PHY_TUNING_ENTRY_PMA(0x00f8, -1, 0x1c),
+ PHY_TUNING_ENTRY_PMA(0x00fc, -1, 0x54),
+ /* Change Vth of RCV_DET because of TD 7.40 Polling Retry Test */
+ PHY_TUNING_ENTRY_PMA(0x104c, -1, 0x07),
+ PHY_TUNING_ENTRY_PMA(0x204c, -1, 0x07),
+ /* reduce Ux Exit time, assuming 26MHz clock */
+ /* Gen1 */
+ PHY_TUNING_ENTRY_PMA(0x0ca8, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x0cac, -1, 0x04),
+ PHY_TUNING_ENTRY_PMA(0x1ca8, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x1cac, -1, 0x04),
+ /* Gen2 */
+ PHY_TUNING_ENTRY_PMA(0x0cb8, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x0cbc, -1, 0x04),
+ PHY_TUNING_ENTRY_PMA(0x1cb8, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x1cbc, -1, 0x04),
+ /* RX impedance setting */
+ PHY_TUNING_ENTRY_PMA(0x0bb0, 0x03, 0x01),
+ PHY_TUNING_ENTRY_PMA(0x0bb4, 0xf0, 0xa0),
+ PHY_TUNING_ENTRY_PMA(0x1bb0, 0x03, 0x01),
+ PHY_TUNING_ENTRY_PMA(0x1bb4, 0xf0, 0xa0),
+
+ PHY_TUNING_ENTRY_LAST
+};
+
+static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_init[] = {
+ /* init */
+ /* abnormal common pattern mask */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_BACK_END_MODE_VEC,
+ BACK_END_MODE_VEC_DISABLE_DATA_MASK, 0),
+ /* de-serializer enabled when U2 */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_2, PCS_OUT_VEC_B4_DYNAMIC,
+ PCS_OUT_VEC_B4_SEL_OUT),
+ /* TX Keeper Disable, Squelch on when U3 */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_3, PCS_OUT_VEC_B7_DYNAMIC,
+ PCS_OUT_VEC_B7_SEL_OUT | PCS_OUT_VEC_B2_SEL_OUT),
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS1_N1, -1,
+ (FIELD_PREP_CONST(NS_VEC_NS_REQ, 5) |
+ NS_VEC_ENABLE_TIMER |
+ FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3))),
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS2_N0, -1,
+ (FIELD_PREP_CONST(NS_VEC_NS_REQ, 1) |
+ NS_VEC_ENABLE_TIMER |
+ FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3) |
+ FIELD_PREP_CONST(NS_VEC_COND_MASK, 2) |
+ FIELD_PREP_CONST(NS_VEC_EXP_COND, 2))),
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS3_N0, -1,
+ (FIELD_PREP_CONST(NS_VEC_NS_REQ, 1) |
+ NS_VEC_ENABLE_TIMER |
+ FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3) |
+ FIELD_PREP_CONST(NS_VEC_COND_MASK, 7) |
+ FIELD_PREP_CONST(NS_VEC_EXP_COND, 7))),
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_TIMEOUT_0, -1, 112),
+ /* Block Aligner Type B */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_RX_CONTROL, 0,
+ RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B),
+ /* Block align at TS1/TS2 for Gen2 stability (Gen2 only) */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_RX_CONTROL_DEBUG,
+ RX_CONTROL_DEBUG_NUM_COM_FOUND,
+ (RX_CONTROL_DEBUG_EN_TS_CHECK |
+ /*
+ * increase pcs ts1 adding packet-cnt 1 --> 4
+ * lnx_rx_valid_rstn_delay_rise_sp/ssp :
+ * 19.6us(0x200) -> 15.3us(0x4)
+ */
+ FIELD_PREP_CONST(RX_CONTROL_DEBUG_NUM_COM_FOUND, 4))),
+ /* Gen1 Tx DRIVER pre-shoot, de-emphasis, level ctrl */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_HS_TX_COEF_MAP_0,
+ (HS_TX_COEF_MAP_0_SSTX_DEEMP | HS_TX_COEF_MAP_0_SSTX_LEVEL |
+ HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT),
+ (FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_DEEMP, 8) |
+ FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_LEVEL, 0xb) |
+ FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT, 0))),
+ /* Gen2 Tx DRIVER level ctrl */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_LOCAL_COEF,
+ LOCAL_COEF_PMA_CENTER_COEF,
+ FIELD_PREP_CONST(LOCAL_COEF_PMA_CENTER_COEF, 0xb)),
+ /* Gen2 U1 exit LFPS duration : 900ns ~ 1.2us */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_TIMEOUT_3, -1, 4096),
+ /* set skp_remove_th 0x2 -> 0x7 for avoiding retry problem. */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_EBUF_PARAM,
+ EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE,
+ FIELD_PREP_CONST(EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE, 0x7)),
+
+ PHY_TUNING_ENTRY_LAST
+};
+
+static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_postlock[] = {
+ /* Squelch off when U3 */
+ PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_3, PCS_OUT_VEC_B2_SEL_OUT, 0),
+
+ PHY_TUNING_ENTRY_LAST
+};
+
+static const struct exynos5_usbdrd_phy_tuning *gs101_tunes[PTS_MAX] = {
+ [PTS_UTMI_POSTINIT] = gs101_tunes_utmi_postinit,
+ [PTS_PIPE3_PREINIT] = gs101_tunes_pipe3_preinit,
+ [PTS_PIPE3_INIT] = gs101_tunes_pipe3_init,
+ [PTS_PIPE3_POSTLOCK] = gs101_tunes_pipe3_postlock,
+};
+
+static const char * const gs101_clk_names[] = {
+ "phy", "ctrl_aclk", "ctrl_pclk", "scl_pclk",
+};
+
+static const char * const gs101_regulator_names[] = {
+ "pll",
+ "dvdd-usb20", "vddh-usb20", "vdd33-usb20",
+ "vdda-usbdp", "vddh-usbdp",
+};
+
+static const struct exynos5_usbdrd_phy_drvdata gs101_usbd31rd_phy = {
+ .phy_cfg = phy_cfg_gs101,
+ .phy_tunes = gs101_tunes,
+ .phy_ops = &gs101_usbdrd_phy_ops,
+ .pmu_offset_usbdrd0_phy = GS101_PHY_CTRL_USB20,
+ .pmu_offset_usbdrd0_phy_ss = GS101_PHY_CTRL_USBDP,
+ .clk_names = gs101_clk_names,
+ .n_clks = ARRAY_SIZE(gs101_clk_names),
+ .core_clk_names = exynos5_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names),
+ .regulator_names = gs101_regulator_names,
+ .n_regulators = ARRAY_SIZE(gs101_regulator_names),
};
static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
{
+ .compatible = "google,gs101-usb31drd-phy",
+ .data = &gs101_usbd31rd_phy
+ }, {
.compatible = "samsung,exynos5250-usbdrd-phy",
.data = &exynos5250_usbdrd_phy
}, {
@@ -1018,21 +1690,38 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, phy_drd);
phy_drd->dev = dev;
- phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(phy_drd->reg_phy))
- return PTR_ERR(phy_drd->reg_phy);
-
drv_data = of_device_get_match_data(dev);
if (!drv_data)
return -EINVAL;
-
phy_drd->drv_data = drv_data;
+ if (of_property_present(dev->of_node, "reg-names")) {
+ void __iomem *reg;
+
+ reg = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ phy_drd->reg_phy = reg;
+
+ reg = devm_platform_ioremap_resource_byname(pdev, "pcs");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ phy_drd->reg_pcs = reg;
+
+ reg = devm_platform_ioremap_resource_byname(pdev, "pma");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ phy_drd->reg_pma = reg;
+ } else {
+ /* DTB with just a single region */
+ phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy_drd->reg_phy))
+ return PTR_ERR(phy_drd->reg_phy);
+ }
+
ret = exynos5_usbdrd_phy_clk_handle(phy_drd);
- if (ret) {
- dev_err(dev, "Failed to initialize clocks\n");
+ if (ret)
return ret;
- }
reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,pmu-syscon");
@@ -1050,36 +1739,20 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
if (channel < 0)
dev_dbg(dev, "Not a multi-controller usbdrd phy\n");
- switch (channel) {
- case 1:
- pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd1_phy;
- break;
- case 0:
- default:
- pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd0_phy;
- break;
- }
-
- /* Get Vbus regulators */
- phy_drd->vbus = devm_regulator_get(dev, "vbus");
- if (IS_ERR(phy_drd->vbus)) {
- ret = PTR_ERR(phy_drd->vbus);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- dev_warn(dev, "Failed to get VBUS supply regulator\n");
- phy_drd->vbus = NULL;
- }
-
- phy_drd->vbus_boost = devm_regulator_get(dev, "vbus-boost");
- if (IS_ERR(phy_drd->vbus_boost)) {
- ret = PTR_ERR(phy_drd->vbus_boost);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- dev_warn(dev, "Failed to get VBUS boost supply regulator\n");
- phy_drd->vbus_boost = NULL;
- }
+ /* Get regulators */
+ phy_drd->regulators = devm_kcalloc(dev,
+ drv_data->n_regulators,
+ sizeof(*phy_drd->regulators),
+ GFP_KERNEL);
+ if (!phy_drd->regulators)
+ return -ENOMEM;
+ regulator_bulk_set_supply_names(phy_drd->regulators,
+ drv_data->regulator_names,
+ drv_data->n_regulators);
+ ret = devm_regulator_bulk_get(dev, drv_data->n_regulators,
+ phy_drd->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
dev_vdbg(dev, "Creating usbdrd_phy phy\n");
@@ -1094,6 +1767,18 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
phy_drd->phys[i].phy = phy;
phy_drd->phys[i].index = i;
phy_drd->phys[i].reg_pmu = reg_pmu;
+ switch (channel) {
+ case 1:
+ pmu_offset = drv_data->pmu_offset_usbdrd1_phy;
+ break;
+ case 0:
+ default:
+ pmu_offset = drv_data->pmu_offset_usbdrd0_phy;
+ if (i == EXYNOS5_DRDPHY_PIPE3 &&
+ drv_data->pmu_offset_usbdrd0_phy_ss)
+ pmu_offset = drv_data->pmu_offset_usbdrd0_phy_ss;
+ break;
+ }
phy_drd->phys[i].pmu_offset = pmu_offset;
phy_drd->phys[i].phy_cfg = &drv_data->phy_cfg[i];
phy_set_drvdata(phy, &phy_drd->phys[i]);
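The supply handling above uses the regulator_bulk consumer API. A sketch of that pattern with hypothetical supply names; the caller provides a suitably sized consumers array:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const example_supply_names[] = { "vbus", "vbus-boost" };

static int example_enable_supplies(struct device *dev,
                                   struct regulator_bulk_data *consumers)
{
        int ret;

        /* fill in .supply for each entry from the name table */
        regulator_bulk_set_supply_names(consumers, example_supply_names,
                                        ARRAY_SIZE(example_supply_names));

        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supply_names),
                                      consumers);
        if (ret)
                return dev_err_probe(dev, ret, "failed to get regulators\n");

        /* enabled as a group; regulator_bulk_disable() is the counterpart */
        return regulator_bulk_enable(ARRAY_SIZE(example_supply_names),
                                     consumers);
}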
diff --git a/drivers/phy/samsung/phy-exynos5250-usb2.c b/drivers/phy/samsung/phy-exynos5250-usb2.c
index e198010e1bfd..21b06072f866 100644
--- a/drivers/phy/samsung/phy-exynos5250-usb2.c
+++ b/drivers/phy/samsung/phy-exynos5250-usb2.c
@@ -121,7 +121,7 @@
#define EXYNOS_5420_USB_ISOL_HOST_OFFSET 0x70C
#define EXYNOS_5250_USB_ISOL_ENABLE BIT(0)
-/* Mode swtich register */
+/* Mode switch register */
#define EXYNOS_5250_MODE_SWITCH_OFFSET 0x230
#define EXYNOS_5250_MODE_SWITCH_MASK 1
#define EXYNOS_5250_MODE_SWITCH_DEVICE 0
diff --git a/drivers/phy/st/phy-miphy28lp.c b/drivers/phy/st/phy-miphy28lp.c
index 063fc38788ed..43cef89af55e 100644
--- a/drivers/phy/st/phy-miphy28lp.c
+++ b/drivers/phy/st/phy-miphy28lp.c
@@ -228,11 +228,6 @@ struct miphy28lp_dev {
int nphys;
};
-struct miphy_initval {
- u16 reg;
- u16 val;
-};
-
enum miphy_sata_gen { SATA_GEN1, SATA_GEN2, SATA_GEN3 };
static char *PHY_TYPE_name[] = { "sata-up", "pcie-up", "", "usb3-up" };
diff --git a/drivers/phy/starfive/Kconfig b/drivers/phy/starfive/Kconfig
index 9508e2143011..d0cdd7cb4a13 100644
--- a/drivers/phy/starfive/Kconfig
+++ b/drivers/phy/starfive/Kconfig
@@ -15,6 +15,16 @@ config PHY_STARFIVE_JH7110_DPHY_RX
system. If M is selected, the module will be called
phy-jh7110-dphy-rx.ko.
+config PHY_STARFIVE_JH7110_DPHY_TX
+ tristate "StarFive JH7110 D-PHY TX Support"
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Choose this option if you have a StarFive D-PHY TX in your
+ system. If M is selected, the module will be called
+ phy-jh7110-dphy-tx.ko.
+
config PHY_STARFIVE_JH7110_PCIE
tristate "Starfive JH7110 PCIE 2.0/USB 3.0 PHY support"
depends on HAS_IOMEM
diff --git a/drivers/phy/starfive/Makefile b/drivers/phy/starfive/Makefile
index b391018b7c47..eedc4a6fec15 100644
--- a/drivers/phy/starfive/Makefile
+++ b/drivers/phy/starfive/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_RX) += phy-jh7110-dphy-rx.o
+obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_TX) += phy-jh7110-dphy-tx.o
obj-$(CONFIG_PHY_STARFIVE_JH7110_PCIE) += phy-jh7110-pcie.o
obj-$(CONFIG_PHY_STARFIVE_JH7110_USB) += phy-jh7110-usb.o
diff --git a/drivers/phy/starfive/phy-jh7110-dphy-rx.c b/drivers/phy/starfive/phy-jh7110-dphy-rx.c
index 037a9e0263cd..0b039e1f71c5 100644
--- a/drivers/phy/starfive/phy-jh7110-dphy-rx.c
+++ b/drivers/phy/starfive/phy-jh7110-dphy-rx.c
@@ -46,11 +46,6 @@
#define STF_MAP_LANES_NUM 6
-struct regval {
- u32 addr;
- u32 val;
-};
-
struct stf_dphy_info {
/**
* @maps:
diff --git a/drivers/phy/starfive/phy-jh7110-dphy-tx.c b/drivers/phy/starfive/phy-jh7110-dphy-tx.c
new file mode 100644
index 000000000000..c64d1c91b130
--- /dev/null
+++ b/drivers/phy/starfive/phy-jh7110-dphy-tx.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DPHY TX driver for the StarFive JH7110 SoC
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ * Author: Keith Zhao <keith.zhao@starfivetech.com>
+ * Author: Shengyang Chen <shengyang.chen@starfivetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#define STF_DPHY_APBIFSAIF_SYSCFG(x) (x)
+
+#define STF_DPHY_AON_POWER_READY_N_ACTIVE 0
+#define STF_DPHY_AON_POWER_READY_N BIT(0)
+#define STF_DPHY_CFG_L0_SWAP_SEL GENMASK(14, 12)
+#define STF_DPHY_CFG_L1_SWAP_SEL GENMASK(17, 15)
+#define STF_DPHY_CFG_L2_SWAP_SEL GENMASK(20, 18)
+#define STF_DPHY_CFG_L3_SWAP_SEL GENMASK(23, 21)
+#define STF_DPHY_CFG_L4_SWAP_SEL GENMASK(26, 24)
+#define STF_DPHY_RGS_CDTX_PLL_UNLOCK BIT(18)
+#define STF_DPHY_RG_CDTX_L0N_HSTX_RES GENMASK(23, 19)
+#define STF_DPHY_RG_CDTX_L0P_HSTX_RES GENMASK(28, 24)
+
+#define STF_DPHY_RG_CDTX_L1P_HSTX_RES GENMASK(9, 5)
+#define STF_DPHY_RG_CDTX_L2N_HSTX_RES GENMASK(14, 10)
+#define STF_DPHY_RG_CDTX_L2P_HSTX_RES GENMASK(19, 15)
+#define STF_DPHY_RG_CDTX_L3N_HSTX_RES GENMASK(24, 20)
+#define STF_DPHY_RG_CDTX_L3P_HSTX_RES GENMASK(29, 25)
+
+#define STF_DPHY_RG_CDTX_L4N_HSTX_RES GENMASK(4, 0)
+#define STF_DPHY_RG_CDTX_L4P_HSTX_RES GENMASK(9, 5)
+#define STF_DPHY_RG_CDTX_PLL_FBK_FRA GENMASK(23, 0)
+
+#define STF_DPHY_RG_CDTX_PLL_FBK_INT GENMASK(8, 0)
+#define STF_DPHY_RG_CDTX_PLL_FM_EN BIT(9)
+#define STF_DPHY_RG_CDTX_PLL_LDO_STB_X2_EN BIT(10)
+#define STF_DPHY_RG_CDTX_PLL_PRE_DIV GENMASK(12, 11)
+
+#define STF_DPHY_RG_CDTX_PLL_SSC_EN BIT(18)
+
+#define STF_DPHY_RG_CLANE_HS_CLK_POST_TIME GENMASK(7, 0)
+#define STF_DPHY_RG_CLANE_HS_CLK_PRE_TIME GENMASK(15, 8)
+#define STF_DPHY_RG_CLANE_HS_PRE_TIME GENMASK(23, 16)
+#define STF_DPHY_RG_CLANE_HS_TRAIL_TIME GENMASK(31, 24)
+
+#define STF_DPHY_RG_CLANE_HS_ZERO_TIME GENMASK(7, 0)
+#define STF_DPHY_RG_DLANE_HS_PRE_TIME GENMASK(15, 8)
+#define STF_DPHY_RG_DLANE_HS_TRAIL_TIME GENMASK(23, 16)
+#define STF_DPHY_RG_DLANE_HS_ZERO_TIME GENMASK(31, 24)
+
+#define STF_DPHY_RG_EXTD_CYCLE_SEL GENMASK(2, 0)
+#define STF_DPHY_SCFG_C_HS_PRE_ZERO_TIME GENMASK(31, 0)
+
+#define STF_DPHY_SCFG_DSI_TXREADY_ESC_SEL GENMASK(2, 1)
+#define STF_DPHY_SCFG_PPI_C_READY_SEL GENMASK(4, 3)
+
+#define STF_DPHY_REFCLK_IN_SEL GENMASK(28, 26)
+#define STF_DPHY_RESETB BIT(29)
+
+#define STF_DPHY_REFCLK_12M 1
+#define STF_DPHY_BITRATE_ALIGN 10000000
+
+#define STF_MAP_LANES_NUM 5
+
+#define STF_DPHY_LSHIFT_16(x) (FIELD_PREP(GENMASK(23, 16), (x)))
+#define STF_DPHY_LSHIFT_8(x) (FIELD_PREP(GENMASK(15, 8), (x)))
+
+#define STF_DPHY_HW_DELAY_US 200
+#define STF_DPHY_HW_TIMEOUT_US 5000
+
+struct stf_dphy_config {
+ unsigned long bitrate;
+ u32 pll_fbk_int;
+ u32 pll_fbk_fra_val;
+ u32 extd_cycle_sel;
+ u32 dlane_hs_pre_time;
+ u32 dlane_hs_zero_time;
+ u32 dlane_hs_trail_time;
+ u32 clane_hs_pre_time;
+ u32 clane_hs_zero_time;
+ u32 clane_hs_trail_time;
+ u32 clane_hs_clk_pre_time;
+ u32 clane_hs_clk_post_time;
+};
+
+static const struct stf_dphy_config reg_configs[] = {
+ {160000000, 0x6a, 0xaa, 0x3, 0xa, 0x17, 0x11, 0x5, 0x2b, 0xd, 0x7, 0x3d},
+ {170000000, 0x71, 0x55, 0x3, 0xb, 0x18, 0x11, 0x5, 0x2e, 0xd, 0x7, 0x3d},
+ {180000000, 0x78, 0x0, 0x3, 0xb, 0x19, 0x12, 0x6, 0x30, 0xe, 0x7, 0x3e},
+ {190000000, 0x7e, 0xaa, 0x3, 0xc, 0x1a, 0x12, 0x6, 0x33, 0xe, 0x7, 0x3e},
+ {200000000, 0x85, 0x55, 0x3, 0xc, 0x1b, 0x13, 0x7, 0x35, 0xf, 0x7, 0x3f},
+ {320000000, 0x6a, 0xaa, 0x2, 0x8, 0x14, 0xf, 0x5, 0x2b, 0xd, 0x3, 0x23},
+ {330000000, 0x6e, 0x0, 0x2, 0x8, 0x15, 0xf, 0x5, 0x2d, 0xd, 0x3, 0x23},
+ {340000000, 0x71, 0x55, 0x2, 0x9, 0x15, 0xf, 0x5, 0x2e, 0xd, 0x3, 0x23},
+ {350000000, 0x74, 0xaa, 0x2, 0x9, 0x15, 0x10, 0x6, 0x2f, 0xe, 0x3, 0x24},
+ {360000000, 0x78, 0x0, 0x2, 0x9, 0x16, 0x10, 0x6, 0x30, 0xe, 0x3, 0x24},
+ {370000000, 0x7b, 0x55, 0x2, 0x9, 0x17, 0x10, 0x6, 0x32, 0xe, 0x3, 0x24},
+ {380000000, 0x7e, 0xaa, 0x2, 0xa, 0x17, 0x10, 0x6, 0x33, 0xe, 0x3, 0x24},
+ {390000000, 0x82, 0x0, 0x2, 0xa, 0x17, 0x11, 0x6, 0x35, 0xf, 0x3, 0x25},
+ {400000000, 0x85, 0x55, 0x2, 0xa, 0x18, 0x11, 0x7, 0x35, 0xf, 0x3, 0x25},
+ {410000000, 0x88, 0xaa, 0x2, 0xa, 0x19, 0x11, 0x7, 0x37, 0xf, 0x3, 0x25},
+ {420000000, 0x8c, 0x0, 0x2, 0xa, 0x19, 0x12, 0x7, 0x38, 0x10, 0x3, 0x26},
+ {430000000, 0x8f, 0x55, 0x2, 0xb, 0x19, 0x12, 0x7, 0x39, 0x10, 0x3, 0x26},
+ {440000000, 0x92, 0xaa, 0x2, 0xb, 0x1a, 0x12, 0x7, 0x3b, 0x10, 0x3, 0x26},
+ {450000000, 0x96, 0x0, 0x2, 0xb, 0x1b, 0x12, 0x8, 0x3c, 0x10, 0x3, 0x26},
+ {460000000, 0x99, 0x55, 0x2, 0xb, 0x1b, 0x13, 0x8, 0x3d, 0x11, 0x3, 0x27},
+ {470000000, 0x9c, 0xaa, 0x2, 0xc, 0x1b, 0x13, 0x8, 0x3e, 0x11, 0x3, 0x27},
+ {480000000, 0xa0, 0x27, 0x2, 0xc, 0x1c, 0x13, 0x8, 0x40, 0x11, 0x3, 0x27},
+ {490000000, 0xa3, 0x55, 0x2, 0xc, 0x1d, 0x14, 0x8, 0x42, 0x12, 0x3, 0x28},
+ {500000000, 0xa6, 0xaa, 0x2, 0xc, 0x1d, 0x14, 0x9, 0x42, 0x12, 0x3, 0x28},
+ {510000000, 0xaa, 0x0, 0x2, 0xc, 0x1e, 0x14, 0x9, 0x44, 0x12, 0x3, 0x28},
+ {520000000, 0xad, 0x55, 0x2, 0xd, 0x1e, 0x15, 0x9, 0x45, 0x13, 0x3, 0x29},
+ {530000000, 0xb0, 0xaa, 0x2, 0xd, 0x1e, 0x15, 0x9, 0x47, 0x13, 0x3, 0x29},
+ {540000000, 0xb4, 0x0, 0x2, 0xd, 0x1f, 0x15, 0x9, 0x48, 0x13, 0x3, 0x29},
+ {550000000, 0xb7, 0x55, 0x2, 0xd, 0x20, 0x16, 0x9, 0x4a, 0x14, 0x3, 0x2a},
+ {560000000, 0xba, 0xaa, 0x2, 0xe, 0x20, 0x16, 0xa, 0x4a, 0x14, 0x3, 0x2a},
+ {570000000, 0xbe, 0x0, 0x2, 0xe, 0x20, 0x16, 0xa, 0x4c, 0x14, 0x3, 0x2a},
+ {580000000, 0xc1, 0x55, 0x2, 0xe, 0x21, 0x16, 0xa, 0x4d, 0x14, 0x3, 0x2a},
+ {590000000, 0xc4, 0xaa, 0x2, 0xe, 0x22, 0x17, 0xa, 0x4f, 0x15, 0x3, 0x2b},
+ {600000000, 0xc8, 0x0, 0x2, 0xe, 0x23, 0x17, 0xa, 0x50, 0x15, 0x3, 0x2b},
+ {610000000, 0xcb, 0x55, 0x2, 0xf, 0x22, 0x17, 0xb, 0x50, 0x15, 0x3, 0x2b},
+ {620000000, 0xce, 0xaa, 0x2, 0xf, 0x23, 0x18, 0xb, 0x52, 0x16, 0x3, 0x2c},
+ {630000000, 0x69, 0x0, 0x1, 0x7, 0x12, 0xd, 0x5, 0x2a, 0xc, 0x1, 0x15},
+ {640000000, 0x6a, 0xaa, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2b, 0xd, 0x1, 0x16},
+ {650000000, 0x6c, 0x55, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2c, 0xd, 0x1, 0x16},
+ {660000000, 0x6e, 0x0, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2d, 0xd, 0x1, 0x16},
+ {670000000, 0x6f, 0xaa, 0x1, 0x8, 0x13, 0xe, 0x5, 0x2d, 0xd, 0x1, 0x16},
+ {680000000, 0x71, 0x55, 0x1, 0x8, 0x13, 0xe, 0x5, 0x2e, 0xd, 0x1, 0x16},
+ {690000000, 0x73, 0x0, 0x1, 0x8, 0x14, 0xe, 0x6, 0x2e, 0xd, 0x1, 0x16},
+ {700000000, 0x74, 0xaa, 0x1, 0x8, 0x14, 0xf, 0x6, 0x2f, 0xe, 0x1, 0x16},
+ {710000000, 0x76, 0x55, 0x1, 0x8, 0x14, 0xf, 0x6, 0x2f, 0xe, 0x1, 0x17},
+ {720000000, 0x78, 0x0, 0x1, 0x8, 0x15, 0xf, 0x6, 0x30, 0xe, 0x1, 0x17},
+ {730000000, 0x79, 0xaa, 0x1, 0x8, 0x15, 0xf, 0x6, 0x31, 0xe, 0x1, 0x17},
+ {740000000, 0x7b, 0x55, 0x1, 0x8, 0x15, 0xf, 0x6, 0x32, 0xe, 0x1, 0x17},
+ {750000000, 0x7d, 0x0, 0x1, 0x8, 0x16, 0xf, 0x6, 0x32, 0xe, 0x1, 0x17},
+ {760000000, 0x7e, 0xaa, 0x1, 0x9, 0x15, 0xf, 0x6, 0x33, 0xe, 0x1, 0x17},
+ {770000000, 0x80, 0x55, 0x1, 0x9, 0x15, 0x10, 0x6, 0x34, 0xf, 0x1, 0x18},
+ {780000000, 0x82, 0x0, 0x1, 0x9, 0x16, 0x10, 0x6, 0x35, 0xf, 0x1, 0x18},
+ {790000000, 0x83, 0xaa, 0x1, 0x9, 0x16, 0x10, 0x7, 0x34, 0xf, 0x1, 0x18},
+ {800000000, 0x85, 0x55, 0x1, 0x9, 0x17, 0x10, 0x7, 0x35, 0xf, 0x1, 0x18},
+ {810000000, 0x87, 0x0, 0x1, 0x9, 0x17, 0x10, 0x7, 0x36, 0xf, 0x1, 0x18},
+ {820000000, 0x88, 0xaa, 0x1, 0x9, 0x17, 0x10, 0x7, 0x37, 0xf, 0x1, 0x18},
+ {830000000, 0x8a, 0x55, 0x1, 0x9, 0x18, 0x10, 0x7, 0x37, 0xf, 0x1, 0x18},
+ {840000000, 0x8c, 0x0, 0x1, 0x9, 0x18, 0x11, 0x7, 0x38, 0x10, 0x1, 0x19},
+ {850000000, 0x8d, 0xaa, 0x1, 0xa, 0x17, 0x11, 0x7, 0x39, 0x10, 0x1, 0x19},
+ {860000000, 0x8f, 0x55, 0x1, 0xa, 0x18, 0x11, 0x7, 0x39, 0x10, 0x1, 0x19},
+ {870000000, 0x91, 0x0, 0x1, 0xa, 0x18, 0x11, 0x7, 0x3a, 0x10, 0x1, 0x19},
+ {880000000, 0x92, 0xaa, 0x1, 0xa, 0x18, 0x11, 0x7, 0x3b, 0x10, 0x1, 0x19},
+ {890000000, 0x94, 0x55, 0x1, 0xa, 0x19, 0x11, 0x7, 0x3c, 0x10, 0x1, 0x19},
+ {900000000, 0x96, 0x0, 0x1, 0xa, 0x19, 0x12, 0x8, 0x3c, 0x10, 0x1, 0x19},
+ {910000000, 0x97, 0xaa, 0x1, 0xa, 0x19, 0x12, 0x8, 0x3c, 0x11, 0x1, 0x1a},
+ {920000000, 0x99, 0x55, 0x1, 0xa, 0x1a, 0x12, 0x8, 0x3d, 0x11, 0x1, 0x1a},
+ {930000000, 0x9b, 0x0, 0x1, 0xa, 0x1a, 0x12, 0x8, 0x3e, 0x11, 0x1, 0x1a},
+ {940000000, 0x9c, 0xaa, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x3e, 0x11, 0x1, 0x1a},
+ {950000000, 0x9e, 0x55, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x3f, 0x11, 0x1, 0x1a},
+ {960000000, 0xa0, 0x0, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x40, 0x11, 0x1, 0x1a},
+ {970000000, 0xa1, 0xaa, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x41, 0x12, 0x1, 0x1b},
+ {980000000, 0xa3, 0x55, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x42, 0x12, 0x1, 0x1b},
+ {990000000, 0xa5, 0x0, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x42, 0x12, 0x1, 0x1b},
+ {1000000000, 0xa6, 0xaa, 0x1, 0xb, 0x1c, 0x13, 0x9, 0x42, 0x12, 0x1, 0x1b},
+};
+
+struct stf_dphy_info {
+ /**
+ * @maps:
+ *
+ * Physical lanes and logic lanes mapping table.
+ *
+ * The default order is:
+ * [data lane 0, data lane 1, data lane 2, data lane 3, clk lane]
+ */
+ u8 maps[STF_MAP_LANES_NUM];
+};
+
+struct stf_dphy {
+ struct device *dev;
+ void __iomem *topsys;
+ struct clk *txesc_clk;
+ struct reset_control *sys_rst;
+
+ struct phy_configure_opts_mipi_dphy config;
+
+ struct phy *phy;
+ const struct stf_dphy_info *info;
+};
+
+static u32 stf_dphy_get_config_index(u32 bitrate)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_configs); i++) {
+ if (reg_configs[i].bitrate == bitrate)
+ return i;
+ }
+
+ return 0;
+}
+
+static void stf_dphy_hw_reset(struct stf_dphy *dphy, int assert)
+{
+ int rc;
+ u32 status = 0;
+
+ writel(FIELD_PREP(STF_DPHY_RESETB, assert),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100));
+
+ if (assert) {
+ rc = readl_poll_timeout_atomic(dphy->topsys +
+ STF_DPHY_APBIFSAIF_SYSCFG(8),
+ status,
+ !(FIELD_GET(STF_DPHY_RGS_CDTX_PLL_UNLOCK, status)),
+ STF_DPHY_HW_DELAY_US, STF_DPHY_HW_TIMEOUT_US);
+ if (rc)
+ dev_err(dphy->dev, "MIPI dphy-tx # PLL Locked\n");
+ }
+}
+
+static int stf_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+ const struct stf_dphy_info *info = dphy->info;
+ const struct stf_dphy_config *p = reg_configs;
+ unsigned long alignment = STF_DPHY_BITRATE_ALIGN;
+ u32 bitrate = opts->mipi_dphy.hs_clk_rate;
+ u32 tmp;
+ u32 i;
+
+ if (bitrate % alignment)
+ bitrate += alignment - (bitrate % alignment);
+
+ i = stf_dphy_get_config_index(bitrate);
+
+ tmp = readl(dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100));
+ tmp &= ~STF_DPHY_REFCLK_IN_SEL;
+ tmp |= FIELD_PREP(STF_DPHY_REFCLK_IN_SEL, STF_DPHY_REFCLK_12M);
+ writel(tmp, dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100));
+
+ writel(FIELD_PREP(STF_DPHY_RG_CDTX_L0N_HSTX_RES, 0x10) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_L0P_HSTX_RES, 0x10),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(8));
+
+ writel(FIELD_PREP(STF_DPHY_RG_CDTX_L0N_HSTX_RES, 0x10) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_L2N_HSTX_RES, 0x10) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_L3N_HSTX_RES, 0x10) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_L1P_HSTX_RES, 0x10) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_L2P_HSTX_RES, 0x10) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_L3P_HSTX_RES, 0x10),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(12));
+
+ writel(FIELD_PREP(STF_DPHY_RG_CDTX_L4N_HSTX_RES, 0x10) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_L4P_HSTX_RES, 0x10),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(16));
+
+ /* Lane setting */
+ writel(FIELD_PREP(STF_DPHY_AON_POWER_READY_N,
+ STF_DPHY_AON_POWER_READY_N_ACTIVE) |
+ FIELD_PREP(STF_DPHY_CFG_L0_SWAP_SEL, info->maps[0]) |
+ FIELD_PREP(STF_DPHY_CFG_L1_SWAP_SEL, info->maps[1]) |
+ FIELD_PREP(STF_DPHY_CFG_L2_SWAP_SEL, info->maps[2]) |
+ FIELD_PREP(STF_DPHY_CFG_L3_SWAP_SEL, info->maps[3]) |
+ FIELD_PREP(STF_DPHY_CFG_L4_SWAP_SEL, info->maps[4]),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(0));
+
+ /* PLL setting */
+ writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_SSC_EN, 0x0),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(28));
+
+ writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_LDO_STB_X2_EN, 0x1) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FM_EN, 0x1) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_PLL_PRE_DIV, 0x0) |
+ FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FBK_INT, p[i].pll_fbk_int),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(24));
+
+ writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FBK_FRA,
+ STF_DPHY_LSHIFT_16(p[i].pll_fbk_fra_val) |
+ STF_DPHY_LSHIFT_8(p[i].pll_fbk_fra_val) |
+ p[i].pll_fbk_fra_val),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(20));
+
+ writel(FIELD_PREP(STF_DPHY_RG_EXTD_CYCLE_SEL, p[i].extd_cycle_sel),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(40));
+
+ writel(FIELD_PREP(STF_DPHY_RG_DLANE_HS_PRE_TIME, p[i].dlane_hs_pre_time) |
+ FIELD_PREP(STF_DPHY_RG_DLANE_HS_ZERO_TIME, p[i].dlane_hs_zero_time) |
+ FIELD_PREP(STF_DPHY_RG_DLANE_HS_TRAIL_TIME, p[i].dlane_hs_trail_time) |
+ FIELD_PREP(STF_DPHY_RG_CLANE_HS_ZERO_TIME, p[i].clane_hs_zero_time),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(36));
+
+ writel(FIELD_PREP(STF_DPHY_RG_CLANE_HS_PRE_TIME, p[i].clane_hs_pre_time) |
+ FIELD_PREP(STF_DPHY_RG_CLANE_HS_TRAIL_TIME, p[i].clane_hs_trail_time) |
+ FIELD_PREP(STF_DPHY_RG_CLANE_HS_CLK_PRE_TIME, p[i].clane_hs_clk_pre_time) |
+ FIELD_PREP(STF_DPHY_RG_CLANE_HS_CLK_POST_TIME, p[i].clane_hs_clk_post_time),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(32));
+
+ return 0;
+}
+
+static int stf_dphy_init(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+ int ret;
+
+ stf_dphy_hw_reset(dphy, 1);
+
+ writel(FIELD_PREP(STF_DPHY_SCFG_PPI_C_READY_SEL, 0) |
+ FIELD_PREP(STF_DPHY_SCFG_DSI_TXREADY_ESC_SEL, 0),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(48));
+
+ writel(FIELD_PREP(STF_DPHY_SCFG_C_HS_PRE_ZERO_TIME, 0x30),
+ dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(44));
+
+ ret = clk_prepare_enable(dphy->txesc_clk);
+ if (ret) {
+ dev_err(dphy->dev, "Failed to prepare/enable txesc_clk\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(dphy->sys_rst);
+ if (ret) {
+ dev_err(dphy->dev, "Failed to deassert sys_rst\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stf_dphy_exit(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = reset_control_assert(dphy->sys_rst);
+ if (ret) {
+ dev_err(dphy->dev, "Failed to assert sys_rst\n");
+ return ret;
+ }
+
+ clk_disable_unprepare(dphy->txesc_clk);
+
+ stf_dphy_hw_reset(dphy, 0);
+
+ return 0;
+}
+
+static int stf_dphy_power_on(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+
+ return pm_runtime_resume_and_get(dphy->dev);
+}
+
+static int stf_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts)
+{
+ if (mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stf_dphy_power_off(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+
+ return pm_runtime_put_sync(dphy->dev);
+}
+
+static const struct phy_ops stf_dphy_ops = {
+ .power_on = stf_dphy_power_on,
+ .power_off = stf_dphy_power_off,
+ .init = stf_dphy_init,
+ .exit = stf_dphy_exit,
+ .configure = stf_dphy_configure,
+ .validate = stf_dphy_validate,
+ .owner = THIS_MODULE,
+};
+
+static int stf_dphy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct stf_dphy *dphy;
+
+ dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
+ if (!dphy)
+ return -ENOMEM;
+
+ dphy->info = of_device_get_match_data(&pdev->dev);
+
+ dphy->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, dphy);
+
+ dphy->topsys = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dphy->topsys))
+ return PTR_ERR(dphy->topsys);
+
+ pm_runtime_enable(&pdev->dev);
+
+ dphy->txesc_clk = devm_clk_get(&pdev->dev, "txesc");
+ if (IS_ERR(dphy->txesc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dphy->txesc_clk),
+ "Failed to get txesc clock\n");
+
+ dphy->sys_rst = devm_reset_control_get_exclusive(&pdev->dev, "sys");
+ if (IS_ERR(dphy->sys_rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dphy->sys_rst),
+ "Failed to get sys reset\n");
+
+ dphy->phy = devm_phy_create(&pdev->dev, NULL, &stf_dphy_ops);
+ if (IS_ERR(dphy->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dphy->phy),
+ "Failed to create phy\n");
+
+ phy_set_drvdata(dphy->phy, dphy);
+
+ phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy_provider),
+ "Failed to register phy\n");
+
+ return 0;
+}
+
+static const struct stf_dphy_info starfive_dphy_info = {
+ .maps = {0, 1, 2, 3, 4},
+};
+
+static const struct of_device_id stf_dphy_dt_ids[] = {
+ {
+ .compatible = "starfive,jh7110-dphy-tx",
+ .data = &starfive_dphy_info,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stf_dphy_dt_ids);
+
+static struct platform_driver stf_dphy_driver = {
+ .driver = {
+ .name = "starfive-dphy-tx",
+ .of_match_table = stf_dphy_dt_ids,
+ },
+ .probe = stf_dphy_probe,
+};
+module_platform_driver(stf_dphy_driver);
+
+MODULE_AUTHOR("Keith Zhao <keith.zhao@starfivetech.com>");
+MODULE_AUTHOR("Shengyang Chen <shengyang.chen@starfivetech.com>");
+MODULE_DESCRIPTION("StarFive JH7110 DPHY TX driver");
+MODULE_LICENSE("GPL");
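For context, a DSI host or display controller driver would typically drive a D-PHY such as this one through the generic PHY framework. The consumer-side sketch below is illustrative only and not part of this patch; it assumes the caller already obtained the phy (for example with devm_phy_get()):

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

static int example_dsi_enable_dphy(struct phy *dphy, unsigned long pixel_clock,
                                   unsigned int bpp, unsigned int lanes)
{
        union phy_configure_opts opts;
        int ret;

        /* derive standard D-PHY timings for the requested link */
        ret = phy_mipi_dphy_get_default_config(pixel_clock, bpp, lanes,
                                               &opts.mipi_dphy);
        if (ret)
                return ret;

        ret = phy_init(dphy);
        if (ret)
                return ret;

        ret = phy_configure(dphy, &opts);
        if (ret)
                goto err_exit;

        ret = phy_power_on(dphy);
        if (ret)
                goto err_exit;

        return 0;

err_exit:
        phy_exit(dphy);
        return ret;
}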
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 8b3b937de624..673449607c02 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -30,7 +30,6 @@
#define LANE_R058 0x258
#define LANE_R06c 0x26c
#define LANE_R070 0x270
-#define LANE_R070 0x270
#define LANE_R19C 0x39c
#define COMLANE_R004 0xa04
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 00d7e6a6de03..7f626c597025 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1076,27 +1076,12 @@ static int wiz_clock_register(struct wiz *wiz)
return ret;
}
-static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
+static void wiz_clock_init(struct wiz *wiz)
{
- const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
- struct device *dev = wiz->dev;
- struct device_node *clk_node;
- const char *node_name;
unsigned long rate;
- struct clk *clk;
- int ret;
- int i;
- clk = devm_clk_get(dev, "core_ref_clk");
- if (IS_ERR(clk)) {
- dev_err(dev, "core_ref_clk clock not found\n");
- ret = PTR_ERR(clk);
- return ret;
- }
- wiz->input_clks[WIZ_CORE_REFCLK] = clk;
-
- rate = clk_get_rate(clk);
- if (rate >= 100000000)
+ rate = clk_get_rate(wiz->input_clks[WIZ_CORE_REFCLK]);
+ if (rate >= REF_CLK_100MHZ)
regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1);
else
regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
@@ -1120,35 +1105,55 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
break;
}
- if (wiz->data->pma_cmn_refclk1_int_mode) {
- clk = devm_clk_get(dev, "core_ref1_clk");
- if (IS_ERR(clk)) {
- dev_err(dev, "core_ref1_clk clock not found\n");
- ret = PTR_ERR(clk);
- return ret;
- }
- wiz->input_clks[WIZ_CORE_REFCLK1] = clk;
-
- rate = clk_get_rate(clk);
- if (rate >= 100000000)
+ if (wiz->input_clks[WIZ_CORE_REFCLK1]) {
+ rate = clk_get_rate(wiz->input_clks[WIZ_CORE_REFCLK1]);
+ if (rate >= REF_CLK_100MHZ)
regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x1);
else
regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x3);
}
- clk = devm_clk_get(dev, "ext_ref_clk");
- if (IS_ERR(clk)) {
- dev_err(dev, "ext_ref_clk clock not found\n");
- ret = PTR_ERR(clk);
- return ret;
- }
- wiz->input_clks[WIZ_EXT_REFCLK] = clk;
-
- rate = clk_get_rate(clk);
- if (rate >= 100000000)
+ rate = clk_get_rate(wiz->input_clks[WIZ_EXT_REFCLK]);
+ if (rate >= REF_CLK_100MHZ)
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0);
else
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
+}
+
+static int wiz_clock_probe(struct wiz *wiz, struct device_node *node)
+{
+ const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
+ struct device_node *clk_node;
+ const char *node_name;
+ struct clk *clk;
+ int ret;
+ int i;
+
+ clk = devm_clk_get(dev, "core_ref_clk");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "core_ref_clk clock not found\n");
+
+ wiz->input_clks[WIZ_CORE_REFCLK] = clk;
+
+ if (wiz->data->pma_cmn_refclk1_int_mode) {
+ clk = devm_clk_get(dev, "core_ref1_clk");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "core_ref1_clk clock not found\n");
+
+ wiz->input_clks[WIZ_CORE_REFCLK1] = clk;
+ }
+
+ clk = devm_clk_get(dev, "ext_ref_clk");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "ext_ref_clk clock not found\n");
+
+ wiz->input_clks[WIZ_EXT_REFCLK] = clk;
+
+ wiz_clock_init(wiz);
switch (wiz->type) {
case AM64_WIZ_10G:
@@ -1157,8 +1162,9 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
case J721S2_WIZ_10G:
ret = wiz_clock_register(wiz);
if (ret)
- dev_err(dev, "Failed to register wiz clocks\n");
- return ret;
+ return dev_err_probe(dev, ret, "Failed to register wiz clocks\n");
+
+ return 0;
default:
break;
}
@@ -1167,16 +1173,15 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
node_name = clk_mux_sel[i].node_name;
clk_node = of_get_child_by_name(node, node_name);
if (!clk_node) {
- dev_err(dev, "Unable to get %s node\n", node_name);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "Unable to get %s node\n", node_name);
goto err;
}
ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i],
clk_mux_sel[i].table);
if (ret) {
- dev_err(dev, "Failed to register %s clock\n",
- node_name);
+ dev_err_probe(dev, ret, "Failed to register %s clock\n",
+ node_name);
of_node_put(clk_node);
goto err;
}
@@ -1188,16 +1193,15 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
node_name = clk_div_sel[i].node_name;
clk_node = of_get_child_by_name(node, node_name);
if (!clk_node) {
- dev_err(dev, "Unable to get %s node\n", node_name);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "Unable to get %s node\n", node_name);
goto err;
}
ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i],
clk_div_sel[i].table);
if (ret) {
- dev_err(dev, "Failed to register %s clock\n",
- node_name);
+ dev_err_probe(dev, ret, "Failed to register %s clock\n",
+ node_name);
of_node_put(clk_node);
goto err;
}
@@ -1593,7 +1597,7 @@ static int wiz_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = wiz_clock_init(wiz, node);
+ ret = wiz_clock_probe(wiz, node);
if (ret < 0) {
dev_warn(dev, "Failed to initialize clocks\n");
goto err_get_sync;
@@ -1655,12 +1659,41 @@ static void wiz_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
}
+static int wiz_resume_noirq(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ struct wiz *wiz = dev_get_drvdata(dev);
+ int ret;
+
+ /* Enable supplemental Control override if available */
+ if (wiz->sup_legacy_clk_override)
+ regmap_field_write(wiz->sup_legacy_clk_override, 1);
+
+ wiz_clock_init(wiz);
+
+ ret = wiz_init(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ initialization failed\n");
+ goto err_wiz_init;
+ }
+
+ return 0;
+
+err_wiz_init:
+ wiz_clock_cleanup(wiz, node);
+
+ return ret;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(wiz_pm_ops, NULL, wiz_resume_noirq);
+
static struct platform_driver wiz_driver = {
.probe = wiz_probe,
.remove_new = wiz_remove,
.driver = {
.name = "wiz",
.of_match_table = wiz_id_table,
+ .pm = pm_sleep_ptr(&wiz_pm_ops),
},
};
module_platform_driver(wiz_driver);
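The PM wiring added above uses the generic dev_pm_ops helpers from <linux/pm.h>. A minimal sketch of the same pattern for a hypothetical platform driver (names are illustrative):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_resume_noirq(struct device *dev)
{
        /* reprogram hardware that must be ready before interrupts resume */
        return 0;
}

/* resume-only noirq callbacks; no suspend hook is needed here */
static DEFINE_NOIRQ_DEV_PM_OPS(example_pm_ops, NULL, example_resume_noirq);

static struct platform_driver example_driver = {
        .driver = {
                .name = "example",
                /* dropped entirely when CONFIG_PM_SLEEP is disabled */
                .pm = pm_sleep_ptr(&example_pm_ops),
        },
};
module_platform_driver(example_driver);

MODULE_DESCRIPTION("Example noirq PM wiring");
MODULE_LICENSE("GPL");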
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index dc8319bda43d..cb15041371c9 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -80,7 +81,8 @@
/* Reference clock selection parameters */
#define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4)
-#define L0_REF_CLK_SEL_MASK 0x8f
+#define L0_REF_CLK_LCL_SEL BIT(7)
+#define L0_REF_CLK_SEL_MASK 0x9f
/* Calibration digital logic parameters */
#define L3_TM_CALIB_DIG19 0xec4c
@@ -122,6 +124,15 @@
#define ICM_PROTOCOL_DP 0x4
#define ICM_PROTOCOL_SGMII 0x5
+static const char *const xpsgtr_icm_str[] = {
+ [ICM_PROTOCOL_PD] = "none",
+ [ICM_PROTOCOL_PCIE] = "PCIe",
+ [ICM_PROTOCOL_SATA] = "SATA",
+ [ICM_PROTOCOL_USB] = "USB",
+ [ICM_PROTOCOL_DP] = "DisplayPort",
+ [ICM_PROTOCOL_SGMII] = "SGMII",
+};
+
/* Test Mode common reset control parameters */
#define TM_CMN_RST 0x10018
#define TM_CMN_RST_EN 0x1
@@ -146,22 +157,6 @@
/* Total number of controllers */
#define CONTROLLERS_PER_LANE 5
-/* Protocol Type parameters */
-#define XPSGTR_TYPE_USB0 0 /* USB controller 0 */
-#define XPSGTR_TYPE_USB1 1 /* USB controller 1 */
-#define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */
-#define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */
-#define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */
-#define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */
-#define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */
-#define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */
-#define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */
-#define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */
-#define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */
-#define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */
-#define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */
-#define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */
-
/* Timeout values */
#define TIMEOUT_US 1000
@@ -184,7 +179,8 @@ struct xpsgtr_ssc {
/**
* struct xpsgtr_phy - representation of a lane
* @phy: pointer to the kernel PHY device
- * @type: controller which uses this lane
+ * @instance: instance of the protocol type (such as the lane within a
+ * protocol, or the USB/Ethernet controller)
* @lane: lane number
* @protocol: protocol in which the lane operates
* @skip_phy_init: skip phy_init() if true
@@ -193,7 +189,7 @@ struct xpsgtr_ssc {
*/
struct xpsgtr_phy {
struct phy *phy;
- u8 type;
+ u8 instance;
u8 lane;
u8 protocol;
bool skip_phy_init;
@@ -308,10 +304,30 @@ static int xpsgtr_wait_pll_lock(struct phy *phy)
struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
unsigned int timeout = TIMEOUT_US;
+ u8 protocol = gtr_phy->protocol;
int ret;
dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");
+ /*
+ * For DP and PCIe, only the instance 0 PLL is used. Switch to that phy
+ * so we wait on the right PLL.
+ */
+ if ((protocol == ICM_PROTOCOL_DP || protocol == ICM_PROTOCOL_PCIE) &&
+ gtr_phy->instance) {
+ int i;
+
+ for (i = 0; i < NUM_LANES; i++) {
+ gtr_phy = &gtr_dev->phys[i];
+
+ if (gtr_phy->protocol == protocol && !gtr_phy->instance)
+ goto got_phy;
+ }
+
+ return -EBUSY;
+ }
+
+got_phy:
while (1) {
u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
@@ -330,8 +346,8 @@ static int xpsgtr_wait_pll_lock(struct phy *phy)
if (ret == -ETIMEDOUT)
dev_err(gtr_dev->dev,
- "lane %u (type %u, protocol %u): PLL lock timeout\n",
- gtr_phy->lane, gtr_phy->type, gtr_phy->protocol);
+ "lane %u (protocol %u, instance %u): PLL lock timeout\n",
+ gtr_phy->lane, gtr_phy->protocol, gtr_phy->instance);
return ret;
}
@@ -349,11 +365,12 @@ static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
PLL_FREQ_MASK, ssc->pll_ref_clk);
/* Enable lane clock sharing, if required */
- if (gtr_phy->refclk != gtr_phy->lane) {
- /* Lane3 Ref Clock Selection Register */
+ if (gtr_phy->refclk == gtr_phy->lane)
+ xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
+ L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL);
+ else
xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
- }
/* SSC step size [7:0] */
xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
@@ -573,7 +590,7 @@ static int xpsgtr_phy_init(struct phy *phy)
mutex_lock(&gtr_dev->gtr_mutex);
/* Configure and enable the clock when peripheral phy_init call */
- if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane]))
+ if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk]))
goto out;
/* Skip initialization if not required. */
@@ -625,7 +642,7 @@ static int xpsgtr_phy_exit(struct phy *phy)
gtr_phy->skip_phy_init = false;
/* Ensure that disable clock only, which configure for lane */
- clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]);
+ clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]);
return 0;
}
@@ -638,16 +655,7 @@ static int xpsgtr_phy_power_on(struct phy *phy)
/* Skip initialization if not required. */
if (!xpsgtr_phy_init_required(gtr_phy))
return ret;
- /*
- * Wait for the PLL to lock. For DP, only wait on DP0 to avoid
- * cumulating waits for both lanes. The user is expected to initialize
- * lane 0 last.
- */
- if (gtr_phy->protocol != ICM_PROTOCOL_DP ||
- gtr_phy->type == XPSGTR_TYPE_DP_0)
- ret = xpsgtr_wait_pll_lock(phy);
-
- return ret;
+ return xpsgtr_wait_pll_lock(phy);
}
static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
@@ -674,73 +682,33 @@ static const struct phy_ops xpsgtr_phyops = {
* OF Xlate Support
*/
-/* Set the lane type and protocol based on the PHY type and instance number. */
+/* Set the lane protocol and instance based on the PHY type and instance number. */
static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
unsigned int phy_instance)
{
unsigned int num_phy_types;
- const int *phy_types;
switch (phy_type) {
- case PHY_TYPE_SATA: {
- static const int types[] = {
- XPSGTR_TYPE_SATA_0,
- XPSGTR_TYPE_SATA_1,
- };
-
- phy_types = types;
- num_phy_types = ARRAY_SIZE(types);
+ case PHY_TYPE_SATA:
+ num_phy_types = 2;
gtr_phy->protocol = ICM_PROTOCOL_SATA;
break;
- }
- case PHY_TYPE_USB3: {
- static const int types[] = {
- XPSGTR_TYPE_USB0,
- XPSGTR_TYPE_USB1,
- };
-
- phy_types = types;
- num_phy_types = ARRAY_SIZE(types);
+ case PHY_TYPE_USB3:
+ num_phy_types = 2;
gtr_phy->protocol = ICM_PROTOCOL_USB;
break;
- }
- case PHY_TYPE_DP: {
- static const int types[] = {
- XPSGTR_TYPE_DP_0,
- XPSGTR_TYPE_DP_1,
- };
-
- phy_types = types;
- num_phy_types = ARRAY_SIZE(types);
+ case PHY_TYPE_DP:
+ num_phy_types = 2;
gtr_phy->protocol = ICM_PROTOCOL_DP;
break;
- }
- case PHY_TYPE_PCIE: {
- static const int types[] = {
- XPSGTR_TYPE_PCIE_0,
- XPSGTR_TYPE_PCIE_1,
- XPSGTR_TYPE_PCIE_2,
- XPSGTR_TYPE_PCIE_3,
- };
-
- phy_types = types;
- num_phy_types = ARRAY_SIZE(types);
+ case PHY_TYPE_PCIE:
+ num_phy_types = 4;
gtr_phy->protocol = ICM_PROTOCOL_PCIE;
break;
- }
- case PHY_TYPE_SGMII: {
- static const int types[] = {
- XPSGTR_TYPE_SGMII0,
- XPSGTR_TYPE_SGMII1,
- XPSGTR_TYPE_SGMII2,
- XPSGTR_TYPE_SGMII3,
- };
-
- phy_types = types;
- num_phy_types = ARRAY_SIZE(types);
+ case PHY_TYPE_SGMII:
+ num_phy_types = 4;
gtr_phy->protocol = ICM_PROTOCOL_SGMII;
break;
- }
default:
return -EINVAL;
}
@@ -748,22 +716,25 @@ static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
if (phy_instance >= num_phy_types)
return -EINVAL;
- gtr_phy->type = phy_types[phy_instance];
+ gtr_phy->instance = phy_instance;
return 0;
}
/*
- * Valid combinations of controllers and lanes (Interconnect Matrix).
+ * Valid combinations of controllers and lanes (Interconnect Matrix). Each
+ * "instance" represents one controller for a lane. For PCIe and DP, the
+ * "instance" is the logical lane in the link. For SATA, USB, and SGMII,
+ * the instance is the index of the controller.
+ *
+ * This information is only used to validate the devicetree reference, and is
+ * not used when programming the hardware.
*/
static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
- { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
- XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
- { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
- XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
- { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
- XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
- { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
- XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
+ /* PCIe, SATA, USB, DP, SGMII */
+ { 0, 0, 0, 1, 0 }, /* Lane 0 */
+ { 1, 1, 0, 0, 1 }, /* Lane 1 */
+ { 2, 0, 0, 1, 2 }, /* Lane 2 */
+ { 3, 1, 1, 0, 3 }, /* Lane 3 */
};
/* Translate OF phandle and args to PHY instance. */
@@ -798,6 +769,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
phy_type = args->args[1];
phy_instance = args->args[2];
+ guard(mutex)(&gtr_phy->phy->mutex);
ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
if (ret < 0) {
dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
@@ -818,7 +790,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
* is allowed to operate on the lane.
*/
for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
- if (icm_matrix[phy_lane][i] == gtr_phy->type)
+ if (icm_matrix[phy_lane][i] == gtr_phy->instance)
return gtr_phy->phy;
}
@@ -826,6 +798,34 @@ static struct phy *xpsgtr_xlate(struct device *dev,
}
/*
+ * DebugFS
+ */
+
+static int xpsgtr_status_read(struct seq_file *seq, void *data)
+{
+ struct device *dev = seq->private;
+ struct xpsgtr_phy *gtr_phy = dev_get_drvdata(dev);
+ struct clk *clk;
+ u32 pll_status;
+
+ mutex_lock(&gtr_phy->phy->mutex);
+ pll_status = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
+ clk = gtr_phy->dev->clk[gtr_phy->refclk];
+
+ seq_printf(seq, "Lane: %u\n", gtr_phy->lane);
+ seq_printf(seq, "Protocol: %s\n",
+ xpsgtr_icm_str[gtr_phy->protocol]);
+ seq_printf(seq, "Instance: %u\n", gtr_phy->instance);
+ seq_printf(seq, "Reference clock: %u (%pC)\n", gtr_phy->refclk, clk);
+ seq_printf(seq, "Reference rate: %lu\n", clk_get_rate(clk));
+ seq_printf(seq, "PLL locked: %s\n",
+ pll_status & PLL_STATUS_LOCKED ? "yes" : "no");
+
+ mutex_unlock(&gtr_phy->phy->mutex);
+ return 0;
+}
+
+/*
* Power Management
*/
@@ -974,6 +974,8 @@ static int xpsgtr_probe(struct platform_device *pdev)
gtr_phy->phy = phy;
phy_set_drvdata(phy, gtr_phy);
+ debugfs_create_devm_seqfile(&phy->dev, "status", phy->debugfs,
+ xpsgtr_status_read);
}
/* Register the PHY provider. */
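
The reworked icm_matrix stores plain per-protocol instance numbers instead of the old XPSGTR_TYPE_* values, so xpsgtr_xlate() only needs to check whether the requested instance appears in the lane's row. A minimal sketch of that lookup, assuming the icm_matrix and CONTROLLERS_PER_LANE definitions above (the helper name is illustrative, not part of the driver):

	static bool xpsgtr_lane_accepts(unsigned int lane, u8 instance)
	{
		unsigned int i;

		/* Any matching column means the controller may use this lane. */
		for (i = 0; i < CONTROLLERS_PER_LANE; i++)
			if (icm_matrix[lane][i] == instance)
				return true;

		return false;
	}
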
diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
index df3e5d82da4b..f2609a35c312 100644
--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
+++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
@@ -730,9 +730,7 @@ static int s32_pinctrl_parse_groups(struct device_node *np,
struct s32_pin_group *grp,
struct s32_pinctrl_soc_info *info)
{
- const __be32 *p;
struct device *dev;
- struct property *prop;
unsigned int *pins, *sss;
int i, npins;
u32 pinmux;
@@ -763,7 +761,7 @@ static int s32_pinctrl_parse_groups(struct device_node *np,
return -ENOMEM;
i = 0;
- of_property_for_each_u32(np, "pinmux", prop, p, pinmux) {
+ of_property_for_each_u32(np, "pinmux", pinmux) {
pins[i] = get_pin_no(pinmux);
sss[i] = get_pin_func(pinmux);
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 2753e14c3e38..a898e40451fe 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -763,8 +763,6 @@ static int k210_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
unsigned int *reserved_maps,
unsigned int *num_maps)
{
- struct property *prop;
- const __be32 *p;
int ret, pinmux_groups;
u32 pinmux_group;
unsigned long *configs = NULL;
@@ -797,7 +795,7 @@ static int k210_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- of_property_for_each_u32(np, "pinmux", prop, p, pinmux_group) {
+ of_property_for_each_u32(np, "pinmux", pinmux_group) {
const char *group_name, *func_name;
u32 pin = FIELD_GET(K210_PG_PIN, pinmux_group);
u32 func = FIELD_GET(K210_PG_FUNC, pinmux_group);
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index 118caa651bec..af8d573aae93 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -306,9 +306,9 @@ const void *ssam_device_get_match_data(const struct ssam_device *dev)
}
EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
-static int ssam_bus_match(struct device *dev, struct device_driver *drv)
+static int ssam_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
+ const struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
struct ssam_device *sdev = to_ssam_device(dev);
if (!is_ssam_device(dev))
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 6bfae28b962a..1d0b2d6040d1 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -727,10 +727,7 @@ char *wmi_get_acpi_device_uid(const char *guid_string)
}
EXPORT_SYMBOL_GPL(wmi_get_acpi_device_uid);
-static inline struct wmi_driver *drv_to_wdrv(struct device_driver *drv)
-{
- return container_of(drv, struct wmi_driver, driver);
-}
+#define drv_to_wdrv(__drv) container_of_const(__drv, struct wmi_driver, driver)
/*
* sysfs interface
@@ -877,9 +874,9 @@ static void wmi_dev_release(struct device *dev)
kfree(wblock);
}
-static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+static int wmi_dev_match(struct device *dev, const struct device_driver *driver)
{
- struct wmi_driver *wmi_driver = drv_to_wdrv(driver);
+ const struct wmi_driver *wmi_driver = drv_to_wdrv(driver);
struct wmi_block *wblock = dev_to_wblock(dev);
const struct wmi_device_id *id = wmi_driver->id_table;
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 3483e52e3a81..7de7aabb275e 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -41,7 +41,7 @@ int compare_pnp_id(struct pnp_id *pos, const char *id)
return 0;
}
-static const struct pnp_device_id *match_device(struct pnp_driver *drv,
+static const struct pnp_device_id *match_device(const struct pnp_driver *drv,
struct pnp_dev *dev)
{
const struct pnp_device_id *drv_id = drv->id_table;
@@ -150,10 +150,10 @@ static void pnp_device_shutdown(struct device *dev)
drv->shutdown(pnp_dev);
}
-static int pnp_bus_match(struct device *dev, struct device_driver *drv)
+static int pnp_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
- struct pnp_driver *pnp_drv = to_pnp_driver(drv);
+ const struct pnp_driver *pnp_drv = to_pnp_driver(drv);
if (match_device(pnp_drv, pnp_dev) == NULL)
return 0;
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
index 7f308292d7e3..e6822c021000 100644
--- a/drivers/power/reset/piix4-poweroff.c
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -106,4 +106,5 @@ static struct pci_driver piix4_poweroff_driver = {
module_pci_driver(piix4_poweroff_driver);
MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
+MODULE_DESCRIPTION("Intel PIIX4 power-off driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f6321a42aa53..bcfa63fb9f1e 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -167,6 +167,15 @@ config BATTERY_LEGO_EV3
help
Say Y here to enable support for the LEGO MINDSTORMS EV3 battery.
+config BATTERY_LENOVO_YOGA_C630
+ tristate "Lenovo Yoga C630 battery"
+ depends on EC_LENOVO_YOGA_C630
+ help
+ This driver enables battery support on the Lenovo Yoga C630 laptop.
+
+ To compile the driver as a module, choose M here: the module will be
+ called lenovo_yoga_c630_battery.
+
config BATTERY_PMU
tristate "Apple PMU battery"
depends on PPC32 && ADB_PMU
@@ -402,6 +411,18 @@ config BATTERY_MAX17042
Driver can be build as a module (max17042_battery).
+config BATTERY_MAX1720X
+ tristate "Maxim MAX17201/MAX17205 Fuel Gauge"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ MAX1720x is a family of fuel-gauge systems for lithium-ion (Li+)
+ batteries in handheld and portable equipment. The MAX17201 is
+ configured to operate with a single lithium cell; the MAX17205
+ can operate with multiple cells.
+
+ Say Y to include support for the MAX17201/MAX17205 Fuel Gauges.
+
config BATTERY_MAX1721X
tristate "MAX17211/MAX17215 standalone gas-gauge"
depends on W1
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 31ca6653a564..8dcb41545317 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
obj-$(CONFIG_BATTERY_GAUGE_LTC2941) += ltc2941-battery-gauge.o
obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
obj-$(CONFIG_BATTERY_LEGO_EV3) += lego_ev3_battery.o
+obj-$(CONFIG_BATTERY_LENOVO_YOGA_C630) += lenovo_yoga_c630_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_QCOM_BATTMGR) += qcom_battmgr.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
@@ -52,6 +53,7 @@ obj-$(CONFIG_CHARGER_DA9150) += da9150-charger.o
obj-$(CONFIG_BATTERY_DA9150) += da9150-fg.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
+obj-$(CONFIG_BATTERY_MAX1720X) += max1720x_battery.o
obj-$(CONFIG_BATTERY_MAX1721X) += max1721x_battery.o
obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o
obj-$(CONFIG_CHARGER_RT5033) += rt5033_charger.o
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 55ab7a28056e..854491ad3ecd 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1225,8 +1225,8 @@ static bool ab8500_chargalg_time_to_restart(struct ab8500_chargalg *di)
*/
static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
{
+ const struct power_supply_maintenance_charge_table *mt;
struct power_supply_battery_info *bi = di->bm->bi;
- struct power_supply_maintenance_charge_table *mt;
int charger_status;
int ret;
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 9b34d1a60f66..93181ebfb324 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -487,14 +487,17 @@ static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
/* Only measure voltage if the charger is connected */
if (di->ac.charger_connected) {
- ret = iio_read_channel_processed(di->adc_main_charger_v, &vch);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Convert to microvolt, IIO returns millivolt */
+ ret = iio_read_channel_processed_scale(di->adc_main_charger_v,
+ &vch, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
vch = 0;
}
- /* Convert to microvolt, IIO returns millivolt */
- return vch * 1000;
+ return vch;
}
/**
@@ -539,14 +542,17 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
/* Only measure voltage if the charger is connected */
if (di->usb.charger_connected) {
- ret = iio_read_channel_processed(di->adc_vbus_v, &vch);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Convert to microvolt, IIO returns millivolt */
+ ret = iio_read_channel_processed_scale(di->adc_vbus_v,
+ &vch, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
vch = 0;
}
- /* Convert to microvolt, IIO returns millivolt */
- return vch * 1000;
+ return vch;
}
/**
@@ -562,14 +568,17 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
/* Only measure current if the charger is online */
if (di->usb.charger_online) {
- ret = iio_read_channel_processed(di->adc_usb_charger_c, &ich);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Return microamperes */
+ ret = iio_read_channel_processed_scale(di->adc_usb_charger_c,
+ &ich, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
ich = 0;
}
- /* Return microamperes */
- return ich * 1000;
+ return ich;
}
/**
@@ -585,14 +594,17 @@ static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
/* Only measure current if the charger is online */
if (di->ac.charger_online) {
- ret = iio_read_channel_processed(di->adc_main_charger_c, &ich);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Return microamperes */
+ ret = iio_read_channel_processed_scale(di->adc_main_charger_c,
+ &ich, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
ich = 0;
}
- /* Return microamperes */
- return ich * 1000;
+ return ich;
}
/**
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2ccaf6116c09..270874eeb934 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -149,11 +149,6 @@ struct ab8500_fg_flags {
bool batt_id_received;
};
-struct inst_curr_result_list {
- struct list_head list;
- int *result;
-};
-
/**
* struct ab8500_fg - ab8500 FG device information
* @dev: Pointer to the structure device
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 3e3a0d118ce5..dac9875d993c 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -727,7 +727,7 @@ static int adp5061_probe(struct i2c_client *client)
}
static const struct i2c_device_id adp5061_id[] = {
- { "adp5061", 0},
+ { "adp5061" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adp5061_id);
diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c
index 1ed1d9f99fb3..54bf88262510 100644
--- a/drivers/power/supply/bd99954-charger.c
+++ b/drivers/power/supply/bd99954-charger.c
@@ -70,13 +70,6 @@
#include "bd99954-charger.h"
-struct battery_data {
- u16 precharge_current; /* Trickle-charge Current */
- u16 fc_reg_voltage; /* Fast Charging Regulation Voltage */
- u16 voltage_min;
- u16 voltage_max;
-};
-
/* Initial field values, converted to initial register values */
struct bd9995x_init_data {
u16 vsysreg_set; /* VSYS Regulation Setting */
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index 8efceeae864c..73a7fc867b03 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -489,7 +489,7 @@ static int bq24735_charger_probe(struct i2c_client *client)
}
static const struct i2c_device_id bq24735_charger_id[] = {
- { "bq24735-charger", 0 },
+ { "bq24735-charger" },
{}
};
MODULE_DEVICE_TABLE(i2c, bq24735_charger_id);
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 03fa11a1c9b6..2f5ceaf00b94 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -1617,11 +1617,11 @@ static const struct dev_pm_ops bq25890_pm = {
};
static const struct i2c_device_id bq25890_i2c_ids[] = {
- { "bq25890", 0 },
- { "bq25892", 0 },
- { "bq25895", 0 },
- { "bq25896", 0 },
- {},
+ { "bq25890" },
+ { "bq25892" },
+ { "bq25895" },
+ { "bq25896" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, bq25890_i2c_ids);
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 99f3ccdc30a6..f63c3c410451 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -731,7 +731,7 @@ static int __maybe_unused cw_bat_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume);
static const struct i2c_device_id cw_bat_id_table[] = {
- { "cw2015", 0 },
+ { "cw2015" },
{ }
};
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 2e7fdfde47ec..0a40f425c277 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -31,8 +31,9 @@ static int ingenic_battery_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_HEALTH:
- ret = iio_read_channel_processed(bat->channel, &val->intval);
- val->intval *= 1000;
+ ret = iio_read_channel_processed_scale(bat->channel,
+ &val->intval,
+ 1000);
if (val->intval < info->voltage_min_design_uv)
val->intval = POWER_SUPPLY_HEALTH_DEAD;
else if (val->intval > info->voltage_max_design_uv)
@@ -41,8 +42,9 @@ static int ingenic_battery_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_HEALTH_GOOD;
return ret;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = iio_read_channel_processed(bat->channel, &val->intval);
- val->intval *= 1000;
+ ret = iio_read_channel_processed_scale(bat->channel,
+ &val->intval,
+ 1000);
return ret;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = info->voltage_min_design_uv;
diff --git a/drivers/power/supply/lenovo_yoga_c630_battery.c b/drivers/power/supply/lenovo_yoga_c630_battery.c
new file mode 100644
index 000000000000..d4d422cc5353
--- /dev/null
+++ b/drivers/power/supply/lenovo_yoga_c630_battery.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024, Linaro Ltd
+ * Authors:
+ * Bjorn Andersson
+ * Dmitry Baryshkov
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/power_supply.h>
+#include <linux/platform_data/lenovo-yoga-c630.h>
+
+struct yoga_c630_psy {
+ struct yoga_c630_ec *ec;
+ struct device *dev;
+ struct fwnode_handle *fwnode;
+ struct notifier_block nb;
+
+ /* guards all battery properties and registration of power supplies */
+ struct mutex lock;
+
+ struct power_supply *adp_psy;
+ struct power_supply *bat_psy;
+
+ unsigned long last_status_update;
+
+ bool adapter_online;
+
+ bool unit_mA;
+
+ bool bat_present;
+ unsigned int bat_status;
+ unsigned int design_capacity;
+ unsigned int design_voltage;
+ unsigned int full_charge_capacity;
+
+ unsigned int capacity_now;
+ unsigned int voltage_now;
+
+ int current_now;
+ int rate_now;
+};
+
+#define LENOVO_EC_CACHE_TIME (10 * HZ)
+
+#define LENOVO_EC_ADPT_STATUS 0xa3
+#define LENOVO_EC_ADPT_STATUS_PRESENT BIT(7)
+#define LENOVO_EC_BAT_ATTRIBUTES 0xc0
+#define LENOVO_EC_BAT_ATTRIBUTES_UNIT_IS_MA BIT(1)
+#define LENOVO_EC_BAT_STATUS 0xc1
+#define LENOVO_EC_BAT_STATUS_DISCHARGING BIT(0)
+#define LENOVO_EC_BAT_STATUS_CHARGING BIT(1)
+#define LENOVO_EC_BAT_REMAIN_CAPACITY 0xc2
+#define LENOVO_EC_BAT_VOLTAGE 0xc6
+#define LENOVO_EC_BAT_DESIGN_VOLTAGE 0xc8
+#define LENOVO_EC_BAT_DESIGN_CAPACITY 0xca
+#define LENOVO_EC_BAT_FULL_CAPACITY 0xcc
+#define LENOVO_EC_BAT_CURRENT 0xd2
+#define LENOVO_EC_BAT_FULL_FACTORY 0xd6
+#define LENOVO_EC_BAT_PRESENT 0xda
+#define LENOVO_EC_BAT_PRESENT_IS_PRESENT BIT(0)
+#define LENOVO_EC_BAT_FULL_REGISTER 0xdb
+#define LENOVO_EC_BAT_FULL_REGISTER_IS_FACTORY BIT(0)
+
+static int yoga_c630_psy_update_bat_info(struct yoga_c630_psy *ecbat)
+{
+ struct yoga_c630_ec *ec = ecbat->ec;
+ int val;
+
+ lockdep_assert_held(&ecbat->lock);
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_PRESENT);
+ if (val < 0)
+ return val;
+ ecbat->bat_present = !!(val & LENOVO_EC_BAT_PRESENT_IS_PRESENT);
+ if (!ecbat->bat_present)
+ return val;
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_ATTRIBUTES);
+ if (val < 0)
+ return val;
+ ecbat->unit_mA = val & LENOVO_EC_BAT_ATTRIBUTES_UNIT_IS_MA;
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_DESIGN_CAPACITY);
+ if (val < 0)
+ return val;
+ ecbat->design_capacity = val * 1000;
+
+ /*
+ * The DSDT has delays after most EC reads in these methods.
+ * Having no documentation for the EC, we follow suit and sleep here.
+ */
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_DESIGN_VOLTAGE);
+ if (val < 0)
+ return val;
+ ecbat->design_voltage = val;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_FULL_REGISTER);
+ if (val < 0)
+ return val;
+ val = yoga_c630_ec_read16(ec,
+ val & LENOVO_EC_BAT_FULL_REGISTER_IS_FACTORY ?
+ LENOVO_EC_BAT_FULL_FACTORY :
+ LENOVO_EC_BAT_FULL_CAPACITY);
+ if (val < 0)
+ return val;
+
+ ecbat->full_charge_capacity = val * 1000;
+
+ if (!ecbat->unit_mA) {
+ ecbat->design_capacity *= 10;
+ ecbat->full_charge_capacity *= 10;
+ }
+
+ return 0;
+}
+
+static int yoga_c630_psy_maybe_update_bat_status(struct yoga_c630_psy *ecbat)
+{
+ struct yoga_c630_ec *ec = ecbat->ec;
+ int current_mA;
+ int val;
+
+ guard(mutex)(&ecbat->lock);
+ if (time_before(jiffies, ecbat->last_status_update + LENOVO_EC_CACHE_TIME))
+ return 0;
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_STATUS);
+ if (val < 0)
+ return val;
+ ecbat->bat_status = val;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_REMAIN_CAPACITY);
+ if (val < 0)
+ return val;
+ ecbat->capacity_now = val * 1000;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_VOLTAGE);
+ if (val < 0)
+ return val;
+ ecbat->voltage_now = val * 1000;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_CURRENT);
+ if (val < 0)
+ return val;
+ current_mA = sign_extend32(val, 15);
+ ecbat->current_now = current_mA * 1000;
+ ecbat->rate_now = current_mA * (ecbat->voltage_now / 1000);
+
+ msleep(50);
+
+ if (!ecbat->unit_mA)
+ ecbat->capacity_now *= 10;
+
+ ecbat->last_status_update = jiffies;
+
+ return 0;
+}
+
+static int yoga_c630_psy_update_adapter_status(struct yoga_c630_psy *ecbat)
+{
+ struct yoga_c630_ec *ec = ecbat->ec;
+ int val;
+
+ guard(mutex)(&ecbat->lock);
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_ADPT_STATUS);
+ if (val < 0)
+ return val;
+
+ ecbat->adapter_online = !!(val & LENOVO_EC_ADPT_STATUS_PRESENT);
+
+ return 0;
+}
+
+static bool yoga_c630_psy_is_charged(struct yoga_c630_psy *ecbat)
+{
+ if (ecbat->bat_status != 0)
+ return false;
+
+ if (ecbat->full_charge_capacity <= ecbat->capacity_now)
+ return true;
+
+ if (ecbat->design_capacity <= ecbat->capacity_now)
+ return true;
+
+ return false;
+}
+
+static int yoga_c630_psy_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct yoga_c630_psy *ecbat = power_supply_get_drvdata(psy);
+ int rc = 0;
+
+ if (!ecbat->bat_present && psp != POWER_SUPPLY_PROP_PRESENT)
+ return -ENODEV;
+
+ rc = yoga_c630_psy_maybe_update_bat_status(ecbat);
+ if (rc)
+ return rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (ecbat->bat_status & LENOVO_EC_BAT_STATUS_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (ecbat->bat_status & LENOVO_EC_BAT_STATUS_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (yoga_c630_psy_is_charged(ecbat))
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = ecbat->bat_present;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = ecbat->design_voltage;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ecbat->design_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ecbat->full_charge_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ val->intval = ecbat->capacity_now;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ecbat->current_now;
+ break;
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ val->intval = ecbat->rate_now;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = ecbat->voltage_now;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = "PABAS0241231";
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "Compal";
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static enum power_supply_property yoga_c630_psy_bat_mA_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static enum power_supply_property yoga_c630_psy_bat_mWh_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static const struct power_supply_desc yoga_c630_psy_bat_psy_desc_mA = {
+ .name = "yoga-c630-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = yoga_c630_psy_bat_mA_properties,
+ .num_properties = ARRAY_SIZE(yoga_c630_psy_bat_mA_properties),
+ .get_property = yoga_c630_psy_bat_get_property,
+};
+
+static const struct power_supply_desc yoga_c630_psy_bat_psy_desc_mWh = {
+ .name = "yoga-c630-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = yoga_c630_psy_bat_mWh_properties,
+ .num_properties = ARRAY_SIZE(yoga_c630_psy_bat_mWh_properties),
+ .get_property = yoga_c630_psy_bat_get_property,
+};
+
+static int yoga_c630_psy_adpt_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct yoga_c630_psy *ecbat = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ ret = yoga_c630_psy_update_adapter_status(ecbat);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = ecbat->adapter_online;
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = POWER_SUPPLY_USB_TYPE_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property yoga_c630_psy_adpt_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static const enum power_supply_usb_type yoga_c630_psy_adpt_usb_type[] = {
+ POWER_SUPPLY_USB_TYPE_C,
+};
+
+static const struct power_supply_desc yoga_c630_psy_adpt_psy_desc = {
+ .name = "yoga-c630-adapter",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = yoga_c630_psy_adpt_usb_type,
+ .num_usb_types = ARRAY_SIZE(yoga_c630_psy_adpt_usb_type),
+ .properties = yoga_c630_psy_adpt_properties,
+ .num_properties = ARRAY_SIZE(yoga_c630_psy_adpt_properties),
+ .get_property = yoga_c630_psy_adpt_get_property,
+};
+
+static int yoga_c630_psy_register_bat_psy(struct yoga_c630_psy *ecbat)
+{
+ struct power_supply_config bat_cfg = {};
+
+ bat_cfg.drv_data = ecbat;
+ bat_cfg.fwnode = ecbat->fwnode;
+ ecbat->bat_psy = power_supply_register_no_ws(ecbat->dev,
+ ecbat->unit_mA ?
+ &yoga_c630_psy_bat_psy_desc_mA :
+ &yoga_c630_psy_bat_psy_desc_mWh,
+ &bat_cfg);
+ if (IS_ERR(ecbat->bat_psy)) {
+ dev_err(ecbat->dev, "failed to register battery supply\n");
+ return PTR_ERR(ecbat->bat_psy);
+ }
+
+ return 0;
+}
+
+static void yoga_c630_ec_refresh_bat_info(struct yoga_c630_psy *ecbat)
+{
+ bool current_unit;
+
+ guard(mutex)(&ecbat->lock);
+
+ current_unit = ecbat->unit_mA;
+
+ yoga_c630_psy_update_bat_info(ecbat);
+
+ if (current_unit != ecbat->unit_mA) {
+ power_supply_unregister(ecbat->bat_psy);
+ yoga_c630_psy_register_bat_psy(ecbat);
+ }
+}
+
+static int yoga_c630_psy_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct yoga_c630_psy *ecbat = container_of(nb, struct yoga_c630_psy, nb);
+
+ switch (action) {
+ case LENOVO_EC_EVENT_BAT_INFO:
+ yoga_c630_ec_refresh_bat_info(ecbat);
+ break;
+ case LENOVO_EC_EVENT_BAT_ADPT_STATUS:
+ power_supply_changed(ecbat->adp_psy);
+ fallthrough;
+ case LENOVO_EC_EVENT_BAT_STATUS:
+ power_supply_changed(ecbat->bat_psy);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int yoga_c630_psy_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct yoga_c630_ec *ec = adev->dev.platform_data;
+ struct power_supply_config adp_cfg = {};
+ struct device *dev = &adev->dev;
+ struct yoga_c630_psy *ecbat;
+ int ret;
+
+ ecbat = devm_kzalloc(&adev->dev, sizeof(*ecbat), GFP_KERNEL);
+ if (!ecbat)
+ return -ENOMEM;
+
+ ecbat->ec = ec;
+ ecbat->dev = dev;
+ mutex_init(&ecbat->lock);
+ ecbat->fwnode = adev->dev.parent->fwnode;
+ ecbat->nb.notifier_call = yoga_c630_psy_notify;
+
+ auxiliary_set_drvdata(adev, ecbat);
+
+ adp_cfg.drv_data = ecbat;
+ adp_cfg.fwnode = ecbat->fwnode;
+ adp_cfg.supplied_to = (char **)&yoga_c630_psy_bat_psy_desc_mA.name;
+ adp_cfg.num_supplicants = 1;
+ ecbat->adp_psy = devm_power_supply_register_no_ws(dev, &yoga_c630_psy_adpt_psy_desc, &adp_cfg);
+ if (IS_ERR(ecbat->adp_psy)) {
+ dev_err(dev, "failed to register AC adapter supply\n");
+ return PTR_ERR(ecbat->adp_psy);
+ }
+
+ scoped_guard(mutex, &ecbat->lock) {
+ ret = yoga_c630_psy_update_bat_info(ecbat);
+ if (ret)
+ goto err_unreg_bat;
+
+ ret = yoga_c630_psy_register_bat_psy(ecbat);
+ if (ret)
+ goto err_unreg_bat;
+ }
+
+ ret = yoga_c630_ec_register_notify(ecbat->ec, &ecbat->nb);
+ if (ret)
+ goto err_unreg_bat;
+
+ return 0;
+
+err_unreg_bat:
+ power_supply_unregister(ecbat->bat_psy);
+ return ret;
+}
+
+static void yoga_c630_psy_remove(struct auxiliary_device *adev)
+{
+ struct yoga_c630_psy *ecbat = auxiliary_get_drvdata(adev);
+
+ yoga_c630_ec_unregister_notify(ecbat->ec, &ecbat->nb);
+ power_supply_unregister(ecbat->bat_psy);
+}
+
+static const struct auxiliary_device_id yoga_c630_psy_id_table[] = {
+ { .name = YOGA_C630_MOD_NAME "." YOGA_C630_DEV_PSY, },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, yoga_c630_psy_id_table);
+
+static struct auxiliary_driver yoga_c630_psy_driver = {
+ .name = YOGA_C630_DEV_PSY,
+ .id_table = yoga_c630_psy_id_table,
+ .probe = yoga_c630_psy_probe,
+ .remove = yoga_c630_psy_remove,
+};
+
+module_auxiliary_driver(yoga_c630_psy_driver);
+
+MODULE_DESCRIPTION("Lenovo Yoga C630 psy");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index 34548a4da90b..4186fcd37512 100644
--- a/drivers/power/supply/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
@@ -584,7 +584,7 @@ static const struct of_device_id lp8727_dt_ids[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, lp8727_dt_ids);
static const struct i2c_device_id lp8727_ids[] = {
- {"lp8727", 0},
+ { "lp8727" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp8727_ids);
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index f0eace731480..2e4bc74e1c4a 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -903,8 +903,8 @@ static void ltc4162l_alert(struct i2c_client *client,
}
static const struct i2c_device_id ltc4162l_i2c_id_table[] = {
- { "ltc4162-l", 0 },
- { },
+ { "ltc4162-l" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ltc4162l_i2c_id_table);
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index 89f2af72dfcd..a5b42b42d134 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -300,7 +300,7 @@ static int max14656_probe(struct i2c_client *client)
}
static const struct i2c_device_id max14656_id[] = {
- { "max14656", 0 },
+ { "max14656" },
{}
};
MODULE_DEVICE_TABLE(i2c, max14656_id);
diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c
new file mode 100644
index 000000000000..edc262f0a62f
--- /dev/null
+++ b/drivers/power/supply/max1720x_battery.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Fuel gauge driver for Maxim 17201/17205
+ *
+ * based on max1721x_battery.c
+ *
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#include <asm/unaligned.h>
+
+/* Nonvolatile registers */
+#define MAX1720X_NRSENSE 0xCF /* RSense in 10^-5 Ohm */
+
+/* ModelGauge m5 */
+#define MAX172XX_STATUS 0x00 /* Status */
+#define MAX172XX_STATUS_BAT_ABSENT BIT(3) /* Battery absent */
+#define MAX172XX_REPCAP 0x05 /* Average capacity */
+#define MAX172XX_REPSOC 0x06 /* Percentage of charge */
+#define MAX172XX_TEMP 0x08 /* Temperature */
+#define MAX172XX_CURRENT 0x0A /* Actual current */
+#define MAX172XX_AVG_CURRENT 0x0B /* Average current */
+#define MAX172XX_TTE 0x11 /* Time to empty */
+#define MAX172XX_AVG_TA 0x16 /* Average temperature */
+#define MAX172XX_CYCLES 0x17
+#define MAX172XX_DESIGN_CAP 0x18 /* Design capacity */
+#define MAX172XX_AVG_VCELL 0x19
+#define MAX172XX_TTF 0x20 /* Time to full */
+#define MAX172XX_DEV_NAME 0x21 /* Device name */
+#define MAX172XX_DEV_NAME_TYPE_MASK GENMASK(3, 0)
+#define MAX172XX_DEV_NAME_TYPE_MAX17201 BIT(0)
+#define MAX172XX_DEV_NAME_TYPE_MAX17205 (BIT(0) | BIT(2))
+#define MAX172XX_QR_TABLE10 0x22
+#define MAX172XX_BATT 0xDA /* Battery voltage */
+#define MAX172XX_ATAVCAP 0xDF
+
+static const char *const max1720x_manufacturer = "Maxim Integrated";
+static const char *const max17201_model = "MAX17201";
+static const char *const max17205_model = "MAX17205";
+
+struct max1720x_device_info {
+ struct regmap *regmap;
+ int rsense;
+};
+
+/*
+ * ModelGauge m5 algorithm output registers
+ * Volatile data (must not be cached)
+ */
+static const struct regmap_range max1720x_volatile_allow[] = {
+ regmap_reg_range(MAX172XX_STATUS, MAX172XX_CYCLES),
+ regmap_reg_range(MAX172XX_AVG_VCELL, MAX172XX_TTF),
+ regmap_reg_range(MAX172XX_QR_TABLE10, MAX172XX_ATAVCAP),
+};
+
+static const struct regmap_range max1720x_readable_allow[] = {
+ regmap_reg_range(MAX172XX_STATUS, MAX172XX_ATAVCAP),
+};
+
+static const struct regmap_range max1720x_readable_deny[] = {
+ /* unused registers */
+ regmap_reg_range(0x24, 0x26),
+ regmap_reg_range(0x30, 0x31),
+ regmap_reg_range(0x33, 0x34),
+ regmap_reg_range(0x37, 0x37),
+ regmap_reg_range(0x3B, 0x3C),
+ regmap_reg_range(0x40, 0x41),
+ regmap_reg_range(0x43, 0x44),
+ regmap_reg_range(0x47, 0x49),
+ regmap_reg_range(0x4B, 0x4C),
+ regmap_reg_range(0x4E, 0xAF),
+ regmap_reg_range(0xB1, 0xB3),
+ regmap_reg_range(0xB5, 0xB7),
+ regmap_reg_range(0xBF, 0xD0),
+ regmap_reg_range(0xDB, 0xDB),
+ regmap_reg_range(0xE0, 0xFF),
+};
+
+static const struct regmap_access_table max1720x_readable_regs = {
+ .yes_ranges = max1720x_readable_allow,
+ .n_yes_ranges = ARRAY_SIZE(max1720x_readable_allow),
+ .no_ranges = max1720x_readable_deny,
+ .n_no_ranges = ARRAY_SIZE(max1720x_readable_deny),
+};
+
+static const struct regmap_access_table max1720x_volatile_regs = {
+ .yes_ranges = max1720x_volatile_allow,
+ .n_yes_ranges = ARRAY_SIZE(max1720x_volatile_allow),
+ .no_ranges = max1720x_readable_deny,
+ .n_no_ranges = ARRAY_SIZE(max1720x_readable_deny),
+};
+
+static const struct regmap_config max1720x_regmap_cfg = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = MAX172XX_ATAVCAP,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .rd_table = &max1720x_readable_regs,
+ .volatile_table = &max1720x_volatile_regs,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const enum power_supply_property max1720x_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+/* Convert regs value to power_supply units */
+
+static int max172xx_time_to_ps(unsigned int reg)
+{
+ return reg * 5625 / 1000; /* in sec. */
+}
+
+static int max172xx_percent_to_ps(unsigned int reg)
+{
+ return reg / 256; /* in percent from 0 to 100 */
+}
+
+static int max172xx_voltage_to_ps(unsigned int reg)
+{
+ return reg * 1250; /* in uV */
+}
+
+static int max172xx_capacity_to_ps(unsigned int reg)
+{
+ return reg * 500; /* in uAh */
+}
+
+/*
+ * Current and temperature are signed values, so the unsigned register
+ * value must be converted to a signed type
+ */
+
+static int max172xx_temperature_to_ps(unsigned int reg)
+{
+ int val = (int16_t)reg;
+
+ return val * 10 / 256; /* in tenths of deg. C */
+}
+
+/*
+ * Calculating the current register resolution:
+ *
+ * RSense is stored in 10^-5 Ohm, so the measured voltage must be
+ * in 10^-11 Volts to get the current in uA.
+ * The 16-bit current register full scale of +/-51.2mV is 102400 uV.
+ * So: 102400 / 65535 * 10^5 = 156252
+ */
+static int max172xx_current_to_voltage(unsigned int reg)
+{
+ int val = (int16_t)reg;
+
+ return val * 156252;
+}
+
+static int max1720x_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max1720x_device_info *info = power_supply_get_drvdata(psy);
+ unsigned int reg_val;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ /*
+ * POWER_SUPPLY_PROP_PRESENT is always readable via the
+ * sysfs interface. The value reads as 0 if the battery is
+ * not present or is inaccessible over I2C.
+ */
+ ret = regmap_read(info->regmap, MAX172XX_STATUS, &reg_val);
+ if (ret < 0) {
+ val->intval = 0;
+ return 0;
+ }
+
+ val->intval = !FIELD_GET(MAX172XX_STATUS_BAT_ABSENT, reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = regmap_read(info->regmap, MAX172XX_REPSOC, &reg_val);
+ val->intval = max172xx_percent_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = regmap_read(info->regmap, MAX172XX_BATT, &reg_val);
+ val->intval = max172xx_voltage_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ ret = regmap_read(info->regmap, MAX172XX_DESIGN_CAP, &reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_REPCAP, &reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_TTE, &reg_val);
+ val->intval = max172xx_time_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_TTF, &reg_val);
+ val->intval = max172xx_time_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = regmap_read(info->regmap, MAX172XX_TEMP, &reg_val);
+ val->intval = max172xx_temperature_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = regmap_read(info->regmap, MAX172XX_CURRENT, &reg_val);
+ val->intval = max172xx_current_to_voltage(reg_val) / info->rsense;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_AVG_CURRENT, &reg_val);
+ val->intval = max172xx_current_to_voltage(reg_val) / info->rsense;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ ret = regmap_read(info->regmap, MAX172XX_DEV_NAME, &reg_val);
+ reg_val = FIELD_GET(MAX172XX_DEV_NAME_TYPE_MASK, reg_val);
+ if (reg_val == MAX172XX_DEV_NAME_TYPE_MAX17201)
+ val->strval = max17201_model;
+ else if (reg_val == MAX172XX_DEV_NAME_TYPE_MAX17205)
+ val->strval = max17205_model;
+ else
+ return -ENODEV;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = max1720x_manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int max1720x_probe_sense_resistor(struct i2c_client *client,
+ struct max1720x_device_info *info)
+{
+ struct device *dev = &client->dev;
+ struct i2c_client *ancillary;
+ int ret;
+
+ ancillary = i2c_new_ancillary_device(client, "nvmem", 0xb);
+ if (IS_ERR(ancillary)) {
+ dev_err(dev, "Failed to initialize ancillary i2c device\n");
+ return PTR_ERR(ancillary);
+ }
+
+ ret = i2c_smbus_read_word_data(ancillary, MAX1720X_NRSENSE);
+ i2c_unregister_device(ancillary);
+ if (ret < 0)
+ return ret;
+
+ info->rsense = ret;
+ if (!info->rsense) {
+ dev_warn(dev, "RSense not calibrated, set 10 mOhms!\n");
+ info->rsense = 1000; /* 10 mOhm, in units of 10^-5 Ohm */
+ }
+
+ return 0;
+}
+
+static const struct power_supply_desc max1720x_bat_desc = {
+ .name = "max1720x",
+ .no_thermal = true,
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = max1720x_battery_props,
+ .num_properties = ARRAY_SIZE(max1720x_battery_props),
+ .get_property = max1720x_battery_get_property,
+};
+
+static int max1720x_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = {};
+ struct device *dev = &client->dev;
+ struct max1720x_device_info *info;
+ struct power_supply *bat;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ psy_cfg.drv_data = info;
+ psy_cfg.fwnode = dev_fwnode(dev);
+ info->regmap = devm_regmap_init_i2c(client, &max1720x_regmap_cfg);
+ if (IS_ERR(info->regmap))
+ return dev_err_probe(dev, PTR_ERR(info->regmap),
+ "regmap initialization failed\n");
+
+ ret = max1720x_probe_sense_resistor(client, info);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to read sense resistor value\n");
+
+ bat = devm_power_supply_register(dev, &max1720x_bat_desc, &psy_cfg);
+ if (IS_ERR(bat))
+ return dev_err_probe(dev, PTR_ERR(bat),
+ "Failed to register power supply\n");
+
+ return 0;
+}
+
+static const struct of_device_id max1720x_of_match[] = {
+ { .compatible = "maxim,max17201" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max1720x_of_match);
+
+static struct i2c_driver max1720x_i2c_driver = {
+ .driver = {
+ .name = "max1720x",
+ .of_match_table = max1720x_of_match,
+ },
+ .probe = max1720x_probe,
+};
+module_i2c_driver(max1720x_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dimitri Fedrau <dima.fedrau@gmail.com>");
+MODULE_DESCRIPTION("Maxim MAX17201/MAX17205 Fuel Gauge IC driver");
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index 99659dc8f5a6..d7e520da7688 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -483,8 +483,8 @@ static int max77976_probe(struct i2c_client *client)
}
static const struct i2c_device_id max77976_i2c_id[] = {
- { MAX77976_DRIVER_NAME, 0 },
- { },
+ { MAX77976_DRIVER_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, max77976_i2c_id);
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index 20c1651ca38e..5bcfaeeda3db 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -284,7 +284,7 @@ static int mm8013_probe(struct i2c_client *client)
}
static const struct i2c_device_id mm8013_id_table[] = {
- { "mm8013", 0 },
+ { "mm8013" },
{}
};
MODULE_DEVICE_TABLE(i2c, mm8013_id_table);
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index fefe938c9342..8f6025acd10a 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1024,7 +1024,7 @@ EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple);
int power_supply_vbat2ri(struct power_supply_battery_info *info,
int vbat_uv, bool charging)
{
- struct power_supply_vbat_ri_table *vbat2ri;
+ const struct power_supply_vbat_ri_table *vbat2ri;
int table_len;
int i, high, low;
@@ -1072,7 +1072,7 @@ int power_supply_vbat2ri(struct power_supply_battery_info *info,
}
EXPORT_SYMBOL_GPL(power_supply_vbat2ri);
-struct power_supply_maintenance_charge_table *
+const struct power_supply_maintenance_charge_table *
power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info,
int index)
{
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index c97893d4c25e..baacefbdf768 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -48,6 +48,18 @@ static int power_supply_hwmon_curr_to_property(u32 attr)
}
}
+static int power_supply_hwmon_power_to_property(u32 attr)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ return POWER_SUPPLY_PROP_POWER_NOW;
+ case hwmon_power_average:
+ return POWER_SUPPLY_PROP_POWER_AVG;
+ default:
+ return -EINVAL;
+ }
+}
+
static int power_supply_hwmon_temp_to_property(u32 attr, int channel)
{
if (channel) {
@@ -90,6 +102,8 @@ power_supply_hwmon_to_property(enum hwmon_sensor_types type,
return power_supply_hwmon_in_to_property(attr);
case hwmon_curr:
return power_supply_hwmon_curr_to_property(attr);
+ case hwmon_power:
+ return power_supply_hwmon_power_to_property(attr);
case hwmon_temp:
return power_supply_hwmon_temp_to_property(attr, channel);
default:
@@ -229,6 +243,11 @@ power_supply_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_in:
pspval.intval = DIV_ROUND_CLOSEST(pspval.intval, 1000);
break;
+ case hwmon_power:
+ /*
+ * Power properties are already in microwatts.
+ */
+ break;
/*
* Temp needs to be converted from 1/10 C to milli-C
*/
@@ -311,6 +330,10 @@ static const struct hwmon_channel_info * const power_supply_hwmon_info[] = {
HWMON_C_MAX |
HWMON_C_INPUT),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT |
+ HWMON_P_AVERAGE),
+
HWMON_CHANNEL_INFO(in,
HWMON_I_AVERAGE |
HWMON_I_MIN |
@@ -359,6 +382,8 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_MAX:
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_POWER_AVG:
+ case POWER_SUPPLY_PROP_POWER_NOW:
case POWER_SUPPLY_PROP_TEMP:
case POWER_SUPPLY_PROP_TEMP_MAX:
case POWER_SUPPLY_PROP_TEMP_MIN:
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index 73935de844d9..f4a7e566bea1 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -19,6 +19,76 @@
/* Battery specific LEDs triggers. */
+struct power_supply_led_trigger {
+ struct led_trigger trig;
+ struct power_supply *psy;
+};
+
+#define trigger_to_psy_trigger(trigger) \
+ container_of(trigger, struct power_supply_led_trigger, trig)
+
+static int power_supply_led_trigger_activate(struct led_classdev *led_cdev)
+{
+ struct power_supply_led_trigger *psy_trig =
+ trigger_to_psy_trigger(led_cdev->trigger);
+
+ /* Sync current power-supply state to LED being activated */
+ power_supply_update_leds(psy_trig->psy);
+ return 0;
+}
+
+static int power_supply_register_led_trigger(struct power_supply *psy,
+ const char *name_template,
+ struct led_trigger **tp, int *err)
+{
+ struct power_supply_led_trigger *psy_trig;
+ int ret = -ENOMEM;
+
+ /* Bail on previous errors */
+ if (err && *err)
+ return *err;
+
+ psy_trig = kzalloc(sizeof(*psy_trig), GFP_KERNEL);
+ if (!psy_trig)
+ goto err_free_trigger;
+
+ psy_trig->trig.name = kasprintf(GFP_KERNEL, name_template, psy->desc->name);
+ if (!psy_trig->trig.name)
+ goto err_free_trigger;
+
+ psy_trig->trig.activate = power_supply_led_trigger_activate;
+ psy_trig->psy = psy;
+
+ ret = led_trigger_register(&psy_trig->trig);
+ if (ret)
+ goto err_free_name;
+
+ *tp = &psy_trig->trig;
+ return 0;
+
+err_free_name:
+ kfree(psy_trig->trig.name);
+err_free_trigger:
+ kfree(psy_trig);
+ if (err)
+ *err = ret;
+
+ return ret;
+}
+
+static void power_supply_unregister_led_trigger(struct led_trigger *trig)
+{
+ struct power_supply_led_trigger *psy_trig;
+
+ if (!trig)
+ return;
+
+ psy_trig = trigger_to_psy_trigger(trig);
+ led_trigger_unregister(&psy_trig->trig);
+ kfree(psy_trig->trig.name);
+ kfree(psy_trig);
+}
+
static void power_supply_update_bat_leds(struct power_supply *psy)
{
union power_supply_propval status;
@@ -32,7 +102,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
switch (status.intval) {
case POWER_SUPPLY_STATUS_FULL:
- led_trigger_event(psy->charging_full_trig, LED_FULL);
+ led_trigger_event(psy->trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
/* Going from blink to LED on requires a LED_OFF event to stop blink */
@@ -44,7 +114,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
- led_trigger_event(psy->charging_full_trig, LED_FULL);
+ led_trigger_event(psy->trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_FULL);
led_trigger_event(psy->full_trig, LED_OFF);
led_trigger_blink(psy->charging_blink_full_solid_trig, 0, 0);
@@ -54,7 +124,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
LED_FULL);
break;
default:
- led_trigger_event(psy->charging_full_trig, LED_OFF);
+ led_trigger_event(psy->trig, LED_OFF);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_OFF);
led_trigger_event(psy->charging_blink_full_solid_trig,
@@ -65,69 +135,33 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
}
}
-static int power_supply_create_bat_triggers(struct power_supply *psy)
+static void power_supply_remove_bat_triggers(struct power_supply *psy)
{
- psy->charging_full_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging-or-full", psy->desc->name);
- if (!psy->charging_full_trig_name)
- goto charging_full_failed;
-
- psy->charging_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging", psy->desc->name);
- if (!psy->charging_trig_name)
- goto charging_failed;
-
- psy->full_trig_name = kasprintf(GFP_KERNEL, "%s-full", psy->desc->name);
- if (!psy->full_trig_name)
- goto full_failed;
-
- psy->charging_blink_full_solid_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging-blink-full-solid", psy->desc->name);
- if (!psy->charging_blink_full_solid_trig_name)
- goto charging_blink_full_solid_failed;
-
- psy->charging_orange_full_green_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging-orange-full-green", psy->desc->name);
- if (!psy->charging_orange_full_green_trig_name)
- goto charging_red_full_green_failed;
-
- led_trigger_register_simple(psy->charging_full_trig_name,
- &psy->charging_full_trig);
- led_trigger_register_simple(psy->charging_trig_name,
- &psy->charging_trig);
- led_trigger_register_simple(psy->full_trig_name,
- &psy->full_trig);
- led_trigger_register_simple(psy->charging_blink_full_solid_trig_name,
- &psy->charging_blink_full_solid_trig);
- led_trigger_register_simple(psy->charging_orange_full_green_trig_name,
- &psy->charging_orange_full_green_trig);
-
- return 0;
-
-charging_red_full_green_failed:
- kfree(psy->charging_blink_full_solid_trig_name);
-charging_blink_full_solid_failed:
- kfree(psy->full_trig_name);
-full_failed:
- kfree(psy->charging_trig_name);
-charging_failed:
- kfree(psy->charging_full_trig_name);
-charging_full_failed:
- return -ENOMEM;
+ power_supply_unregister_led_trigger(psy->trig);
+ power_supply_unregister_led_trigger(psy->charging_trig);
+ power_supply_unregister_led_trigger(psy->full_trig);
+ power_supply_unregister_led_trigger(psy->charging_blink_full_solid_trig);
+ power_supply_unregister_led_trigger(psy->charging_orange_full_green_trig);
}
-static void power_supply_remove_bat_triggers(struct power_supply *psy)
+static int power_supply_create_bat_triggers(struct power_supply *psy)
{
- led_trigger_unregister_simple(psy->charging_full_trig);
- led_trigger_unregister_simple(psy->charging_trig);
- led_trigger_unregister_simple(psy->full_trig);
- led_trigger_unregister_simple(psy->charging_blink_full_solid_trig);
- led_trigger_unregister_simple(psy->charging_orange_full_green_trig);
- kfree(psy->charging_blink_full_solid_trig_name);
- kfree(psy->full_trig_name);
- kfree(psy->charging_trig_name);
- kfree(psy->charging_full_trig_name);
- kfree(psy->charging_orange_full_green_trig_name);
+ int err = 0;
+
+ power_supply_register_led_trigger(psy, "%s-charging-or-full",
+ &psy->trig, &err);
+ power_supply_register_led_trigger(psy, "%s-charging",
+ &psy->charging_trig, &err);
+ power_supply_register_led_trigger(psy, "%s-full",
+ &psy->full_trig, &err);
+ power_supply_register_led_trigger(psy, "%s-charging-blink-full-solid",
+ &psy->charging_blink_full_solid_trig, &err);
+ power_supply_register_led_trigger(psy, "%s-charging-orange-full-green",
+ &psy->charging_orange_full_green_trig, &err);
+ if (err)
+ power_supply_remove_bat_triggers(psy);
+
+ return err;
}
/* Generated power specific LEDs triggers. */
@@ -142,27 +176,19 @@ static void power_supply_update_gen_leds(struct power_supply *psy)
dev_dbg(&psy->dev, "%s %d\n", __func__, online.intval);
if (online.intval)
- led_trigger_event(psy->online_trig, LED_FULL);
+ led_trigger_event(psy->trig, LED_FULL);
else
- led_trigger_event(psy->online_trig, LED_OFF);
+ led_trigger_event(psy->trig, LED_OFF);
}
static int power_supply_create_gen_triggers(struct power_supply *psy)
{
- psy->online_trig_name = kasprintf(GFP_KERNEL, "%s-online",
- psy->desc->name);
- if (!psy->online_trig_name)
- return -ENOMEM;
-
- led_trigger_register_simple(psy->online_trig_name, &psy->online_trig);
-
- return 0;
+ return power_supply_register_led_trigger(psy, "%s-online", &psy->trig, NULL);
}
static void power_supply_remove_gen_triggers(struct power_supply *psy)
{
- led_trigger_unregister_simple(psy->online_trig);
- kfree(psy->online_trig_name);
+ power_supply_unregister_led_trigger(psy->trig);
}
/* Choice what triggers to create&update. */
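The two hunks above replace the per-trigger kasprintf() + led_trigger_register_simple() pairs with one error-accumulating helper. The helper's body is not visible in this section; the sketch below only illustrates the contract implied by the call sites (each call is skipped once *err is set, a NULL err means the error is returned directly, and unregister tolerates a NULL trigger) and is not the patch's actual implementation.

#include <linux/leds.h>
#include <linux/power_supply.h>
#include <linux/slab.h>

/* Sketch only: the real helper in the patch may wrap the trigger differently. */
static int power_supply_register_led_trigger(struct power_supply *psy,
					     const char *name_template,
					     struct led_trigger **trig,
					     int *err)
{
	struct led_trigger *tmp;
	int ret;

	/* A previous registration in this group already failed: do nothing. */
	if (err && *err)
		return *err;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto out;
	}

	tmp->name = kasprintf(GFP_KERNEL, name_template, psy->desc->name);
	if (!tmp->name) {
		kfree(tmp);
		ret = -ENOMEM;
		goto out;
	}

	ret = led_trigger_register(tmp);
	if (ret) {
		kfree(tmp->name);
		kfree(tmp);
		goto out;
	}

	*trig = tmp;
out:
	if (err && ret)
		*err = ret;
	return ret;
}

static void power_supply_unregister_led_trigger(struct led_trigger *trig)
{
	if (!trig)
		return;

	led_trigger_unregister(trig);
	kfree(trig->name);
	kfree(trig);
}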
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index b86e11bdc07e..3e63d165b2f7 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -379,8 +379,7 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
int property = psy->desc->properties[i];
if (property == attrno) {
- if (psy->desc->property_is_writeable &&
- psy->desc->property_is_writeable(psy, property) > 0)
+ if (power_supply_property_is_writeable(psy, property) > 0)
mode |= S_IWUSR;
return mode;
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index ec163d1bcd18..46f36dcb185c 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -1308,6 +1308,7 @@ static void qcom_battmgr_pdr_notify(void *priv, int state)
static const struct of_device_id qcom_battmgr_of_variants[] = {
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
+ { .compatible = "qcom,x1e80100-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
/* Unmatched devices falls back to QCOM_BATTMGR_SM8350 */
{}
};
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index e4dbacd50a43..64a23e3d7bb0 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -1718,8 +1718,8 @@ static void rt9455_remove(struct i2c_client *client)
}
static const struct i2c_device_id rt9455_i2c_id_table[] = {
- { RT9455_DRIVER_NAME, 0 },
- { },
+ { RT9455_DRIVER_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, rt9455_i2c_id_table);
diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c
index b33daab798b9..b63fd2758c2f 100644
--- a/drivers/power/supply/samsung-sdi-battery.c
+++ b/drivers/power/supply/samsung-sdi-battery.c
@@ -25,7 +25,7 @@ struct samsung_sdi_battery {
* tables apply depending on whether we are charging or not.
*/
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb_l1m7flu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb_l1m7flu[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -53,7 +53,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb_l1m7flu
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb_l1m7flu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb_l1m7flu[] = {
{ .vbat_uv = 4302000, .ri_uohm = 230000 },
{ .vbat_uv = 4276000, .ri_uohm = 345000 },
{ .vbat_uv = 4227000, .ri_uohm = 345000 },
@@ -73,7 +73,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb_l1m7flu[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161la[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161la[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -105,7 +105,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161la
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161la[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161la[] = {
{ .vbat_uv = 4345000, .ri_uohm = 230000 },
{ .vbat_uv = 4329000, .ri_uohm = 238000 },
{ .vbat_uv = 4314000, .ri_uohm = 225000 },
@@ -182,7 +182,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161la[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161lu[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -214,7 +214,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161lu
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161lu[] = {
{ .vbat_uv = 4346000, .ri_uohm = 293000 },
{ .vbat_uv = 4336000, .ri_uohm = 290000 },
{ .vbat_uv = 4315000, .ri_uohm = 274000 },
@@ -244,7 +244,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161lu[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb485159lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb485159lu[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -271,7 +271,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb485159lu
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb485159lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb485159lu[] = {
{ .vbat_uv = 4302000, .ri_uohm = 200000 },
{ .vbat_uv = 4258000, .ri_uohm = 206000 },
{ .vbat_uv = 4200000, .ri_uohm = 231000 },
@@ -291,7 +291,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb485159lu[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb535151vu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb535151vu[] = {
{ .vbat_uv = 4071000, .ri_uohm = 158000 },
{ .vbat_uv = 4019000, .ri_uohm = 187000 },
{ .vbat_uv = 3951000, .ri_uohm = 191000 },
@@ -311,7 +311,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb535151vu
{ .vbat_uv = 3280000, .ri_uohm = 250000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb535151vu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb535151vu[] = {
{ .vbat_uv = 4190000, .ri_uohm = 214000 },
{ .vbat_uv = 4159000, .ri_uohm = 252000 },
{ .vbat_uv = 4121000, .ri_uohm = 245000 },
@@ -331,7 +331,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb535151vu[]
{ .vbat_uv = 3510000, .ri_uohm = 228000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb585157lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb585157lu[] = {
{ .vbat_uv = 4194000, .ri_uohm = 121000 },
{ .vbat_uv = 4169000, .ri_uohm = 188000 },
{ .vbat_uv = 4136000, .ri_uohm = 173000 },
@@ -401,7 +401,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb585157lu
{ .vbat_uv = 3161000, .ri_uohm = 452000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb585157lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb585157lu[] = {
{ .vbat_uv = 4360000, .ri_uohm = 128000 },
{ .vbat_uv = 4325000, .ri_uohm = 130000 },
{ .vbat_uv = 4316000, .ri_uohm = 148000 },
@@ -613,7 +613,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_eb585157lu[] = {
{ .ocv = 3300000, .capacity = 0},
};
-static struct power_supply_maintenance_charge_table samsung_maint_charge_table[] = {
+static const struct power_supply_maintenance_charge_table samsung_maint_charge_table[] = {
{
/* Maintenance charging phase A, 60 hours */
.charge_current_max_ua = 600000,
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index f4adde449270..ab3f095d90ea 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -234,7 +234,7 @@ MODULE_DEVICE_TABLE(of, sbs_dt_ids);
#endif
static const struct i2c_device_id sbs_id[] = {
- { "sbs-charger", 0 },
+ { "sbs-charger" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sbs_id);
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index 933b04806d10..7d2f39f19acb 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -389,8 +389,8 @@ static int sbsm_probe(struct i2c_client *client)
}
static const struct i2c_device_id sbsm_ids[] = {
- { "sbs-manager", 0 },
- { "ltc1760", 0 },
+ { "sbs-manager" },
+ { "ltc1760" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sbsm_ids);
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 2f4b11b4dfcd..791fdc9326dd 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -220,7 +220,7 @@ static int pps_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int pps_gpio_remove(struct platform_device *pdev)
+static void pps_gpio_remove(struct platform_device *pdev)
{
struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
@@ -229,7 +229,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
/* reset echo pin in any case */
gpiod_set_value(data->echo_pin, 0);
dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
- return 0;
}
static const struct of_device_id pps_gpio_dt_ids[] = {
@@ -240,7 +239,7 @@ MODULE_DEVICE_TABLE(of, pps_gpio_dt_ids);
static struct platform_driver pps_gpio_driver = {
.probe = pps_gpio_probe,
- .remove = pps_gpio_remove,
+ .remove_new = pps_gpio_remove,
.driver = {
.name = PPS_GPIO_NAME,
.of_match_table = pps_gpio_dt_ids,
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 7adf4f2b1049..951b38ff5f8e 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -510,8 +510,6 @@ static int pwm_samsung_parse_dt(struct pwm_chip *chip)
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
struct device_node *np = pwmchip_parent(chip)->of_node;
const struct of_device_id *match;
- struct property *prop;
- const __be32 *cur;
u32 val;
match = of_match_node(samsung_pwm_matches, np);
@@ -520,7 +518,7 @@ static int pwm_samsung_parse_dt(struct pwm_chip *chip)
memcpy(&our_chip->variant, match->data, sizeof(our_chip->variant));
- of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
+ of_property_for_each_u32(np, "samsung,pwm-outputs", val) {
if (val >= SAMSUNG_PWM_NUM) {
dev_err(pwmchip_parent(chip),
"%s: invalid channel index in samsung,pwm-outputs property\n",
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 1b3b4c2e015d..238250e69005 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -186,10 +186,10 @@ EXPORT_SYMBOL_GPL(rio_attach_device);
* there is a matching &struct rio_device_id or 0 if there is
* no match.
*/
-static int rio_match_bus(struct device *dev, struct device_driver *drv)
+static int rio_match_bus(struct device *dev, const struct device_driver *drv)
{
struct rio_dev *rdev = to_rio_dev(dev);
- struct rio_driver *rdrv = to_rio_driver(drv);
+ const struct rio_driver *rdrv = to_rio_driver(drv);
const struct rio_device_id *id = rdrv->id_table;
const struct rio_device_id *found_id;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index e6a9027773fc..4b411a09c1a6 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1661,10 +1661,10 @@ config REGULATOR_UNIPHIER
config REGULATOR_RZG2L_VBCTRL
tristate "Renesas RZ/G2L USB VBUS regulator driver"
- depends on ARCH_RZG2L || COMPILE_TEST
+ depends on RESET_RZG2L_USBPHY_CTRL || COMPILE_TEST
depends on OF
select REGMAP_MMIO
- default ARCH_RZG2L
+ default RESET_RZG2L_USBPHY_CTRL
help
Support for VBUS regulators implemented on Renesas RZ/G2L SoCs.
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 48845dc8fa85..dda2ada215b7 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -166,6 +166,7 @@ config QCOM_PIL_INFO
config QCOM_RPROC_COMMON
tristate
+ select AUXILIARY_BUS
config QCOM_Q6V5_COMMON
tristate
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 5a3fb902acc9..144c8e9a642e 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -726,31 +726,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
struct resource res;
node = of_parse_phandle(np, "memory-region", a);
+ if (!node)
+ continue;
/* Not map vdevbuffer, vdevring region */
if (!strncmp(node->name, "vdev", strlen("vdev"))) {
of_node_put(node);
continue;
}
err = of_address_to_resource(node, 0, &res);
- of_node_put(node);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
+ of_node_put(node);
return err;
}
- if (b >= IMX_RPROC_MEM_MAX)
+ if (b >= IMX_RPROC_MEM_MAX) {
+ of_node_put(node);
break;
+ }
/* Not use resource version, because we might share region */
priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
if (!priv->mem[b].cpu_addr) {
dev_err(dev, "failed to remap %pr\n", &res);
+ of_node_put(node);
return -ENOMEM;
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
if (!strcmp(node->name, "rsc-table"))
priv->rsc_table = priv->mem[b].cpu_addr;
+ of_node_put(node);
b++;
}
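The imx_rproc hunk above plugs device-node reference leaks in the memory-region walk. A minimal sketch of the underlying rule, using an invented walk_memory_regions() helper rather than the driver's own loop:

#include <linux/of.h>
#include <linux/of_address.h>

/*
 * Sketch, not driver code: each successful of_parse_phandle() must be
 * balanced by of_node_put() on every exit path out of the loop body --
 * continue, break and error return alike.
 */
static int walk_memory_regions(struct device_node *np)
{
	struct device_node *node;
	struct resource res;
	int a, err;

	for (a = 0; ; a++) {
		node = of_parse_phandle(np, "memory-region", a);
		if (!node)
			break;			/* end of the phandle list */

		err = of_address_to_resource(node, 0, &res);
		of_node_put(node);		/* done with the node either way */
		if (err)
			return err;

		/* ... use res ... */
	}

	return 0;
}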
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index b8498772dba1..e744c07507ee 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -117,8 +117,8 @@ static void scp_ipi_handler(struct mtk_scp *scp)
return;
}
- memset(scp->share_buf, 0, scp_sizes->ipi_share_buffer_size);
memcpy_fromio(scp->share_buf, &rcv_obj->share_buf, len);
+ memset(&scp->share_buf[len], 0, scp_sizes->ipi_share_buffer_size - len);
handler(scp->share_buf, len, ipi_desc[id].priv);
scp_ipi_unlock(scp, id);
@@ -1344,14 +1344,12 @@ static int scp_probe(struct platform_device *pdev)
/* l1tcm is an optional memory region */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
- scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(scp_cluster->l1tcm_base)) {
- ret = PTR_ERR(scp_cluster->l1tcm_base);
- if (ret != -EINVAL)
- return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
+ if (res) {
+ scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scp_cluster->l1tcm_base))
+ return dev_err_probe(dev, PTR_ERR(scp_cluster->l1tcm_base),
+ "Failed to map l1tcm memory\n");
- scp_cluster->l1tcm_base = NULL;
- } else {
scp_cluster->l1tcm_size = resource_size(res);
scp_cluster->l1tcm_phys = res->start;
}
@@ -1390,7 +1388,7 @@ static const struct mtk_scp_sizes_data default_scp_sizes = {
};
static const struct mtk_scp_sizes_data mt8188_scp_sizes = {
- .max_dram_size = 0x500000,
+ .max_dram_size = 0x800000,
.ipi_share_buffer_size = 600,
};
@@ -1399,6 +1397,11 @@ static const struct mtk_scp_sizes_data mt8188_scp_c1_sizes = {
.ipi_share_buffer_size = 600,
};
+static const struct mtk_scp_sizes_data mt8195_scp_sizes = {
+ .max_dram_size = 0x800000,
+ .ipi_share_buffer_size = 288,
+};
+
static const struct mtk_scp_of_data mt8183_of_data = {
.scp_clk_get = mt8183_scp_clk_get,
.scp_before_load = mt8183_scp_before_load,
@@ -1476,7 +1479,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
- .scp_sizes = &default_scp_sizes,
+ .scp_sizes = &mt8195_scp_sizes,
};
static const struct mtk_scp_of_data mt8195_of_data_c1 = {
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 8f50ab80e56f..9ae2e831456d 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -1277,6 +1277,13 @@ static int omap_rproc_of_get_timers(struct platform_device *pdev,
return 0;
}
+static void omap_rproc_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int omap_rproc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1305,8 +1312,8 @@ static int omap_rproc_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
- firmware, sizeof(*oproc));
+ rproc = devm_rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
+ firmware, sizeof(*oproc));
if (!rproc)
return -ENOMEM;
@@ -1318,15 +1325,15 @@ static int omap_rproc_probe(struct platform_device *pdev)
ret = omap_rproc_of_get_internal_memories(pdev, rproc);
if (ret)
- goto free_rproc;
+ return ret;
ret = omap_rproc_get_boot_data(pdev, rproc);
if (ret)
- goto free_rproc;
+ return ret;
ret = omap_rproc_of_get_timers(pdev, rproc);
if (ret)
- goto free_rproc;
+ return ret;
init_completion(&oproc->pm_comp);
oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY;
@@ -1337,10 +1344,8 @@ static int omap_rproc_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay);
oproc->fck = devm_clk_get(&pdev->dev, 0);
- if (IS_ERR(oproc->fck)) {
- ret = PTR_ERR(oproc->fck);
- goto free_rproc;
- }
+ if (IS_ERR(oproc->fck))
+ return PTR_ERR(oproc->fck);
ret = of_reserved_mem_device_init(&pdev->dev);
if (ret) {
@@ -1348,29 +1353,17 @@ static int omap_rproc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Typically this should be provided,\n");
dev_warn(&pdev->dev, "only omit if you know what you are doing.\n");
}
+ ret = devm_add_action_or_reset(&pdev->dev, omap_rproc_mem_release, &pdev->dev);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, rproc);
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(&pdev->dev, rproc);
if (ret)
- goto release_mem;
+ return ret;
return 0;
-
-release_mem:
- of_reserved_mem_device_release(&pdev->dev);
-free_rproc:
- rproc_free(rproc);
- return ret;
-}
-
-static void omap_rproc_remove(struct platform_device *pdev)
-{
- struct rproc *rproc = platform_get_drvdata(pdev);
-
- rproc_del(rproc);
- rproc_free(rproc);
- of_reserved_mem_device_release(&pdev->dev);
}
static const struct dev_pm_ops omap_rproc_pm_ops = {
@@ -1381,7 +1374,6 @@ static const struct dev_pm_ops omap_rproc_pm_ops = {
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
- .remove_new = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
.pm = &omap_rproc_pm_ops,
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 03e5f5d533eb..8c8688f99f0a 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -13,6 +13,7 @@
#include <linux/notifier.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/auxiliary_bus.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/rpmsg/qcom_smd.h>
#include <linux/slab.h>
@@ -25,6 +26,7 @@
#define to_glink_subdev(d) container_of(d, struct qcom_rproc_glink, subdev)
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
+#define to_pdm_subdev(d) container_of(d, struct qcom_rproc_pdm, subdev)
#define MAX_NUM_OF_SS 10
#define MAX_REGION_NAME_LENGTH 16
@@ -519,5 +521,90 @@ void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
}
EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
+static void pdm_dev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static int pdm_notify_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_pdm *pdm = to_pdm_subdev(subdev);
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->dev.parent = pdm->dev;
+ adev->dev.release = pdm_dev_release;
+ adev->name = "pd-mapper";
+ adev->id = pdm->index;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ pdm->adev = adev;
+
+ return 0;
+}
+
+static void pdm_notify_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_pdm *pdm = to_pdm_subdev(subdev);
+
+ if (!pdm->adev)
+ return;
+
+ auxiliary_device_delete(pdm->adev);
+ auxiliary_device_uninit(pdm->adev);
+ pdm->adev = NULL;
+}
+
+/**
+ * qcom_add_pdm_subdev() - register PD Mapper subdevice
+ * @rproc: rproc handle
+ * @pdm: PDM subdevice handle
+ *
+ * Register @pdm so that the Protection Domain mapper service is started when
+ * the DSP is started too.
+ */
+void qcom_add_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm)
+{
+ pdm->dev = &rproc->dev;
+ pdm->index = rproc->index;
+
+ pdm->subdev.prepare = pdm_notify_prepare;
+ pdm->subdev.unprepare = pdm_notify_unprepare;
+
+ rproc_add_subdev(rproc, &pdm->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_pdm_subdev);
+
+/**
+ * qcom_remove_pdm_subdev() - remove PD Mapper subdevice
+ * @rproc: rproc handle
+ * @pdm: PDM subdevice handle
+ *
+ * Remove the PD Mapper subdevice.
+ */
+void qcom_remove_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm)
+{
+ rproc_remove_subdev(rproc, &pdm->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_remove_pdm_subdev);
+
MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 9ef4449052a9..b07fbaa091a0 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -34,6 +34,13 @@ struct qcom_rproc_ssr {
struct qcom_ssr_subsystem *info;
};
+struct qcom_rproc_pdm {
+ struct rproc_subdev subdev;
+ struct device *dev;
+ int index;
+ struct auxiliary_device *adev;
+};
+
void qcom_minidump(struct rproc *rproc, unsigned int minidump_id,
void (*rproc_dumpfn_t)(struct rproc *rproc,
struct rproc_dump_segment *segment, void *dest, size_t offset,
@@ -52,6 +59,9 @@ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name);
void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr);
+void qcom_add_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm);
+void qcom_remove_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm);
+
#if IS_ENABLED(CONFIG_QCOM_SYSMON)
struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 1d24c9b656a8..572dcb0f055b 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -112,6 +112,7 @@ struct qcom_adsp {
struct dev_pm_domain_list *pd_list;
struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
@@ -726,6 +727,7 @@ static int adsp_probe(struct platform_device *pdev)
goto disable_pm;
qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
+ qcom_add_pdm_subdev(rproc, &adsp->pdm_subdev);
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
@@ -755,6 +757,7 @@ static void adsp_remove(struct platform_device *pdev)
qcom_q6v5_deinit(&adsp->q6v5);
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
+ qcom_remove_pdm_subdev(adsp->rproc, &adsp->pdm_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
qcom_rproc_pds_detach(adsp);
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 1779fc890e10..2a42215ce8e0 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -228,6 +228,7 @@ struct q6v5 {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
struct platform_device *bam_dmux;
@@ -2102,6 +2103,7 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_add_pdm_subdev(rproc, &qproc->pdm_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
@@ -2143,6 +2145,7 @@ static void q6v5_remove(struct platform_device *pdev)
qcom_q6v5_deinit(&qproc->q6v5);
qcom_remove_sysmon_subdev(qproc->sysmon);
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_pdm_subdev(rproc, &qproc->pdm_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 54d8005d40a3..88e7b84f223c 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -52,6 +52,7 @@ struct adsp_data {
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
+ unsigned int smem_host_id;
int region_assign_idx;
int region_assign_count;
@@ -81,6 +82,7 @@ struct qcom_adsp {
int lite_pas_id;
unsigned int minidump_id;
int crash_reason_smem;
+ unsigned int smem_host_id;
bool decrypt_shutdown;
const char *info_name;
@@ -109,6 +111,7 @@ struct qcom_adsp {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
@@ -399,6 +402,9 @@ static int adsp_stop(struct rproc *rproc)
if (handover)
qcom_pas_handover(&adsp->q6v5);
+ if (adsp->smem_host_id)
+ ret = qcom_smem_bust_hwspin_lock_by_host(adsp->smem_host_id);
+
return ret;
}
@@ -727,6 +733,7 @@ static int adsp_probe(struct platform_device *pdev)
adsp->pas_id = desc->pas_id;
adsp->lite_pas_id = desc->lite_pas_id;
adsp->info_name = desc->sysmon_name;
+ adsp->smem_host_id = desc->smem_host_id;
adsp->decrypt_shutdown = desc->decrypt_shutdown;
adsp->region_assign_idx = desc->region_assign_idx;
adsp->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count);
@@ -771,6 +778,7 @@ static int adsp_probe(struct platform_device *pdev)
qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
+ qcom_add_pdm_subdev(rproc, &adsp->pdm_subdev);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
@@ -805,6 +813,7 @@ static void adsp_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
+ qcom_remove_pdm_subdev(adsp->rproc, &adsp->pdm_subdev);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
device_init_wakeup(adsp->dev, false);
@@ -1196,6 +1205,7 @@ static const struct adsp_data sm8550_adsp_resource = {
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
+ .smem_host_id = 2,
};
static const struct adsp_data sm8550_cdsp_resource = {
@@ -1216,6 +1226,7 @@ static const struct adsp_data sm8550_cdsp_resource = {
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
+ .smem_host_id = 5,
};
static const struct adsp_data sm8550_mpss_resource = {
@@ -1236,6 +1247,7 @@ static const struct adsp_data sm8550_mpss_resource = {
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
+ .smem_host_id = 1,
.region_assign_idx = 2,
.region_assign_count = 1,
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
@@ -1275,6 +1287,7 @@ static const struct adsp_data sm8650_cdsp_resource = {
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
+ .smem_host_id = 5,
.region_assign_idx = 2,
.region_assign_count = 1,
.region_assign_shared = true,
@@ -1299,6 +1312,7 @@ static const struct adsp_data sm8650_mpss_resource = {
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
+ .smem_host_id = 1,
.region_assign_idx = 2,
.region_assign_count = 3,
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 94f68c919ee6..e913dabae992 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -148,6 +148,7 @@ struct q6v5_wcss {
bool requires_force_stop;
struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
};
@@ -1052,6 +1053,7 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
return ret;
qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
+ qcom_add_pdm_subdev(rproc, &wcss->pdm_subdev);
qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
if (desc->ssctl_id)
@@ -1074,6 +1076,7 @@ static void q6v5_wcss_remove(struct platform_device *pdev)
struct q6v5_wcss *wcss = rproc->priv;
qcom_q6v5_deinit(&wcss->q6v5);
+ qcom_remove_pdm_subdev(rproc, &wcss->pdm_subdev);
rproc_del(rproc);
}
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 88623df7d0c3..8c7f7950b80e 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -294,7 +294,7 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work)
mutex_lock(&rproc->lock);
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING && rproc->state != RPROC_ATTACHED)
goto unlock_mutex;
if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 3555b535b168..a22d41689a7d 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -327,7 +327,7 @@ static int k3_dsp_rproc_start(struct rproc *rproc)
goto put_mbox;
}
- dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+ dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
goto put_mbox;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 50e486bcfa10..39a47540c590 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1144,6 +1144,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
u32 atcm_enable, btcm_enable, loczrama;
struct k3_r5_core *core0;
enum cluster_mode mode = cluster->mode;
+ int reset_ctrl_status;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
@@ -1160,11 +1161,11 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
r_state, c_state);
}
- ret = reset_control_status(core->reset);
- if (ret < 0) {
+ reset_ctrl_status = reset_control_status(core->reset);
+ if (reset_ctrl_status < 0) {
dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
- ret);
- return ret;
+ reset_ctrl_status);
+ return reset_ctrl_status;
}
/*
@@ -1199,7 +1200,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
* irrelevant if module reset is asserted (POR value has local reset
* deasserted), and is deemed as remoteproc mode
*/
- if (c_state && !ret && !halted) {
+ if (c_state && !reset_ctrl_status && !halted) {
dev_info(cdev, "configured R5F for IPC-only mode\n");
kproc->rproc->state = RPROC_DETACHED;
ret = 1;
@@ -1217,7 +1218,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
ret = 0;
} else {
dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
- !ret ? "deasserted" : "asserted",
+ !reset_ctrl_status ? "deasserted" : "asserted",
c_state ? "deasserted" : "asserted",
halted ? "halted" : "unhalted");
ret = -EINVAL;
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 84243d1dff9f..596f3ffb8935 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -25,6 +25,10 @@
/* RX mailbox client buffer max length */
#define MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
sizeof(struct zynqmp_ipi_message))
+
+#define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
+ (uint32_t)'m' << 8 | (uint32_t)'p')
+
/*
* settings for RPU cluster mode which
* reflects possible values of xlnx,cluster-mode dt-property
@@ -73,6 +77,26 @@ struct mbox_info {
struct mbox_chan *rx_chan;
};
+/**
+ * struct rsc_tbl_data
+ *
+ * Platform specific data structure used to sync resource table address.
+ * It's important to maintain order and size of each field on remote side.
+ *
+ * @version: version of data structure
+ * @magic_num: 32-bit magic number.
+ * @comp_magic_num: complement of above magic number
+ * @rsc_tbl_size: resource table size
+ * @rsc_tbl: resource table address
+ */
+struct rsc_tbl_data {
+ const int version;
+ const u32 magic_num;
+ const u32 comp_magic_num;
+ const u32 rsc_tbl_size;
+ const uintptr_t rsc_tbl;
+} __packed;
+
/*
* Hardcoded TCM bank values. This will stay in driver to maintain backward
* compatibility with device-tree that does not have TCM information.
@@ -95,20 +119,24 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
/**
* struct zynqmp_r5_core
*
+ * @rsc_tbl_va: resource table virtual address
* @dev: device of RPU instance
* @np: device node of RPU instance
* @tcm_bank_count: number TCM banks accessible to this RPU
* @tcm_banks: array of each TCM bank data
* @rproc: rproc handle
+ * @rsc_tbl_size: resource table size retrieved from remote
* @pm_domain_id: RPU CPU power domain id
* @ipi: pointer to mailbox information
*/
struct zynqmp_r5_core {
+ void __iomem *rsc_tbl_va;
struct device *dev;
struct device_node *np;
int tcm_bank_count;
struct mem_bank_data **tcm_banks;
struct rproc *rproc;
+ u32 rsc_tbl_size;
u32 pm_domain_id;
struct mbox_info *ipi;
};
@@ -557,6 +585,14 @@ static int add_tcm_banks(struct rproc *rproc)
dev_dbg(dev, "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx",
bank_name, bank_addr, da, bank_size);
+ /*
+ * In DETACHED state the firmware is already running, so there is no need
+ * to register the TCM carveouts again. However, the TCM PD node is still
+ * requested so that the platform management firmware knows the TCM is in use.
+ */
+ if (rproc->state == RPROC_DETACHED)
+ continue;
+
rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
bank_size, da,
tcm_mem_map, tcm_mem_unmap,
@@ -662,6 +698,107 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
return 0;
}
+static struct resource_table *zynqmp_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ struct zynqmp_r5_core *r5_core;
+
+ r5_core = rproc->priv;
+
+ *size = r5_core->rsc_tbl_size;
+
+ return (struct resource_table *)r5_core->rsc_tbl_va;
+}
+
+static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
+{
+ struct resource_table *rsc_tbl_addr;
+ struct device *dev = r5_core->dev;
+ struct rsc_tbl_data *rsc_data_va;
+ struct resource res_mem;
+ struct device_node *np;
+ int ret;
+
+ /*
+ * The remote processor firmware is expected to provide the resource table
+ * address via a struct rsc_tbl_data. The start of the first entry listed in
+ * the "memory-region" property holds that structure, which carries the
+ * resource table address, its size and a magic number used to validate the
+ * entry.
+ */
+ np = of_parse_phandle(r5_core->np, "memory-region", 0);
+ if (!np) {
+ dev_err(dev, "failed to get memory region dev node\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &res_mem);
+ of_node_put(np);
+ if (ret) {
+ dev_err(dev, "failed to get memory-region resource addr\n");
+ return -EINVAL;
+ }
+
+ rsc_data_va = (struct rsc_tbl_data *)ioremap_wc(res_mem.start,
+ sizeof(struct rsc_tbl_data));
+ if (!rsc_data_va) {
+ dev_err(dev, "failed to map resource table data address\n");
+ return -EIO;
+ }
+
+ /*
+ * If the RSC_TBL_XLNX_MAGIC number and its complement are not found, the
+ * resource table address is not considered valid and we don't attach.
+ */
+ if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
+ rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
+ dev_dbg(dev, "invalid magic number, won't attach\n");
+ iounmap((void __iomem *)rsc_data_va);
+ return -EINVAL;
+ }
+
+ r5_core->rsc_tbl_va = ioremap_wc(rsc_data_va->rsc_tbl,
+ rsc_data_va->rsc_tbl_size);
+ if (!r5_core->rsc_tbl_va) {
+ dev_err(dev, "failed to get resource table va\n");
+ iounmap((void __iomem *)rsc_data_va);
+ return -EINVAL;
+ }
+
+ rsc_tbl_addr = (struct resource_table *)r5_core->rsc_tbl_va;
+
+ /*
+ * Only resource table version 1 is expected for now. Don't fail the
+ * attach, but warn users about it.
+ */
+ if (rsc_tbl_addr->ver != 1)
+ dev_warn(dev, "unexpected resource table version %d\n",
+ rsc_tbl_addr->ver);
+
+ r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
+
+ iounmap((void __iomem *)rsc_data_va);
+
+ return 0;
+}
+
+static int zynqmp_r5_attach(struct rproc *rproc)
+{
+ dev_dbg(&rproc->dev, "rproc %d attached\n", rproc->index);
+
+ return 0;
+}
+
+static int zynqmp_r5_detach(struct rproc *rproc)
+{
+ /*
+ * Generate a last notification to the remote after clearing the virtio flag.
+ * If the host kicks during detach, the remote can check the virtio reset flag
+ * from the kick interrupt instead of polling it.
+ */
+ zynqmp_r5_rproc_kick(rproc, 0);
+
+ return 0;
+}
+
static const struct rproc_ops zynqmp_r5_rproc_ops = {
.prepare = zynqmp_r5_rproc_prepare,
.unprepare = zynqmp_r5_rproc_unprepare,
@@ -673,6 +810,9 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
.kick = zynqmp_r5_rproc_kick,
+ .get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
+ .attach = zynqmp_r5_attach,
+ .detach = zynqmp_r5_detach,
};
/**
@@ -723,6 +863,16 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
goto free_rproc;
}
+ /*
+ * If firmware is already available in memory, move the rproc state to
+ * DETACHED. Firmware can be preloaded by a debugger or by any other agent
+ * (another processor) in the system.
+ * If firmware isn't available in memory and no resource table is found,
+ * the rproc state remains OFFLINE.
+ */
+ if (!zynqmp_r5_get_rsc_table_va(r5_core))
+ r5_rproc->state = RPROC_DETACHED;
+
r5_core->rproc = r5_rproc;
return r5_core;
@@ -1134,6 +1284,7 @@ static void zynqmp_r5_cluster_exit(void *data)
for (i = 0; i < cluster->core_count; i++) {
r5_core = cluster->r5_cores[i];
zynqmp_r5_free_mbox(r5_core->ipi);
+ iounmap(r5_core->rsc_tbl_va);
of_reserved_mem_device_release(r5_core->dev);
put_device(r5_core->dev);
rproc_del(r5_core->rproc);
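The attach support added above expects the R5 firmware to leave a struct rsc_tbl_data header at the start of the first "memory-region" carveout. The firmware-side sketch below is illustrative only: the section names, the publish_resource_table() helper and the 64-bit rsc_tbl field (matching the packed uintptr_t the arm64 host reads) are assumptions, not taken from any Xilinx firmware.

#include <stdint.h>

#define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
			    (uint32_t)'m' << 8 | (uint32_t)'p')

/* Mirror of the host's struct rsc_tbl_data; field order and sizes must match
 * the packed layout the arm64 host reads (hence the explicit 64-bit rsc_tbl). */
struct rsc_tbl_data {
	int32_t  version;
	uint32_t magic_num;
	uint32_t comp_magic_num;
	uint32_t rsc_tbl_size;
	uint64_t rsc_tbl;
} __attribute__((packed));

/* Minimal resource table: version 1, no entries. */
static const struct {
	uint32_t ver;
	uint32_t num;
	uint32_t reserved[2];
} fw_resource_table __attribute__((section(".resource_table"), used)) = {
	.ver = 1,
};

/*
 * Assumed to be placed, via the firmware's linker script, at the start of the
 * first "memory-region" carveout -- the address zynqmp_r5_get_rsc_table_va()
 * maps on the Linux side. The section name is an assumption.
 */
static volatile struct rsc_tbl_data rsc_tbl_header
	__attribute__((section(".rsc_header"), used));

/* Called once early in firmware start-up, before the host tries to attach. */
void publish_resource_table(void)
{
	rsc_tbl_header.version        = 1;
	rsc_tbl_header.magic_num      = RSC_TBL_XLNX_MAGIC;
	rsc_tbl_header.comp_magic_num = ~RSC_TBL_XLNX_MAGIC;
	rsc_tbl_header.rsc_tbl_size   = sizeof(fw_resource_table);
	rsc_tbl_header.rsc_tbl        = (uintptr_t)&fw_resource_table;
}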
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 894ad9d37a66..421ccb40da8c 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -120,7 +120,7 @@ static const struct of_device_id meson_audio_arb_of_match[] = {
};
MODULE_DEVICE_TABLE(of, meson_audio_arb_of_match);
-static int meson_audio_arb_remove(struct platform_device *pdev)
+static void meson_audio_arb_remove(struct platform_device *pdev)
{
struct meson_audio_arb_data *arb = platform_get_drvdata(pdev);
@@ -128,8 +128,6 @@ static int meson_audio_arb_remove(struct platform_device *pdev)
spin_lock(&arb->lock);
writel(0, arb->regs);
spin_unlock(&arb->lock);
-
- return 0;
}
static int meson_audio_arb_probe(struct platform_device *pdev)
@@ -182,7 +180,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
static struct platform_driver meson_audio_arb_pdrv = {
.probe = meson_audio_arb_probe,
- .remove = meson_audio_arb_remove,
+ .remove_new = meson_audio_arb_remove,
.driver = {
.name = "meson-audio-arb-reset",
.of_match_table = meson_audio_arb_of_match,
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 255c894a4782..1cd157f4f03b 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -192,7 +192,7 @@ err_pm_disable_reset_deassert:
return error;
}
-static int rzg2l_usbphy_ctrl_remove(struct platform_device *pdev)
+static void rzg2l_usbphy_ctrl_remove(struct platform_device *pdev)
{
struct rzg2l_usbphy_ctrl_priv *priv = dev_get_drvdata(&pdev->dev);
@@ -200,8 +200,6 @@ static int rzg2l_usbphy_ctrl_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);
-
- return 0;
}
static struct platform_driver rzg2l_usbphy_ctrl_driver = {
@@ -210,7 +208,7 @@ static struct platform_driver rzg2l_usbphy_ctrl_driver = {
.of_match_table = rzg2l_usbphy_ctrl_match_table,
},
.probe = rzg2l_usbphy_ctrl_probe,
- .remove = rzg2l_usbphy_ctrl_remove,
+ .remove_new = rzg2l_usbphy_ctrl_remove,
};
module_platform_driver(rzg2l_usbphy_ctrl_driver);
diff --git a/drivers/reset/reset-ti-sci.c b/drivers/reset/reset-ti-sci.c
index cc01fa5b0bea..d384da0982fa 100644
--- a/drivers/reset/reset-ti-sci.c
+++ b/drivers/reset/reset-ti-sci.c
@@ -235,20 +235,18 @@ static int ti_sci_reset_probe(struct platform_device *pdev)
return reset_controller_register(&data->rcdev);
}
-static int ti_sci_reset_remove(struct platform_device *pdev)
+static void ti_sci_reset_remove(struct platform_device *pdev)
{
struct ti_sci_reset_data *data = platform_get_drvdata(pdev);
reset_controller_unregister(&data->rcdev);
idr_destroy(&data->idr);
-
- return 0;
}
static struct platform_driver ti_sci_reset_driver = {
.probe = ti_sci_reset_probe,
- .remove = ti_sci_reset_remove,
+ .remove_new = ti_sci_reset_remove,
.driver = {
.name = "ti-sci-reset",
.of_match_table = ti_sci_reset_of_match,
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index d7a342510902..eec7642d2686 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -52,8 +52,8 @@ static DEFINE_IDA(rpmsg_minor_ida);
* @readq: wait object for incoming queue
* @default_ept: set to channel default endpoint if the default endpoint should be re-used
* on device open to prevent endpoint address update.
- * remote_flow_restricted: to indicate if the remote has requested for flow to be limited
- * remote_flow_updated: to indicate if the flow control has been requested
+ * @remote_flow_restricted: to indicate if the remote has requested for flow to be limited
+ * @remote_flow_updated: to indicate if the flow control has been requested
*/
struct rpmsg_eptdev {
struct device dev;
@@ -566,4 +566,5 @@ static void rpmsg_chrdev_exit(void)
module_exit(rpmsg_chrdev_exit);
MODULE_ALIAS("rpmsg:rpmsg_chrdev");
+MODULE_DESCRIPTION("RPMSG device interface");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 0fa08266404d..712c06c02696 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -493,10 +493,10 @@ static inline int rpmsg_id_match(const struct rpmsg_device *rpdev,
}
/* match rpmsg channel and rpmsg driver */
-static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
+static int rpmsg_dev_match(struct device *dev, const struct device_driver *drv)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
- struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
+ const struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
const struct rpmsg_device_id *ids = rpdrv->id_table;
unsigned int i;
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index a3ba768138f1..42c7007be1b5 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -16,7 +16,7 @@
#include <linux/poll.h>
#define to_rpmsg_device(d) container_of(d, struct rpmsg_device, dev)
-#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
+#define to_rpmsg_driver(d) container_of_const(d, struct rpmsg_driver, drv)
extern const struct class rpmsg_class;
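Many hunks in this series constify the bus .match() callbacks and convert the matching to_*_driver() helpers to container_of_const(). A minimal sketch of the resulting pattern for a hypothetical foo bus (type and names invented for illustration):

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/string.h>

/* Hypothetical bus driver type, for illustration only. */
struct foo_driver {
	struct device_driver drv;
	const char *match_name;
};

/*
 * container_of_const() preserves constness: a const struct device_driver *
 * yields a const struct foo_driver *, so .match() can inspect the driver
 * without casting constness away.
 */
#define to_foo_driver(d) container_of_const(d, struct foo_driver, drv)

static int foo_bus_match(struct device *dev, const struct device_driver *drv)
{
	const struct foo_driver *fdrv = to_foo_driver(drv);

	/* Match on whatever the bus uses; here, simply the device name. */
	return !strcmp(dev_name(dev), fdrv->match_name);
}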
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 8a03af5ee5b3..80c4e5101c97 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -96,7 +96,7 @@ config SCLP_OFB
config S390_UV_UAPI
def_tristate m
prompt "Ultravisor userspace API"
- depends on S390 && (KVM || PROTECTED_VIRTUALIZATION_GUEST)
+ depends on S390
help
Selecting exposes parts of the UV interface to userspace
by providing a misc character device at /dev/uv.
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 781f84901256..53b68f8c32f3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1354,10 +1354,10 @@ int sch_is_pseudo_sch(struct subchannel *sch)
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
-static int css_bus_match(struct device *dev, struct device_driver *drv)
+static int css_bus_match(struct device *dev, const struct device_driver *drv)
{
struct subchannel *sch = to_subchannel(dev);
- struct css_driver *driver = to_cssdriver(drv);
+ const struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
/* When driver_override is set, only bind to the matching driver */
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index c2b175592bb7..a65a27dc520c 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -103,7 +103,7 @@ struct css_driver {
int (*settle)(void);
};
-#define to_cssdriver(n) container_of(n, struct css_driver, drv)
+#define to_cssdriver(n) container_of_const(n, struct css_driver, drv)
extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 920f550bc313..b0f23242e171 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -58,10 +58,10 @@ static const struct bus_type ccw_bus_type;
* subsystem driver and one channel system per machine, but
* we still use the abstraction. T.R. says it's a good idea. */
static int
-ccw_bus_match (struct device * dev, struct device_driver * drv)
+ccw_bus_match (struct device * dev, const struct device_driver * drv)
{
struct ccw_device *cdev = to_ccwdev(dev);
- struct ccw_driver *cdrv = to_ccwdrv(drv);
+ const struct ccw_driver *cdrv = to_ccwdrv(drv);
const struct ccw_device_id *ids = cdrv->ids, *found;
if (!ids)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 898865be0dad..0998b17ecb37 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -552,9 +552,9 @@ static void ap_poll_thread_stop(void)
*
* AP bus driver registration/unregistration.
*/
-static int ap_bus_match(struct device *dev, struct device_driver *drv)
+static int ap_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct ap_driver *ap_drv = to_ap_drv(drv);
+ const struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
/*
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index fdbc6fdfdf57..0b275c719319 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -158,7 +158,7 @@ struct ap_driver {
struct ap_config_info *old_config_info);
};
-#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
+#define to_ap_drv(x) container_of_const((x), struct ap_driver, driver)
int ap_driver_register(struct ap_driver *, struct module *, char *);
void ap_driver_unregister(struct ap_driver *);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 453665ac6020..7d3b904af9e8 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -600,7 +600,7 @@ static const struct attribute_group *fcoe_fcf_attr_groups[] = {
static const struct bus_type fcoe_bus_type;
static int fcoe_bus_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
if (dev->bus == &fcoe_bus_type)
return 1;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b5aae4e8ae33..32f94db6d6bf 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -528,7 +528,7 @@ static struct class sdev_class = {
};
/* all probing is done in the individual ->probe routines */
-static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
+static int scsi_bus_match(struct device *dev, const struct device_driver *gendrv)
{
struct scsi_device *sdp;
@@ -661,7 +661,7 @@ static int scsi_sdev_check_buf_bit(const char *buf)
return 1;
else if (buf[0] == '0')
return 0;
- else
+ else
return -EINVAL;
} else
return -EINVAL;
@@ -886,7 +886,7 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr,
if (!sdev->tagged_supported)
return -EINVAL;
-
+
sdev_printk(KERN_INFO, sdev,
"ignoring write to deprecated queue_type attribute");
return count;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 93e1978ad564..fde7de3b1e55 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1204,7 +1204,7 @@ static const struct device_type iscsi_flashnode_conn_dev_type = {
static const struct bus_type iscsi_flashnode_bus;
int iscsi_flashnode_bus_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
if (dev->bus == &iscsi_flashnode_bus)
return 1;
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 16018009a5a6..6dc0549f7900 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -747,9 +747,9 @@ static int maple_get_dma_buffer(void)
}
static int maple_match_bus_driver(struct device *devptr,
- struct device_driver *drvptr)
+ const struct device_driver *drvptr)
{
- struct maple_driver *maple_drv = to_maple_driver(drvptr);
+ const struct maple_driver *maple_drv = to_maple_driver(drvptr);
struct maple_device *maple_dev = to_maple_dev(devptr);
/* Trap empty port case */
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index 24a45920a240..f98f5a27e659 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -503,7 +503,7 @@ static const struct device_type siox_device_type = {
.release = siox_device_release,
};
-static int siox_match(struct device *dev, struct device_driver *drv)
+static int siox_match(struct device *dev, const struct device_driver *drv)
{
if (dev->type != &siox_device_type)
return 0;
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 41e62de1f91f..65e5515f7555 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -30,10 +30,10 @@ static const struct slim_device_id *slim_match(const struct slim_device_id *id,
return NULL;
}
-static int slim_device_match(struct device *dev, struct device_driver *drv)
+static int slim_device_match(struct device *dev, const struct device_driver *drv)
{
struct slim_device *sbdev = to_slim_device(dev);
- struct slim_driver *sbdrv = to_slim_driver(drv);
+ const struct slim_driver *sbdrv = to_slim_driver(drv);
/* Attempt an OF style match first */
if (of_driver_match_device(dev, drv))
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 50749e870efa..4fbff3a890e2 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -338,10 +338,10 @@ static void apr_rxwq(struct work_struct *work)
}
}
-static int apr_device_match(struct device *dev, struct device_driver *drv)
+static int apr_device_match(struct device *dev, const struct device_driver *drv)
{
struct apr_device *adev = to_apr_device(dev);
- struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_driver *adrv = to_apr_driver(drv);
const struct apr_device_id *id = adrv->id_table;
/* Attempt an OF style match first */
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index e40aac281b06..e4411771f482 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -359,6 +359,32 @@ static struct qcom_smem *__smem;
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
+/* The qcom hwspinlock id is always the smem host id plus one */
+#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1)
+
+/**
+ * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
+ * @host: remote processor id
+ *
+ * Busts the hwspin_lock for the given smem host id. This helper is intended
+ * for remoteproc drivers that manage remoteprocs with an equivalent smem
+ * driver instance in the remote firmware. Drivers can force a release of the
+ * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
+ *
+ * Context: Process context.
+ *
+ * Returns: 0 on success, otherwise negative errno.
+ */
+int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
+{
+ /* This function is for remote procs, so ignore SMEM_HOST_APPS */
+ if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
+ return -EINVAL;
+
+ return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
+}
+EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
+
/**
* qcom_smem_is_available() - Check if SMEM is available
*
diff --git a/drivers/soundwire/amd_init.c b/drivers/soundwire/amd_init.c
index 4cd26f3a21f5..db040f435059 100644
--- a/drivers/soundwire/amd_init.c
+++ b/drivers/soundwire/amd_init.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/cleanup.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -69,7 +70,6 @@ static struct sdw_amd_ctx *sdw_amd_probe_controller(struct sdw_amd_res *res)
{
struct sdw_amd_ctx *ctx;
struct acpi_device *adev;
- struct resource *sdw_res;
struct acp_sdw_pdata sdw_pdata[2];
struct platform_device_info pdevinfo[2];
u32 link_mask;
@@ -104,7 +104,8 @@ static struct sdw_amd_ctx *sdw_amd_probe_controller(struct sdw_amd_res *res)
ctx->count = count;
ctx->link_mask = res->link_mask;
- sdw_res = kzalloc(sizeof(*sdw_res), GFP_KERNEL);
+ struct resource *sdw_res __free(kfree) = kzalloc(sizeof(*sdw_res),
+ GFP_KERNEL);
if (!sdw_res) {
kfree(ctx);
return NULL;
@@ -132,7 +133,6 @@ static struct sdw_amd_ctx *sdw_amd_probe_controller(struct sdw_amd_res *res)
if (IS_ERR(ctx->pdev[index]))
goto err;
}
- kfree(sdw_res);
return ctx;
err:
while (index--) {
@@ -142,7 +142,6 @@ err:
platform_device_unregister(ctx->pdev[index]);
}
- kfree(sdw_res);
kfree(ctx);
return NULL;
}
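The soundwire hunks here (amd_init.c above, amd_manager.c, cadence_master.c and debugfs.c below) move temporary buffers to scope-based cleanup from <linux/cleanup.h>. A minimal sketch of the pattern, with an invented example_fill_buffer() function:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * The __free(kfree) annotation frees the buffer automatically when it goes
 * out of scope, on every return path, so the explicit kfree()/goto error
 * labels go away.
 */
static int example_fill_buffer(size_t len)
{
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* Stand-in for real work; any early return above or below stays leak-free. */
	memset(buf, 0xa5, len);

	return 0;
}

When an allocation must outlive the scope, ownership is handed out with no_free_ptr()/return_ptr() instead; the converted functions here free their buffers before returning, so plain __free(kfree) suffices.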
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 795e223f7e5c..0d01849c3586 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -6,6 +6,7 @@
*/
#include <linux/completion.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/jiffies.h>
@@ -603,7 +604,6 @@ static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
struct sdw_amd_dai_runtime *dai_runtime;
struct sdw_stream_config sconfig;
- struct sdw_port_config *pconfig;
int ch, dir;
int ret;
@@ -626,11 +626,10 @@ static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
- pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
- if (!pconfig) {
- ret = -ENOMEM;
- goto error;
- }
+ struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
+ GFP_KERNEL);
+ if (!pconfig)
+ return -ENOMEM;
pconfig->num = dai->id;
pconfig->ch_mask = (1 << ch) - 1;
@@ -639,8 +638,6 @@ static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
if (ret)
dev_err(amd_manager->dev, "add manager to stream failed:%d\n", ret);
- kfree(pconfig);
-error:
return ret;
}
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 191e6cc6f962..263ca32f0c5c 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1410,7 +1410,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
}
}
if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
- !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
+ !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
/* Clear parity interrupt before enabling interrupt mask */
status = sdw_read_no_pm(slave, SDW_SCP_INT1);
if (status < 0) {
@@ -1436,7 +1436,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
* device-dependent, it might e.g. only be enabled in
* steady-state after a couple of frames.
*/
- val = slave->prop.scp_int1_mask;
+ val = prop->scp_int1_mask;
/* Enable SCP interrupts */
ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
@@ -1447,7 +1447,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
}
/* No need to continue if DP0 is not present */
- if (!slave->prop.dp0_prop)
+ if (!prop->dp0_prop)
return 0;
/* Enable DP0 interrupts */
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index c32faace618f..d928258c6761 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -19,7 +19,7 @@
* struct sdw_device_id.
*/
static const struct sdw_device_id *
-sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
+sdw_get_device_id(struct sdw_slave *slave, const struct sdw_driver *drv)
{
const struct sdw_device_id *id;
@@ -35,10 +35,10 @@ sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
return NULL;
}
-static int sdw_bus_match(struct device *dev, struct device_driver *ddrv)
+static int sdw_bus_match(struct device *dev, const struct device_driver *ddrv)
{
struct sdw_slave *slave;
- struct sdw_driver *drv;
+ const struct sdw_driver *drv;
int ret = 0;
if (is_sdw_slave(dev)) {
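
This is one instance of a tree-wide change that also appears in the spi, spmi, ssb, staging, tc, tee, thunderbolt, serdev, usb, vdpa, vfio and virtio hunks further down: bus .match() callbacks now take a const struct device_driver *, and container_of_const() lets the bus-specific cast preserve that constness. A minimal sketch of the shape, with hypothetical names (assumes <linux/device.h>, <linux/container_of.h>, <linux/string.h>):

struct example_driver {
	struct device_driver driver;
	const char *chip;
};

#define to_example_driver(__drv) \
	container_of_const(__drv, struct example_driver, driver)

static int example_bus_match(struct device *dev, const struct device_driver *drv)
{
	const struct example_driver *edrv = to_example_driver(drv);

	/* match purely by name, only for the purpose of the sketch */
	return strcmp(dev_name(dev), edrv->chip) == 0;
}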
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 74da99034dab..e0683a5975d1 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -6,6 +6,7 @@
* Used by Master driver
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/debugfs.h>
@@ -323,12 +324,11 @@ static ssize_t cdns_sprintf(struct sdw_cdns *cdns,
static int cdns_reg_show(struct seq_file *s, void *data)
{
struct sdw_cdns *cdns = s->private;
- char *buf;
ssize_t ret;
int num_ports;
int i, j;
- buf = kzalloc(RD_BUF, GFP_KERNEL);
+ char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -389,7 +389,6 @@ static int cdns_reg_show(struct seq_file *s, void *data)
ret += cdns_sprintf(cdns, buf, ret, CDNS_PDI_CONFIG(i));
seq_printf(s, "%s", buf);
- kfree(buf);
return 0;
}
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index 67abd7e52f09..c30f571934ee 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2017-2019 Intel Corporation.
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/debugfs.h>
+#include <linux/firmware.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -48,18 +50,16 @@ static ssize_t sdw_sprintf(struct sdw_slave *slave,
static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
{
struct sdw_slave *slave = s_file->private;
- char *buf;
ssize_t ret;
int i, j;
- buf = kzalloc(RD_BUF, GFP_KERNEL);
+ char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_noidle(&slave->dev);
- kfree(buf);
return ret;
}
@@ -131,12 +131,149 @@ static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
pm_runtime_mark_last_busy(&slave->dev);
pm_runtime_put(&slave->dev);
- kfree(buf);
-
return 0;
}
DEFINE_SHOW_ATTRIBUTE(sdw_slave_reg);
+#define MAX_CMD_BYTES 256
+
+static int cmd;
+static u32 start_addr;
+static size_t num_bytes;
+static u8 read_buffer[MAX_CMD_BYTES];
+static char *firmware_file;
+
+static int set_command(void *data, u64 value)
+{
+ struct sdw_slave *slave = data;
+
+ if (value > 1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ dev_dbg(&slave->dev, "command: %s\n", value ? "read" : "write");
+ cmd = value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(set_command_fops, NULL,
+ set_command, "%llu\n");
+
+static int set_start_address(void *data, u64 value)
+{
+ struct sdw_slave *slave = data;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ dev_dbg(&slave->dev, "start address %#llx\n", value);
+
+ start_addr = value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(set_start_address_fops, NULL,
+ set_start_address, "%llu\n");
+
+static int set_num_bytes(void *data, u64 value)
+{
+ struct sdw_slave *slave = data;
+
+ if (value == 0 || value > MAX_CMD_BYTES)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ dev_dbg(&slave->dev, "number of bytes %lld\n", value);
+
+ num_bytes = value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(set_num_bytes_fops, NULL,
+ set_num_bytes, "%llu\n");
+
+static int cmd_go(void *data, u64 value)
+{
+ struct sdw_slave *slave = data;
+ int ret;
+
+ if (value != 1)
+ return -EINVAL;
+
+ /* one last check */
+ if (start_addr > SDW_REG_MAX ||
+ num_bytes == 0 || num_bytes > MAX_CMD_BYTES)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(&slave->dev);
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_noidle(&slave->dev);
+ return ret;
+ }
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ dev_dbg(&slave->dev, "starting command\n");
+
+ if (cmd == 0) {
+ const struct firmware *fw;
+
+ ret = request_firmware(&fw, firmware_file, &slave->dev);
+ if (ret < 0) {
+ dev_err(&slave->dev, "firmware %s not found\n", firmware_file);
+ goto out;
+ }
+
+ if (fw->size != num_bytes) {
+ dev_err(&slave->dev,
+ "firmware %s: unexpected size %zd, desired %zd\n",
+ firmware_file, fw->size, num_bytes);
+			release_firmware(fw);
+			ret = -EINVAL;
+			goto out;
+ }
+
+ ret = sdw_nwrite_no_pm(slave, start_addr, num_bytes, fw->data);
+ release_firmware(fw);
+ } else {
+ ret = sdw_nread_no_pm(slave, start_addr, num_bytes, read_buffer);
+ }
+
+ dev_dbg(&slave->dev, "command completed %d\n", ret);
+
+out:
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put(&slave->dev);
+
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(cmd_go_fops, NULL,
+ cmd_go, "%llu\n");
+
+#define MAX_LINE_LEN 128
+
+static int read_buffer_show(struct seq_file *s_file, void *data)
+{
+ char buf[MAX_LINE_LEN];
+ int i;
+
+ if (num_bytes == 0 || num_bytes > MAX_CMD_BYTES)
+ return -EINVAL;
+
+ for (i = 0; i < num_bytes; i++) {
+ scnprintf(buf, MAX_LINE_LEN, "address %#x val 0x%02x\n",
+ start_addr + i, read_buffer[i]);
+ seq_printf(s_file, "%s", buf);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(read_buffer);
+
void sdw_slave_debugfs_init(struct sdw_slave *slave)
{
struct dentry *master;
@@ -151,6 +288,16 @@ void sdw_slave_debugfs_init(struct sdw_slave *slave)
debugfs_create_file("registers", 0400, d, slave, &sdw_slave_reg_fops);
+ /* interface to send arbitrary commands */
+ debugfs_create_file("command", 0200, d, slave, &set_command_fops);
+ debugfs_create_file("start_address", 0200, d, slave, &set_start_address_fops);
+ debugfs_create_file("num_bytes", 0200, d, slave, &set_num_bytes_fops);
+ debugfs_create_file("go", 0200, d, slave, &cmd_go_fops);
+
+ debugfs_create_file("read_buffer", 0400, d, slave, &read_buffer_fops);
+ firmware_file = NULL;
+ debugfs_create_str("firmware_file", 0200, d, &firmware_file);
+
slave->debugfs = d;
}
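
Taken together, the new debugfs nodes form a simple peek/poke interface; the following usage sequence is inferred from the handlers above rather than quoted from the patch. Writing 1 to "command" selects a read and 0 selects a write; "start_address" and "num_bytes" bound the transfer, and writing 1 to "go" issues it. Reads land in "read_buffer", which prints one "address ... val ..." line per byte, while writes take their payload from the firmware file named in "firmware_file", whose size must match num_bytes. The setters and the go handler all taint the kernel with TAINT_USER because userspace is driving the hardware directly.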
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index c70a63d009ae..b9316207c3ab 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(sdw_compute_slave_ports);
static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
struct sdw_group_params *params,
- int port_bo, int hstop)
+ int *port_bo, int hstop)
{
struct sdw_transport_data t_data = {0};
struct sdw_port_runtime *p_rt;
@@ -108,7 +108,7 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
false, SDW_BLK_GRP_CNT_1, sample_int,
- port_bo, port_bo >> 8, hstart, hstop,
+ *port_bo, (*port_bo) >> 8, hstart, hstop,
SDW_BLK_PKG_PER_PORT, 0x0);
sdw_fill_port_params(&p_rt->port_params,
@@ -120,15 +120,15 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
if (!(p_rt == list_first_entry(&m_rt->port_list,
struct sdw_port_runtime,
port_node))) {
- port_bo += bps * ch;
+ (*port_bo) += bps * ch;
continue;
}
t_data.hstart = hstart;
t_data.hstop = hstop;
- t_data.block_offset = port_bo;
+ t_data.block_offset = *port_bo;
t_data.sub_block_offset = 0;
- port_bo += bps * ch;
+ (*port_bo) += bps * ch;
}
sdw_compute_slave_ports(m_rt, &t_data);
@@ -146,9 +146,7 @@ static void _sdw_compute_port_params(struct sdw_bus *bus,
port_bo = 1;
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
- sdw_compute_master_ports(m_rt, &params[i], port_bo, hstop);
-
- port_bo += m_rt->ch_count * m_rt->stream->params.bps;
+ sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
}
hstop = hstop - params[i].hwidth;
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 01e1a0f3ec39..421da0f86fad 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -73,12 +74,11 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
struct sdw_intel *sdw = s_file->private;
void __iomem *s = sdw->link_res->shim;
void __iomem *a = sdw->link_res->alh;
- char *buf;
ssize_t ret;
int i, j;
unsigned int links, reg;
- buf = kzalloc(RD_BUF, GFP_KERNEL);
+ char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -129,7 +129,6 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
seq_printf(s_file, "%s", buf);
- kfree(buf);
return 0;
}
@@ -727,7 +726,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_cdns_dai_runtime *dai_runtime;
struct sdw_cdns_pdi *pdi;
struct sdw_stream_config sconfig;
- struct sdw_port_config *pconfig;
int ch, dir;
int ret;
@@ -743,10 +741,8 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
- if (!pdi) {
- ret = -EINVAL;
- goto error;
- }
+ if (!pdi)
+ return -EINVAL;
/* do run-time configurations for SHIM, ALH and PDI/PORT */
intel_pdi_shim_configure(sdw, pdi);
@@ -763,7 +759,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sdw->instance,
pdi->intel_alh_id);
if (ret)
- goto error;
+ return ret;
sconfig.direction = dir;
sconfig.ch_count = ch;
@@ -773,11 +769,10 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
- pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
- if (!pconfig) {
- ret = -ENOMEM;
- goto error;
- }
+ struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
+ GFP_KERNEL);
+ if (!pconfig)
+ return -ENOMEM;
pconfig->num = pdi->num;
pconfig->ch_mask = (1 << ch) - 1;
@@ -787,8 +782,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
if (ret)
dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
- kfree(pconfig);
-error:
return ret;
}
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index b68e74c294e7..68838e843b54 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -59,6 +59,11 @@ struct sdw_intel {
};
struct sdw_intel_prop {
+ u16 clde;
+ u16 doaise2;
+ u16 dodse2;
+ u16 clds;
+ u16 clss;
u16 doaise;
u16 doais;
u16 dodse;
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index 8b1b6ad420cf..781fe0aefa68 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-// Copyright(c) 2023 Intel Corporation. All rights reserved.
+// Copyright(c) 2023 Intel Corporation
/*
* Soundwire Intel ops for LunarLake
*/
#include <linux/acpi.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
@@ -27,6 +28,11 @@ static void intel_shim_vs_init(struct sdw_intel *sdw)
void __iomem *shim_vs = sdw->link_res->shim_vs;
struct sdw_bus *bus = &sdw->cdns.bus;
struct sdw_intel_prop *intel_prop;
+ u16 clde;
+ u16 doaise2;
+ u16 dodse2;
+ u16 clds;
+ u16 clss;
u16 doaise;
u16 doais;
u16 dodse;
@@ -34,12 +40,22 @@ static void intel_shim_vs_init(struct sdw_intel *sdw)
u16 act;
intel_prop = bus->vendor_specific_prop;
+ clde = intel_prop->clde;
+ doaise2 = intel_prop->doaise2;
+ dodse2 = intel_prop->dodse2;
+ clds = intel_prop->clds;
+ clss = intel_prop->clss;
doaise = intel_prop->doaise;
doais = intel_prop->doais;
dodse = intel_prop->dodse;
dods = intel_prop->dods;
act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
+ u16p_replace_bits(&act, clde, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDE);
+ u16p_replace_bits(&act, doaise2, SDW_SHIM3_INTEL_VS_ACTMCTL_DOAISE2);
+ u16p_replace_bits(&act, dodse2, SDW_SHIM3_INTEL_VS_ACTMCTL_DODSE2);
+ u16p_replace_bits(&act, clds, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDS);
+ u16p_replace_bits(&act, clss, SDW_SHIM3_INTEL_VS_ACTMCTL_CLSS);
u16p_replace_bits(&act, doaise, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE);
u16p_replace_bits(&act, doais, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
u16p_replace_bits(&act, dodse, SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE);
@@ -295,7 +311,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_cdns_dai_runtime *dai_runtime;
struct sdw_cdns_pdi *pdi;
struct sdw_stream_config sconfig;
- struct sdw_port_config *pconfig;
int ch, dir;
int ret;
@@ -310,11 +325,8 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
dir = SDW_DATA_DIR_TX;
pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
-
- if (!pdi) {
- ret = -EINVAL;
- goto error;
- }
+ if (!pdi)
+ return -EINVAL;
/* use same definitions for alh_id as previous generations */
pdi->intel_alh_id = (sdw->instance * 16) + pdi->num + 3;
@@ -335,7 +347,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sdw->instance,
pdi->intel_alh_id);
if (ret)
- goto error;
+ return ret;
sconfig.direction = dir;
sconfig.ch_count = ch;
@@ -345,11 +357,10 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
- pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
- if (!pconfig) {
- ret = -ENOMEM;
- goto error;
- }
+ struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
+ GFP_KERNEL);
+ if (!pconfig)
+ return -ENOMEM;
pconfig->num = pdi->num;
pconfig->ch_mask = (1 << ch) - 1;
@@ -359,8 +370,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
if (ret)
dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
- kfree(pconfig);
-error:
return ret;
}
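
The ACTMCTL programming above relies on the replace-bits helpers from <linux/bitfield.h>: each SDW_SHIM*_INTEL_VS_ACTMCTL_* macro is a field mask, and u16p_replace_bits() shifts the new value into that field and merges it into the register value without disturbing the other bits. A minimal sketch with a made-up field, not from the patch:

#include <linux/bitfield.h>

#define EXAMPLE_FIELD	GENMASK(6, 4)	/* hypothetical 3-bit field */

static u16 example_program_field(u16 reg, u16 val)
{
	/* clears bits 6:4 of reg and inserts val there */
	u16p_replace_bits(&reg, val, EXAMPLE_FIELD);
	return reg;
}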
diff --git a/drivers/soundwire/intel_ace2x_debugfs.c b/drivers/soundwire/intel_ace2x_debugfs.c
index 3d24661ffd37..206a8d511ebd 100644
--- a/drivers/soundwire/intel_ace2x_debugfs.c
+++ b/drivers/soundwire/intel_ace2x_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-// Copyright(c) 2023 Intel Corporation. All rights reserved.
+// Copyright(c) 2023 Intel Corporation
#include <linux/acpi.h>
#include <linux/debugfs.h>
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 18517121cc89..8807e01cbf7c 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -47,6 +47,7 @@ struct wake_capable_part {
};
static struct wake_capable_part wake_capable_list[] = {
+ {0x01fa, 0x4243},
{0x025d, 0x5682},
{0x025d, 0x700},
{0x025d, 0x711},
@@ -161,12 +162,32 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
}
/* initialize with hardware defaults, in case the properties are not found */
+ intel_prop->clde = 0x0;
+ intel_prop->doaise2 = 0x0;
+ intel_prop->dodse2 = 0x0;
+ intel_prop->clds = 0x0;
+ intel_prop->clss = 0x0;
intel_prop->doaise = 0x1;
intel_prop->doais = 0x3;
intel_prop->dodse = 0x0;
intel_prop->dods = 0x1;
fwnode_property_read_u16(link,
+ "intel-sdw-clde",
+ &intel_prop->clde);
+ fwnode_property_read_u16(link,
+ "intel-sdw-doaise2",
+ &intel_prop->doaise2);
+ fwnode_property_read_u16(link,
+ "intel-sdw-dodse2",
+ &intel_prop->dodse2);
+ fwnode_property_read_u16(link,
+ "intel-sdw-clds",
+ &intel_prop->clds);
+ fwnode_property_read_u16(link,
+ "intel-sdw-clss",
+ &intel_prop->clss);
+ fwnode_property_read_u16(link,
"intel-sdw-doaise",
&intel_prop->doaise);
fwnode_property_read_u16(link,
@@ -193,9 +214,30 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
static int intel_prop_read(struct sdw_bus *bus)
{
+ struct sdw_master_prop *prop;
+
/* Initialize with default handler to read all DisCo properties */
sdw_master_read_prop(bus);
+ /*
+ * Only one bus frequency is supported so far, filter
+ * frequencies reported in the DSDT
+ */
+ prop = &bus->prop;
+ if (prop->clk_freq && prop->num_clk_freq > 1) {
+ unsigned int default_bus_frequency;
+
+ default_bus_frequency =
+ prop->default_frame_rate *
+ prop->default_row *
+ prop->default_col /
+ SDW_DOUBLE_RATE_FACTOR;
+
+ prop->num_clk_freq = 1;
+ prop->clk_freq[0] = default_bus_frequency;
+ prop->max_clk_freq = default_bus_frequency;
+ }
+
/* read Intel-specific properties */
sdw_master_read_intel_prop(bus);
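
The frequency filter above collapses the DSDT-reported clock list to the single value implied by the default frame shape. As a worked example (illustrative values, not from the patch): with the common 48000 frames per second, 50 rows and 4 columns, the computation is 48000 * 50 * 4 / SDW_DOUBLE_RATE_FACTOR = 9,600,000 / 2 = 4.8 MHz, which then becomes both clk_freq[0] and max_clk_freq.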
diff --git a/drivers/soundwire/intel_bus_common.c b/drivers/soundwire/intel_bus_common.c
index e5ac3cc7cb79..df944e11b9ca 100644
--- a/drivers/soundwire/intel_bus_common.c
+++ b/drivers/soundwire/intel_bus_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-// Copyright(c) 2015-2023 Intel Corporation. All rights reserved.
+// Copyright(c) 2015-2023 Intel Corporation
#include <linux/acpi.h>
#include <linux/soundwire/sdw_registers.h>
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index ce5cf3ecceb5..aed57002fd0e 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -197,8 +197,7 @@ struct qcom_swrm_ctrl {
int num_dout_ports;
int cols_index;
int rows_index;
- unsigned long dout_port_mask;
- unsigned long din_port_mask;
+ unsigned long port_mask;
u32 intr_mask;
u8 rcmd_id;
u8 wcmd_id;
@@ -1146,11 +1145,7 @@ static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
mutex_lock(&ctrl->port_lock);
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
- if (m_rt->direction == SDW_DATA_DIR_RX)
- port_mask = &ctrl->dout_port_mask;
- else
- port_mask = &ctrl->din_port_mask;
-
+ port_mask = &ctrl->port_mask;
list_for_each_entry(p_rt, &m_rt->port_list, port_node)
clear_bit(p_rt->num, port_mask);
}
@@ -1195,13 +1190,9 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
if (ctrl->bus.id != m_rt->bus->id)
continue;
- if (m_rt->direction == SDW_DATA_DIR_RX) {
- maxport = ctrl->num_dout_ports;
- port_mask = &ctrl->dout_port_mask;
- } else {
- maxport = ctrl->num_din_ports;
- port_mask = &ctrl->din_port_mask;
- }
+ port_mask = &ctrl->port_mask;
+ maxport = ctrl->num_dout_ports + ctrl->num_din_ports;
+
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
slave = s_rt->slave;
@@ -1401,8 +1392,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
return -EINVAL;
/* Valid port numbers are from 1-14, so mask out port 0 explicitly */
- set_bit(0, &ctrl->dout_port_mask);
- set_bit(0, &ctrl->din_port_mask);
+ set_bit(0, &ctrl->port_mask);
ret = of_property_read_u8_array(np, "qcom,ports-offset1",
off1, nports);
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 4e9e7d2a942d..7aa4900dcf31 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1181,6 +1181,8 @@ static struct sdw_master_runtime
m_rt->bus = bus;
m_rt->stream = stream;
+ bus->stream_refcount++;
+
return m_rt;
}
@@ -1217,6 +1219,7 @@ static void sdw_master_rt_free(struct sdw_master_runtime *m_rt,
struct sdw_stream_runtime *stream)
{
struct sdw_slave_runtime *s_rt, *_s_rt;
+ struct sdw_bus *bus = m_rt->bus;
list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
sdw_slave_port_free(s_rt->slave, stream);
@@ -1226,6 +1229,8 @@ static void sdw_master_rt_free(struct sdw_master_runtime *m_rt,
list_del(&m_rt->stream_node);
list_del(&m_rt->bus_node);
kfree(m_rt);
+
+ bus->stream_refcount--;
}
/**
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 6246254e1dff..7c1a9a985373 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -75,6 +75,7 @@
#define REG_CONTROL (0x00)
#define REG_FRAME_SIZE (0x04)
+#define FRAME_SIZE_MASK GENMASK(5, 0)
#define REG_STATUS (0x08)
#define REG_INT_CLEAR (0x0c)
#define REG_RX_DATA (0x10)
@@ -89,6 +90,9 @@
#define REG_RIS (0x24)
#define REG_CONTROL2 (0x28)
#define REG_COMMAND (0x2c)
+#define COMMAND_CLRFRAMECNT BIT(4)
+#define COMMAND_TXFIFORST BIT(3)
+#define COMMAND_RXFIFORST BIT(2)
#define REG_PKTSIZE (0x30)
#define REG_CMD_SIZE (0x34)
#define REG_HWSTATUS (0x38)
@@ -103,10 +107,11 @@ struct mchp_corespi {
u8 *rx_buf;
u32 clk_gen; /* divider for spi output clock generated by the controller */
u32 clk_mode;
+ u32 pending_slave_select;
int irq;
int tx_len;
int rx_len;
- int pending;
+ int n_bytes;
};
static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg)
@@ -130,113 +135,126 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi)
static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
{
- u8 data;
- int fifo_max, i = 0;
+ while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
+ u32 data = mchp_corespi_read(spi, REG_RX_DATA);
- fifo_max = min(spi->rx_len, FIFO_DEPTH);
+ spi->rx_len -= spi->n_bytes;
- while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
- data = mchp_corespi_read(spi, REG_RX_DATA);
+ if (!spi->rx_buf)
+ continue;
- if (spi->rx_buf)
- *spi->rx_buf++ = data;
- i++;
+ if (spi->n_bytes == 4)
+ *((u32 *)spi->rx_buf) = data;
+ else if (spi->n_bytes == 2)
+ *((u16 *)spi->rx_buf) = data;
+ else
+ *spi->rx_buf = data;
+
+ spi->rx_buf += spi->n_bytes;
}
- spi->rx_len -= i;
- spi->pending -= i;
}
static void mchp_corespi_enable_ints(struct mchp_corespi *spi)
{
- u32 control, mask = INT_ENABLE_MASK;
-
- mchp_corespi_disable(spi);
-
- control = mchp_corespi_read(spi, REG_CONTROL);
-
- control |= mask;
- mchp_corespi_write(spi, REG_CONTROL, control);
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
- control |= CONTROL_ENABLE;
+ control |= INT_ENABLE_MASK;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static void mchp_corespi_disable_ints(struct mchp_corespi *spi)
{
- u32 control, mask = INT_ENABLE_MASK;
-
- mchp_corespi_disable(spi);
-
- control = mchp_corespi_read(spi, REG_CONTROL);
- control &= ~mask;
- mchp_corespi_write(spi, REG_CONTROL, control);
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
- control |= CONTROL_ENABLE;
+ control &= ~INT_ENABLE_MASK;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
{
u32 control;
- u16 lenpart;
+ u32 lenpart;
+ u32 frames = mchp_corespi_read(spi, REG_FRAMESUP);
/*
- * Disable the SPI controller. Writes to transfer length have
- * no effect when the controller is enabled.
+	 * Writing to FRAMECNT in REG_CONTROL will reset the frame count, so
+	 * taking this shortcut requires an explicit clear.
*/
- mchp_corespi_disable(spi);
+ if (frames == len) {
+ mchp_corespi_write(spi, REG_COMMAND, COMMAND_CLRFRAMECNT);
+ return;
+ }
/*
* The lower 16 bits of the frame count are stored in the control reg
* for legacy reasons, but the upper 16 written to a different register:
* FRAMESUP. While both the upper and lower bits can be *READ* from the
- * FRAMESUP register, writing to the lower 16 bits is a NOP
+ * FRAMESUP register, writing to the lower 16 bits is (supposedly) a NOP.
+ *
+ * The driver used to disable the controller while modifying the frame
+ * count, and mask off the lower 16 bits of len while writing to
+	 * FRAMESUP. When the driver was changed to disable the controller as
+ * infrequently as possible, it was discovered that the logic of
+ * lenpart = len & 0xffff_0000
+ * write(REG_FRAMESUP, lenpart)
+ * would actually write zeros into the lower 16 bits on an mpfs250t-es,
+ * despite documentation stating these bits were read-only.
+	 * Writing len unmasked into FRAMESUP ensures those bits aren't zeroed
+ * on an mpfs250t-es and will be a NOP for the lower 16 bits on hardware
+ * that matches the documentation.
*/
lenpart = len & 0xffff;
-
control = mchp_corespi_read(spi, REG_CONTROL);
control &= ~CONTROL_FRAMECNT_MASK;
control |= lenpart << CONTROL_FRAMECNT_SHIFT;
mchp_corespi_write(spi, REG_CONTROL, control);
-
- lenpart = len & 0xffff0000;
- mchp_corespi_write(spi, REG_FRAMESUP, lenpart);
-
- control |= CONTROL_ENABLE;
- mchp_corespi_write(spi, REG_CONTROL, control);
+ mchp_corespi_write(spi, REG_FRAMESUP, len);
}
static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
{
- u8 byte;
int fifo_max, i = 0;
- fifo_max = min(spi->tx_len, FIFO_DEPTH);
+ fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
mchp_corespi_set_xfer_size(spi, fifo_max);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
- byte = spi->tx_buf ? *spi->tx_buf++ : 0xaa;
- mchp_corespi_write(spi, REG_TX_DATA, byte);
+ u32 word;
+
+ if (spi->n_bytes == 4)
+ word = spi->tx_buf ? *((u32 *)spi->tx_buf) : 0xaa;
+ else if (spi->n_bytes == 2)
+ word = spi->tx_buf ? *((u16 *)spi->tx_buf) : 0xaa;
+ else
+ word = spi->tx_buf ? *spi->tx_buf : 0xaa;
+
+ mchp_corespi_write(spi, REG_TX_DATA, word);
+ if (spi->tx_buf)
+ spi->tx_buf += spi->n_bytes;
i++;
}
- spi->tx_len -= i;
- spi->pending += i;
+ spi->tx_len -= i * spi->n_bytes;
}
static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
{
+ u32 frame_size = mchp_corespi_read(spi, REG_FRAME_SIZE);
u32 control;
+ if ((frame_size & FRAME_SIZE_MASK) == bt)
+ return;
+
/*
* Disable the SPI controller. Writes to the frame size have
* no effect when the controller is enabled.
*/
- mchp_corespi_disable(spi);
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
mchp_corespi_write(spi, REG_FRAME_SIZE, bt);
- control = mchp_corespi_read(spi, REG_CONTROL);
control |= CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
@@ -249,8 +267,18 @@ static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
reg &= ~BIT(spi_get_chipselect(spi, 0));
reg |= !disable << spi_get_chipselect(spi, 0);
+ corespi->pending_slave_select = reg;
- mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
+ /*
+ * Only deassert chip select immediately. Writing to some registers
+ * requires the controller to be disabled, which results in the
+ * output pins being tristated and can cause the SCLK and MOSI lines
+ * to transition. Therefore asserting the chip select is deferred
+ * until just before writing to the TX FIFO, to ensure the device
+ * doesn't see any spurious clock transitions whilst CS is enabled.
+ */
+ if (((spi->mode & SPI_CS_HIGH) == 0) == disable)
+ mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
}
static int mchp_corespi_setup(struct spi_device *spi)
@@ -269,6 +297,7 @@ static int mchp_corespi_setup(struct spi_device *spi)
if (spi->mode & SPI_CS_HIGH) {
reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
reg |= BIT(spi_get_chipselect(spi, 0));
+ corespi->pending_slave_select = reg;
mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
}
return 0;
@@ -279,17 +308,13 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *
unsigned long clk_hz;
u32 control = mchp_corespi_read(spi, REG_CONTROL);
- control |= CONTROL_MASTER;
+ control &= ~CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+ control |= CONTROL_MASTER;
control &= ~CONTROL_MODE_MASK;
control |= MOTOROLA_MODE;
- mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
-
- /* max. possible spi clock rate is the apb clock rate */
- clk_hz = clk_get_rate(spi->clk);
- host->max_speed_hz = clk_hz;
-
/*
* The controller must be configured so that it doesn't remove Chip
* Select until the entire message has been transferred, even if at
@@ -298,11 +323,16 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *
* BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames
* for the 8 bit transfers that this driver uses.
*/
- control = mchp_corespi_read(spi, REG_CONTROL);
control |= CONTROL_SPS | CONTROL_BIGFIFO;
mchp_corespi_write(spi, REG_CONTROL, control);
+ mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+
+ /* max. possible spi clock rate is the apb clock rate */
+ clk_hz = clk_get_rate(spi->clk);
+ host->max_speed_hz = clk_hz;
+
mchp_corespi_enable_ints(spi);
/*
@@ -310,7 +340,8 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *
* select is relinquished to the hardware. SSELOUT is enabled too so we
* can deal with active high targets.
*/
- mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT);
+ spi->pending_slave_select = SSELOUT | SSEL_DIRECT;
+ mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
control = mchp_corespi_read(spi, REG_CONTROL);
@@ -324,8 +355,6 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
{
u32 control;
- mchp_corespi_disable(spi);
-
control = mchp_corespi_read(spi, REG_CONTROL);
if (spi->clk_mode)
control |= CONTROL_CLKMODE;
@@ -334,12 +363,12 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
mchp_corespi_write(spi, REG_CONTROL, control);
- mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE);
}
static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
{
- u32 control, mode_val;
+ u32 mode_val;
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
switch (mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
@@ -357,12 +386,13 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int
}
/*
- * Disable the SPI controller. Writes to the frame size have
+ * Disable the SPI controller. Writes to the frame protocol have
* no effect when the controller is enabled.
*/
- mchp_corespi_disable(spi);
- control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
control |= mode_val;
@@ -383,21 +413,18 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
if (intfield == 0)
return IRQ_NONE;
- if (intfield & INT_TXDONE) {
+ if (intfield & INT_TXDONE)
mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
+ if (intfield & INT_RXRDY) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
+
if (spi->rx_len)
mchp_corespi_read_fifo(spi);
-
- if (spi->tx_len)
- mchp_corespi_write_fifo(spi);
-
- if (!spi->rx_len)
- finalise = true;
}
- if (intfield & INT_RXRDY)
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
+ if (!spi->rx_len && !spi->tx_len)
+ finalise = true;
if (intfield & INT_RX_CHANNEL_OVERFLOW) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
@@ -477,13 +504,17 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
spi->rx_buf = xfer->rx_buf;
spi->tx_len = xfer->len;
spi->rx_len = xfer->len;
- spi->pending = 0;
+ spi->n_bytes = roundup_pow_of_two(DIV_ROUND_UP(xfer->bits_per_word, BITS_PER_BYTE));
- mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH)
- ? FIFO_DEPTH : spi->tx_len);
+ mchp_corespi_set_framesize(spi, xfer->bits_per_word);
- if (spi->tx_len)
+ mchp_corespi_write(spi, REG_COMMAND, COMMAND_RXFIFORST | COMMAND_TXFIFORST);
+
+ mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
+
+ while (spi->tx_len)
mchp_corespi_write_fifo(spi);
+
return 1;
}
@@ -493,7 +524,6 @@ static int mchp_corespi_prepare_message(struct spi_controller *host,
struct spi_device *spi_dev = msg->spi;
struct mchp_corespi *spi = spi_controller_get_devdata(host);
- mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
mchp_corespi_set_mode(spi, spi_dev->mode);
return 0;
@@ -521,7 +551,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
host->use_gpio_descriptors = true;
host->setup = mchp_corespi_setup;
- host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->transfer_one = mchp_corespi_transfer_one;
host->prepare_message = mchp_corespi_prepare_message;
host->set_cs = mchp_corespi_set_cs;
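
The driver now sizes FIFO accesses from the transfer's word length via n_bytes = roundup_pow_of_two(DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE)). For example, 8 bits per word maps to 1-byte accesses, 16 bits to 2 bytes, and anything from 17 to 32 bits (say 20, which rounds up through 3) to a 4-byte access, matching the new SPI_BPW_RANGE_MASK(1, 32) advertisement. The example values are illustrative; only the formula comes from the patch.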
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d4da5464dbd0..6ebe5dd9bbb1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -371,7 +371,7 @@ const void *spi_get_device_match_data(const struct spi_device *sdev)
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
-static int spi_match_device(struct device *dev, struct device_driver *drv)
+static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
const struct spi_device *spi = to_spi_device(dev);
const struct spi_driver *sdrv = to_spi_driver(drv);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 95fb5f1c91c1..05e6d007f9a7 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -734,6 +734,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
+ { .compatible = "rohm,bh2228fv", .data = &spidev_of_check },
{ .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
{ .compatible = "semtech,sx1301", .data = &spidev_of_check },
{ .compatible = "silabs,em3581", .data = &spidev_of_check },
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 667085cb199d..fb0101da1485 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -43,7 +43,7 @@ static const struct device_type spmi_ctrl_type = {
.release = spmi_ctrl_release,
};
-static int spmi_device_match(struct device *dev, struct device_driver *drv)
+static int spmi_device_match(struct device *dev, const struct device_driver *drv)
{
if (of_driver_match_device(dev, drv))
return 1;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 4da8848b3639..aa6165e3db4a 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -323,10 +323,10 @@ static int ssb_match_devid(const struct ssb_device_id *tabid,
return 1;
}
-static int ssb_bus_match(struct device *dev, struct device_driver *drv)
+static int ssb_bus_match(struct device *dev, const struct device_driver *drv)
{
struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
- struct ssb_driver *ssb_drv = drv_to_ssb_drv(drv);
+ const struct ssb_driver *ssb_drv = drv_to_ssb_drv(drv);
const struct ssb_device_id *id;
for (id = ssb_drv->id_table;
diff --git a/drivers/staging/fieldbus/anybuss/anybuss-client.h b/drivers/staging/fieldbus/anybuss/anybuss-client.h
index a219688006fe..c21c4bebfb84 100644
--- a/drivers/staging/fieldbus/anybuss/anybuss-client.h
+++ b/drivers/staging/fieldbus/anybuss/anybuss-client.h
@@ -44,11 +44,7 @@ static inline struct anybuss_client *to_anybuss_client(struct device *dev)
return container_of(dev, struct anybuss_client, dev);
}
-static inline struct anybuss_client_driver *
-to_anybuss_client_driver(struct device_driver *drv)
-{
- return container_of(drv, struct anybuss_client_driver, driver);
-}
+#define to_anybuss_client_driver(__drv) container_of_const(__drv, struct anybuss_client_driver, driver)
static inline void *
anybuss_get_drvdata(const struct anybuss_client *client)
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index 410e6f8073c0..4f2b2fce92ee 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1166,9 +1166,9 @@ EXPORT_SYMBOL_GPL(anybuss_recv_msg);
/* ------------------------ bus functions ------------------------ */
static int anybus_bus_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
- struct anybuss_client_driver *adrv =
+ const struct anybuss_client_driver *adrv =
to_anybuss_client_driver(drv);
struct anybuss_client *adev =
to_anybuss_client(dev);
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index d992db8d45cb..6adcad286633 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -117,7 +117,7 @@ gbphy_dev_match_id(struct gbphy_device *gbphy_dev,
return NULL;
}
-static int gbphy_dev_match(struct device *dev, struct device_driver *drv)
+static int gbphy_dev_match(struct device *dev, const struct device_driver *drv)
{
struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
index 3f87b93c6537..41ece91ab88a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
@@ -14,7 +14,7 @@
#include "vchiq_arm.h"
#include "vchiq_bus.h"
-static int vchiq_bus_type_match(struct device *dev, struct device_driver *drv)
+static int vchiq_bus_type_match(struct device *dev, const struct device_driver *drv)
{
if (dev->bus == &vchiq_bus_type &&
strcmp(dev_name(dev), drv->name) == 0)
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index 0cd370ab1008..9a091463656d 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -1931,7 +1931,7 @@ EXPORT_SYMBOL(vme_unregister_driver);
/* - Bus Registration ------------------------------------------------------ */
-static int vme_bus_match(struct device *dev, struct device_driver *drv)
+static int vme_bus_match(struct device *dev, const struct device_driver *drv)
{
struct vme_driver *vme_drv;
diff --git a/drivers/tc/tc-driver.c b/drivers/tc/tc-driver.c
index 1c9d983a5a1f..2f6d147594b0 100644
--- a/drivers/tc/tc-driver.c
+++ b/drivers/tc/tc-driver.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(tc_unregister_driver);
* system is in its list of supported devices. Returns the matching
* tc_device_id structure or %NULL if there is no match.
*/
-static const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
+static const struct tc_device_id *tc_match_device(const struct tc_driver *tdrv,
struct tc_dev *tdev)
{
const struct tc_device_id *id = tdrv->id_table;
@@ -82,10 +82,10 @@ static const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
* system is in its list of supported devices. Returns 1 if there
* is a match or 0 otherwise.
*/
-static int tc_bus_match(struct device *dev, struct device_driver *drv)
+static int tc_bus_match(struct device *dev, const struct device_driver *drv)
{
struct tc_dev *tdev = to_tc_dev(dev);
- struct tc_driver *tdrv = to_tc_driver(drv);
+ const struct tc_driver *tdrv = to_tc_driver(drv);
const struct tc_device_id *id;
id = tc_match_device(tdrv, tdev);
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 82ad095d2b1c..d52e879b204e 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1201,7 +1201,7 @@ int tee_client_cancel_req(struct tee_context *ctx,
}
static int tee_client_device_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
const struct tee_client_device_id *id_table;
struct tee_client_device *tee_device;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f6e700e48aad..95c399f94744 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -272,6 +272,44 @@ static int __init thermal_register_governors(void)
return ret;
}
+static int __thermal_zone_device_set_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode mode)
+{
+ if (tz->ops.change_mode) {
+ int ret;
+
+ ret = tz->ops.change_mode(tz, mode);
+ if (ret)
+ return ret;
+ }
+
+ tz->mode = mode;
+
+ return 0;
+}
+
+static void thermal_zone_broken_disable(struct thermal_zone_device *tz)
+{
+ struct thermal_trip_desc *td;
+
+ dev_err(&tz->device, "Unable to get temperature, disabling!\n");
+ /*
+ * This function only runs for enabled thermal zones, so no need to
+ * check for the current mode.
+ */
+ __thermal_zone_device_set_mode(tz, THERMAL_DEVICE_DISABLED);
+ thermal_notify_tz_disable(tz);
+
+ for_each_trip_desc(tz, td) {
+ if (td->trip.type == THERMAL_TRIP_CRITICAL &&
+ td->trip.temperature > THERMAL_TEMP_INVALID) {
+ dev_crit(&tz->device,
+ "Disabled thermal zone with critical trip point\n");
+ return;
+ }
+ }
+}
+
/*
* Zone update section: main control loop applied to each zone while monitoring
* in polling mode. The monitoring is done using a workqueue.
@@ -292,6 +330,34 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
cancel_delayed_work(&tz->poll_queue);
}
+static void thermal_zone_recheck(struct thermal_zone_device *tz, int error)
+{
+ if (error == -EAGAIN) {
+ thermal_zone_device_set_polling(tz, THERMAL_RECHECK_DELAY);
+ return;
+ }
+
+ /*
+ * Print the message once to reduce log noise. It will be followed by
+ * another one if the temperature cannot be determined after multiple
+ * attempts.
+ */
+ if (tz->recheck_delay_jiffies == THERMAL_RECHECK_DELAY)
+ dev_info(&tz->device, "Temperature check failed (%d)\n", error);
+
+ thermal_zone_device_set_polling(tz, tz->recheck_delay_jiffies);
+
+ tz->recheck_delay_jiffies += max(tz->recheck_delay_jiffies >> 1, 1ULL);
+ if (tz->recheck_delay_jiffies > THERMAL_MAX_RECHECK_DELAY) {
+ thermal_zone_broken_disable(tz);
+ /*
+ * Restore the original recheck delay value to allow the thermal
+ * zone to try to recover when it is reenabled by user space.
+ */
+ tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY;
+ }
+}
+
static void monitor_thermal_zone(struct thermal_zone_device *tz)
{
if (tz->mode != THERMAL_DEVICE_ENABLED)
@@ -491,10 +557,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
ret = __thermal_zone_get_temp(tz, &temp);
if (ret) {
- if (ret != -EAGAIN)
- dev_info(&tz->device, "Temperature check failed (%d)\n", ret);
-
- thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS));
+ thermal_zone_recheck(tz, ret);
return;
} else if (temp <= THERMAL_TEMP_INVALID) {
/*
@@ -506,6 +569,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
goto monitor;
}
+ tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY;
+
tz->last_temperature = tz->temperature;
tz->temperature = temp;
@@ -540,7 +605,7 @@ monitor:
static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
enum thermal_device_mode mode)
{
- int ret = 0;
+ int ret;
mutex_lock(&tz->lock);
@@ -548,14 +613,15 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
if (mode == tz->mode) {
mutex_unlock(&tz->lock);
- return ret;
+ return 0;
}
- if (tz->ops.change_mode)
- ret = tz->ops.change_mode(tz, mode);
+ ret = __thermal_zone_device_set_mode(tz, mode);
+ if (ret) {
+ mutex_unlock(&tz->lock);
- if (!ret)
- tz->mode = mode;
+ return ret;
+ }
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
@@ -566,7 +632,7 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
else
thermal_notify_tz_disable(tz);
- return ret;
+ return 0;
}
int thermal_zone_device_enable(struct thermal_zone_device *tz)
@@ -1445,6 +1511,7 @@ thermal_zone_device_register_with_trips(const char *type,
thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay);
thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay);
+ tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY;
/* sys I/F */
/* Add nodes that are always present via .groups */
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index ba8e6fc807ca..4cf2b7230d04 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -67,6 +67,8 @@ struct thermal_governor {
* @polling_delay_jiffies: number of jiffies to wait between polls when
* checking whether trip points have been crossed (0 for
* interrupt driven systems)
+ * @recheck_delay_jiffies: delay after a failed attempt to determine the zone
+ * temperature before trying again
* @temperature: current temperature. This is only for core code,
* drivers should use thermal_zone_get_temp() to get the
* current temperature
@@ -108,6 +110,7 @@ struct thermal_zone_device {
int num_trips;
unsigned long passive_delay_jiffies;
unsigned long polling_delay_jiffies;
+ unsigned long recheck_delay_jiffies;
int temperature;
int last_temperature;
int emul_temperature;
@@ -137,10 +140,11 @@ struct thermal_zone_device {
#define THERMAL_TEMP_INIT INT_MIN
/*
- * Default delay after a failing thermal zone temperature check before
- * attempting to check it again.
+ * Default and maximum delay after a failed thermal zone temperature check
+ * before attempting to check it again (in jiffies).
*/
-#define THERMAL_RECHECK_DELAY_MS 250
+#define THERMAL_RECHECK_DELAY msecs_to_jiffies(250)
+#define THERMAL_MAX_RECHECK_DELAY (120 * HZ)
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
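
The recheck path added above implements a capped exponential backoff: every failed temperature read lengthens the delay by half its current value, starting from THERMAL_RECHECK_DELAY (250 ms) and disabling the zone once the next delay would exceed THERMAL_MAX_RECHECK_DELAY (120 s). As a rough worked example (not from the patch): 250 ms, 375 ms, 562 ms, 843 ms, ... growing by a factor of about 1.5 per failure, so a zone is only disabled after on the order of sixteen consecutive failures spread over several minutes, and the delay snaps back to 250 ms on the first successful read or when the zone is re-enabled from user space.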
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 0023017299f7..144d0232a70c 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -45,9 +45,9 @@ static bool match_service_id(const struct tb_service_id *id,
}
static const struct tb_service_id *__tb_service_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
- struct tb_service_driver *driver;
+ const struct tb_service_driver *driver;
const struct tb_service_id *ids;
struct tb_service *svc;
@@ -55,7 +55,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
if (!svc)
return NULL;
- driver = container_of(drv, struct tb_service_driver, driver);
+ driver = container_of_const(drv, struct tb_service_driver, driver);
if (!driver->id_table)
return NULL;
@@ -67,7 +67,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
return NULL;
}
-static int tb_service_match(struct device *dev, struct device_driver *drv)
+static int tb_service_match(struct device *dev, const struct device_driver *drv)
{
return !!__tb_service_match(dev, drv);
}
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 613cb356b918..8913cdd675f6 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -85,7 +85,7 @@ static const struct device_type serdev_ctrl_type = {
.release = serdev_ctrl_release,
};
-static int serdev_device_match(struct device *dev, struct device_driver *drv)
+static int serdev_device_match(struct device *dev, const struct device_driver *drv)
{
if (!is_serdev_device(dev))
return 0;
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index d822499ba9d6..5d1677f1b651 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -29,7 +29,7 @@ static const struct device_type serial_port_type = {
.name = "port",
};
-static int serial_base_match(struct device *dev, struct device_driver *drv)
+static int serial_base_match(struct device *dev, const struct device_driver *drv)
{
if (dev->type == &serial_ctrl_type &&
str_has_prefix(drv->name, serial_ctrl_type.name))
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index e5974b8239c9..14f8f00fdcf9 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -452,7 +452,7 @@ static const struct sysrq_key_op sysrq_unrt_op = {
static void sysrq_handle_replay_logs(u8 key)
{
- console_replay_all();
+ console_try_replay_all();
}
static struct sysrq_key_op sysrq_replay_logs_op = {
.handler = sysrq_handle_replay_logs,
@@ -770,8 +770,6 @@ static void sysrq_of_get_keyreset_config(void)
{
u32 key;
struct device_node *np;
- struct property *prop;
- const __be32 *p;
np = of_find_node_by_path("/chosen/linux,sysrq-reset-seq");
if (!np) {
@@ -782,7 +780,7 @@ static void sysrq_of_get_keyreset_config(void)
/* Reset in case a __weak definition was present */
sysrq_reset_seq_len = 0;
- of_property_for_each_u32(np, "keyset", prop, p, key) {
+ of_property_for_each_u32(np, "keyset", key) {
if (key == KEY_RESERVED || key > KEY_MAX ||
sysrq_reset_seq_len == SYSRQ_KEY_RESET_MAX)
break;
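
Both this hunk and the usb251xb.c change below use the simplified of_property_for_each_u32() iterator, which now takes only the node, the property name and a u32 cursor; the struct property and __be32 pointer locals being deleted were the iterator's old bookkeeping. A minimal sketch, with a hypothetical property name (assumes <linux/of.h>):

static unsigned int example_count_keys(struct device_node *np)
{
	u32 key;
	unsigned int n = 0;

	of_property_for_each_u32(np, "keyset", key)
		n++;

	return n;
}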
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0886b19d2e1c..4a2ee447b213 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(ulpi_write);
/* -------------------------------------------------------------------------- */
-static int ulpi_match(struct device *dev, struct device_driver *driver)
+static int ulpi_match(struct device *dev, const struct device_driver *driver)
{
struct ulpi_driver *drv = to_ulpi_driver(driver);
struct ulpi *ulpi = to_ulpi_dev(dev);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index b35734d03109..0c3f12daac79 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -868,7 +868,7 @@ bool usb_driver_applicable(struct usb_device *udev,
return false;
}
-static int usb_device_match(struct device *dev, struct device_driver *drv)
+static int usb_device_match(struct device *dev, const struct device_driver *drv)
{
/* devices and interfaces are handled separately */
if (is_usb_device(dev)) {
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 2dfae7a17b3f..b0a613758414 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1568,7 +1568,7 @@ EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
/* ------------------------------------------------------------------------- */
-static int gadget_match_driver(struct device *dev, struct device_driver *drv)
+static int gadget_match_driver(struct device *dev, const struct device_driver *drv)
{
struct usb_gadget *gadget = dev_to_usb_gadget(dev);
struct usb_udc *udc = gadget->udc;
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index b98cda1cef73..e24cdb667307 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -382,11 +382,9 @@ static void usb251xb_get_ports_field(struct usb251xb *hub,
bool ds_only, u8 *fld)
{
struct device *dev = hub->dev;
- struct property *prop;
- const __be32 *p;
u32 port;
- of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
+ of_property_for_each_u32(dev->of_node, prop_name, port) {
if ((port >= ds_only ? 1 : 0) && (port <= port_cnt))
*fld |= BIT(port);
else
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 6c812d01b37d..d200e2c29a8f 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -14,7 +14,7 @@
#include <linux/usb/serial.h>
static int usb_serial_device_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
const struct usb_serial_port *port = to_usb_serial_port(dev);
struct usb_serial_driver *driver = to_usb_serial_driver(drv);
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 6ea103e1abae..aa879253d3b8 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -447,7 +447,7 @@ static struct attribute *typec_attrs[] = {
};
ATTRIBUTE_GROUPS(typec);
-static int typec_match(struct device *dev, struct device_driver *driver)
+static int typec_match(struct device *dev, const struct device_driver *driver)
{
struct typec_altmode_driver *drv = to_altmode_driver(driver);
struct typec_altmode *altmode = to_typec_altmode(dev);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 1ca445e31acb..4dbd2e55a288 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -65,7 +65,7 @@ static void vdpa_dev_remove(struct device *d)
drv->remove(vdev);
}
-static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
+static int vdpa_dev_match(struct device *dev, const struct device_driver *drv)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
index b98322966b3e..ad5b834806ff 100644
--- a/drivers/vfio/mdev/mdev_driver.c
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -31,7 +31,7 @@ static void mdev_remove(struct device *dev)
drv->remove(to_mdev_device(dev));
}
-static int mdev_match(struct device *dev, struct device_driver *drv)
+static int mdev_match(struct device *dev, const struct device_driver *drv)
{
/*
* No drivers automatically match. Drivers are only bound by explicit
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 396d3cd49a1b..a9b93e99c23a 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -82,7 +82,7 @@ static inline int virtio_id_match(const struct virtio_device *dev,
/* This looks through all the IDs a driver claims to support. If any of them
* match, we return 1 and the kernel will call virtio_dev_probe(). */
-static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
+static int virtio_dev_match(struct device *_dv, const struct device_driver *_dr)
{
unsigned int i;
struct virtio_device *dev = dev_to_virtio(_dv);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c44918768a97..bae1d97cce89 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -946,7 +946,8 @@ config RENESAS_RZN1WDT
config RENESAS_RZG2LWDT
tristate "Renesas RZ/G2L WDT Watchdog"
- depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARCH_RZG2L || ARCH_R9A09G011 || COMPILE_TEST
+ depends on PM || COMPILE_TEST
select WATCHDOG_CORE
help
This driver adds watchdog support for the integrated watchdogs in the
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index b21d7a74a42d..94914a22daff 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -290,6 +290,11 @@ static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout
if (wdt->ext_reset)
val |= WDOG_CS_INT_EN;
+ if (readl(wdt->base + WDOG_CS) & WDOG_CS_EN) {
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ val |= WDOG_CS_EN;
+ }
+
do {
ret = _imx7ulp_wdt_init(wdt, timeout, val);
toval = readl(wdt->base + WDOG_TOVAL);
diff --git a/drivers/watchdog/lenovo_se10_wdt.c b/drivers/watchdog/lenovo_se10_wdt.c
index 139ff0e8220f..cd0500e5080b 100644
--- a/drivers/watchdog/lenovo_se10_wdt.c
+++ b/drivers/watchdog/lenovo_se10_wdt.c
@@ -196,8 +196,8 @@ static int se10_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&priv->wdd, priv);
priv->wdd.parent = dev;
- priv->wdd.info = &wdt_info,
- priv->wdd.ops = &se10_wdt_ops,
+ priv->wdd.info = &wdt_info;
+ priv->wdd.ops = &se10_wdt_ops;
priv->wdd.timeout = WATCHDOG_TIMEOUT; /* Set default timeout */
priv->wdd.min_timeout = MIN_TIMEOUT;
priv->wdd.max_timeout = MAX_TIMEOUT;
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index 1741f98ca67c..2a35f890a288 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -8,7 +8,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -54,35 +53,11 @@ struct rzg2l_wdt_priv {
struct reset_control *rstc;
unsigned long osc_clk_rate;
unsigned long delay;
- unsigned long minimum_assertion_period;
struct clk *pclk;
struct clk *osc_clk;
enum rz_wdt_type devtype;
};
-static int rzg2l_wdt_reset(struct rzg2l_wdt_priv *priv)
-{
- int err, status;
-
- if (priv->devtype == WDT_RZV2M) {
- /* WDT needs TYPE-B reset control */
- err = reset_control_assert(priv->rstc);
- if (err)
- return err;
- ndelay(priv->minimum_assertion_period);
- err = reset_control_deassert(priv->rstc);
- if (err)
- return err;
- err = read_poll_timeout(reset_control_status, status,
- status != 1, 0, 1000, false,
- priv->rstc);
- } else {
- err = reset_control_reset(priv->rstc);
- }
-
- return err;
-}
-
static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
{
/* delay timer when change the setting register */
@@ -123,8 +98,17 @@ static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev)
static int rzg2l_wdt_start(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
- pm_runtime_get_sync(wdev->parent);
+ ret = pm_runtime_resume_and_get(wdev->parent);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret) {
+ pm_runtime_put(wdev->parent);
+ return ret;
+ }
/* Initialize time out */
rzg2l_wdt_init_timeout(wdev);
@@ -141,15 +125,23 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev)
static int rzg2l_wdt_stop(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
- rzg2l_wdt_reset(priv);
- pm_runtime_put(wdev->parent);
+ ret = reset_control_assert(priv->rstc);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_put(wdev->parent);
+ if (ret < 0)
+ return ret;
return 0;
}
static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
{
+ int ret = 0;
+
wdev->timeout = timeout;
/*
@@ -158,22 +150,30 @@ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int time
* to reset the module) so that it is updated with new timeout values.
*/
if (watchdog_active(wdev)) {
- rzg2l_wdt_stop(wdev);
- rzg2l_wdt_start(wdev);
+ ret = rzg2l_wdt_stop(wdev);
+ if (ret)
+ return ret;
+
+ ret = rzg2l_wdt_start(wdev);
}
- return 0;
+ return ret;
}
static int rzg2l_wdt_restart(struct watchdog_device *wdev,
unsigned long action, void *data)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
clk_prepare_enable(priv->pclk);
clk_prepare_enable(priv->osc_clk);
if (priv->devtype == WDT_RZG2L) {
+ ret = reset_control_deassert(priv->rstc);
+ if (ret)
+ return ret;
+
/* Generate Reset (WDTRSTB) Signal on parity error */
rzg2l_wdt_write(priv, 0, PECR);
@@ -181,7 +181,9 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
} else {
/* RZ/V2M doesn't have parity error registers */
- rzg2l_wdt_reset(priv);
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ return ret;
wdev->timeout = 0;
@@ -224,13 +226,11 @@ static const struct watchdog_ops rzg2l_wdt_ops = {
.restart = rzg2l_wdt_restart,
};
-static void rzg2l_wdt_reset_assert_pm_disable(void *data)
+static void rzg2l_wdt_pm_disable(void *data)
{
struct watchdog_device *wdev = data;
- struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
pm_runtime_disable(wdev->parent);
- reset_control_assert(priv->rstc);
}
static int rzg2l_wdt_probe(struct platform_device *pdev)
@@ -273,19 +273,8 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
"failed to get cpg reset");
- ret = reset_control_deassert(priv->rstc);
- if (ret)
- return dev_err_probe(dev, ret, "failed to deassert");
-
priv->devtype = (uintptr_t)of_device_get_match_data(dev);
- if (priv->devtype == WDT_RZV2M) {
- priv->minimum_assertion_period = RZV2M_A_NSEC +
- 3 * F2CYCLE_NSEC(pclk_rate) + 5 *
- max(F2CYCLE_NSEC(priv->osc_clk_rate),
- F2CYCLE_NSEC(pclk_rate));
- }
-
pm_runtime_enable(&pdev->dev);
priv->wdev.info = &rzg2l_wdt_ident;
@@ -297,10 +286,9 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
watchdog_set_drvdata(&priv->wdev, priv);
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_wdt_reset_assert_pm_disable,
- &priv->wdev);
- if (ret < 0)
+ dev_set_drvdata(dev, priv);
+ ret = devm_add_action_or_reset(&pdev->dev, rzg2l_wdt_pm_disable, &priv->wdev);
+ if (ret)
return ret;
watchdog_set_nowayout(&priv->wdev, nowayout);
@@ -320,10 +308,35 @@ static const struct of_device_id rzg2l_wdt_ids[] = {
};
MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids);
+static int rzg2l_wdt_suspend_late(struct device *dev)
+{
+ struct rzg2l_wdt_priv *priv = dev_get_drvdata(dev);
+
+ if (!watchdog_active(&priv->wdev))
+ return 0;
+
+ return rzg2l_wdt_stop(&priv->wdev);
+}
+
+static int rzg2l_wdt_resume_early(struct device *dev)
+{
+ struct rzg2l_wdt_priv *priv = dev_get_drvdata(dev);
+
+ if (!watchdog_active(&priv->wdev))
+ return 0;
+
+ return rzg2l_wdt_start(&priv->wdev);
+}
+
+static const struct dev_pm_ops rzg2l_wdt_pm_ops = {
+ LATE_SYSTEM_SLEEP_PM_OPS(rzg2l_wdt_suspend_late, rzg2l_wdt_resume_early)
+};
+
static struct platform_driver rzg2l_wdt_driver = {
.driver = {
.name = "rzg2l_wdt",
.of_match_table = rzg2l_wdt_ids,
+ .pm = &rzg2l_wdt_pm_ops,
},
.probe = rzg2l_wdt_probe,
};
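Note: the rzg2l_wdt rework makes start() take a runtime-PM reference and then deassert the reset, dropping the reference again if the deassert fails, and wires the same start/stop pair into late suspend/resume. A compilable sketch of that acquire-then-rollback ordering; fake_get(), fake_put() and fake_deassert() are stand-ins for the PM and reset-control calls:

#include <stdio.h>

/* Stand-ins for pm_runtime_resume_and_get(), pm_runtime_put() and
 * reset_control_deassert(); they fail on demand. */
static int fake_get(int fail)      { return fail ? -1 : 0; }
static void fake_put(void)         { puts("rolled back: put"); }
static int fake_deassert(int fail) { return fail ? -1 : 0; }

/* Mirrors the rollback order in the patched rzg2l_wdt_start(): if the second
 * acquisition fails, release the first before propagating the error. */
static int start(int fail_get, int fail_deassert)
{
	int ret = fake_get(fail_get);

	if (ret)
		return ret;

	ret = fake_deassert(fail_deassert);
	if (ret) {
		fake_put();
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("ok: %d\n", start(0, 0));
	printf("deassert fails: %d\n", start(0, 1));
	return 0;
}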
diff --git a/drivers/watchdog/rzn1_wdt.c b/drivers/watchdog/rzn1_wdt.c
index 980c1717adb5..7d3192d34afd 100644
--- a/drivers/watchdog/rzn1_wdt.c
+++ b/drivers/watchdog/rzn1_wdt.c
@@ -140,9 +140,9 @@ static int rzn1_wdt_probe(struct platform_device *pdev)
}
wdt->clk_rate_khz = clk_rate / 1000;
- wdt->wdtdev.info = &rzn1_wdt_info,
- wdt->wdtdev.ops = &rzn1_wdt_ops,
- wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+ wdt->wdtdev.info = &rzn1_wdt_info;
+ wdt->wdtdev.ops = &rzn1_wdt_ops;
+ wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
wdt->wdtdev.parent = dev;
/*
* The period of the watchdog cannot be changed once set
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index b4b059883618..19a2620d3d38 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -152,8 +152,10 @@ static int starfive_wdt_enable_clock(struct starfive_wdt *wdt)
return dev_err_probe(wdt->wdd.parent, ret, "failed to enable apb clock\n");
ret = clk_prepare_enable(wdt->core_clk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(wdt->apb_clk);
return dev_err_probe(wdt->wdd.parent, ret, "failed to enable core clock\n");
+ }
return 0;
}
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index e2bd266b1b5b..4190cb800cc4 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1004,7 +1004,7 @@ static struct miscdevice watchdog_miscdev = {
.fops = &watchdog_fops,
};
-static struct class watchdog_class = {
+static const struct class watchdog_class = {
.name = "watchdog",
.dev_groups = wdt_groups,
};
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 2754bdfadcb8..13821e7e825e 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -104,7 +104,7 @@ void xb_deinit_comms(void);
int xs_watch_msg(struct xs_watch_event *event);
void xs_request_exit(struct xb_req_data *req);
-int xenbus_match(struct device *_dev, struct device_driver *_drv);
+int xenbus_match(struct device *_dev, const struct device_driver *_drv);
int xenbus_dev_probe(struct device *_dev);
void xenbus_dev_remove(struct device *_dev);
int xenbus_register_driver_common(struct xenbus_driver *drv,
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 1a9ded0cddcb..9f097f1f4a4c 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -94,9 +94,9 @@ match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
return NULL;
}
-int xenbus_match(struct device *_dev, struct device_driver *_drv)
+int xenbus_match(struct device *_dev, const struct device_driver *_drv)
{
- struct xenbus_driver *drv = to_xenbus_driver(_drv);
+ const struct xenbus_driver *drv = to_xenbus_driver(_drv);
if (!drv->ids)
return 0;
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index f49d19977e82..e7d3af1a223f 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -118,10 +118,10 @@ EXPORT_SYMBOL(zorro_unregister_driver);
* supported, and 0 if there is no match.
*/
-static int zorro_bus_match(struct device *dev, struct device_driver *drv)
+static int zorro_bus_match(struct device *dev, const struct device_driver *drv)
{
struct zorro_dev *z = to_zorro_dev(dev);
- struct zorro_driver *zorro_drv = to_zorro_driver(drv);
+ const struct zorro_driver *zorro_drv = to_zorro_driver(drv);
const struct zorro_device_id *ids = zorro_drv->id_table;
if (!ids)
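Note: the xenbus and zorro hunks above constify the bus match callback's struct device_driver argument, so the container_of-style downcast has to preserve constness all the way to the driver-specific structure. A compilable userspace sketch of the pattern; the types and the to_zorro_driver() macro here are stand-ins, not the kernel definitions:

#include <stdio.h>
#include <stddef.h>

struct device_driver { const char *name; };
struct zorro_driver  { int id; struct device_driver driver; };

/* const-preserving downcast in the spirit of to_zorro_driver(). */
#define to_zorro_driver(d) \
	((const struct zorro_driver *)((const char *)(d) - \
		offsetof(struct zorro_driver, driver)))

static int bus_match(const struct device_driver *drv)
{
	const struct zorro_driver *zdrv = to_zorro_driver(drv);

	return zdrv->id != 0;   /* the callback only reads the driver */
}

int main(void)
{
	static const struct zorro_driver zdrv = {
		.id = 42,
		.driver = { .name = "demo" },
	};

	printf("match=%d\n", bus_match(&zdrv.driver));
	return 0;
}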
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5ae8045f4df4..19fa49cd9907 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2150,5 +2150,5 @@ core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
#ifdef CONFIG_BINFMT_ELF_KUNIT_TEST
-#include "binfmt_elf_test.c"
+#include "tests/binfmt_elf_kunit.c"
#endif
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index e667dbcd20e8..a91acd03ee12 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -630,7 +630,7 @@ static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
_enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
- subreq->max_len = ULONG_MAX;
+ subreq->max_len = MAX_RW_COUNT;
subreq->max_nr_segs = BIO_MAX_VECS;
if (!cachefiles_cres_file(cres)) {
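Note: capping subreq->max_len at MAX_RW_COUNT keeps each cachefiles backing write within what a single kernel read/write operation accepts; MAX_RW_COUNT is INT_MAX rounded down to a page boundary. Quick arithmetic, with a 4 KiB page size assumed for illustration:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* MAX_RW_COUNT is INT_MAX masked down to a page boundary. */
	unsigned long page_size = 4096;
	unsigned long max_rw_count = (unsigned long)INT_MAX & ~(page_size - 1);

	printf("MAX_RW_COUNT = %lu bytes (%lu MiB)\n",
	       max_rw_count, max_rw_count >> 20);
	return 0;
}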
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c4941ba245ac..e98aa8219303 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3067,10 +3067,13 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
flags, &_got);
WARN_ON_ONCE(ret == -EAGAIN);
if (!ret) {
+#ifdef CONFIG_DEBUG_FS
struct ceph_mds_client *mdsc = fsc->mdsc;
struct cap_wait cw;
+#endif
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+#ifdef CONFIG_DEBUG_FS
cw.ino = ceph_ino(inode);
cw.tgid = current->tgid;
cw.need = need;
@@ -3079,6 +3082,7 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
spin_lock(&mdsc->caps_list_lock);
list_add(&cw.list, &mdsc->cap_wait_list);
spin_unlock(&mdsc->caps_list_lock);
+#endif
/* make sure used fmode not timeout */
ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS);
@@ -3097,9 +3101,11 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
remove_wait_queue(&ci->i_cap_wq, &wait);
ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS);
+#ifdef CONFIG_DEBUG_FS
spin_lock(&mdsc->caps_list_lock);
list_del(&cw.list);
spin_unlock(&mdsc->caps_list_lock);
+#endif
if (ret == -EAGAIN)
continue;
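Note: the ceph caps hunks compile the cap_wait bookkeeping only when CONFIG_DEBUG_FS is set, since the list is consumed solely by the debugfs interface. A tiny sketch of the same conditional-bookkeeping shape; DEBUG_STATS is a made-up switch standing in for the Kconfig option:

#include <stdio.h>

/* Build with -DDEBUG_STATS to include the bookkeeping, mirroring how the
 * cap_wait tracking is now built only under CONFIG_DEBUG_FS. */
#ifdef DEBUG_STATS
static int waiters;
static void track_wait(int delta) { waiters += delta; }
static void dump_waiters(void)    { printf("waiters=%d\n", waiters); }
#else
static void track_wait(int delta) { (void)delta; }
static void dump_waiters(void)    { }
#endif

int main(void)
{
	track_wait(+1);
	/* ... the real code sleeps here until the capability arrives ... */
	track_wait(-1);
	dump_waiters();
	return 0;
}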
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 5aadc56e0cc0..18c72b305858 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1589,7 +1589,7 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
}
spin_lock(&mdsc->dentry_list_lock);
- __dentry_dir_lease_touch(mdsc, di),
+ __dentry_dir_lease_touch(mdsc, di);
spin_unlock(&mdsc->dentry_list_lock);
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index c2157f6e0c69..276e34ab3e2c 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -5446,6 +5446,8 @@ static void delayed_work(struct work_struct *work)
}
mutex_unlock(&mdsc->mutex);
+ ceph_flush_cap_releases(mdsc, s);
+
mutex_lock(&s->s_mutex);
if (renew_caps)
send_renew_caps(mdsc, s);
@@ -5505,7 +5507,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
INIT_LIST_HEAD(&mdsc->cap_delay_list);
+#ifdef CONFIG_DEBUG_FS
INIT_LIST_HEAD(&mdsc->cap_wait_list);
+#endif
spin_lock_init(&mdsc->cap_delay_lock);
INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index cfa18cf915a0..9bcc7f181bfe 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -416,6 +416,8 @@ struct ceph_quotarealm_inode {
struct inode *inode;
};
+#ifdef CONFIG_DEBUG_FS
+
struct cap_wait {
struct list_head list;
u64 ino;
@@ -424,6 +426,8 @@ struct cap_wait {
int want;
};
+#endif
+
enum {
CEPH_MDSC_STOPPING_BEGIN = 1,
CEPH_MDSC_STOPPING_FLUSHING = 2,
@@ -512,7 +516,9 @@ struct ceph_mds_client {
spinlock_t caps_list_lock;
struct list_head caps_list; /* unused (reserved or
unreserved) */
+#ifdef CONFIG_DEBUG_FS
struct list_head cap_wait_list;
+#endif
int caps_total_count; /* total caps allocated */
int caps_use_count; /* in use */
int caps_use_max; /* max used caps */
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 885cb5d4e771..0cdf84cd1791 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -961,7 +961,8 @@ static int __init init_caches(void)
if (!ceph_mds_request_cachep)
goto bad_mds_req;
- ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
+ ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10,
+ (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
if (!ceph_wb_pagevec_pool)
goto bad_pagevec_pool;
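Note: the ceph_wb_pagevec_pool element size was the page count itself rather than the byte size of the page-pointer array the pool backs; the fix multiplies by sizeof(struct page *). Rough before/after arithmetic; the 16 MiB write size and 4 KiB pages are placeholder assumptions, and sizeof(void *) stands in for sizeof(struct page *):

#include <stdio.h>

#define CEPH_MAX_WRITE_SIZE (16UL << 20)   /* placeholder */
#define PAGE_SHIFT 12                      /* placeholder */

int main(void)
{
	unsigned long npages = CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT;

	printf("old element size: %lu bytes\n", npages);
	printf("new element size: %lu bytes\n", npages * sizeof(void *));
	return 0;
}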
diff --git a/fs/coredump.c b/fs/coredump.c
index 4dc5140bac3f..7f12ff6ad1d3 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -991,7 +991,7 @@ void validate_coredump_safety(void)
}
}
-static int proc_dostring_coredump(struct ctl_table *table, int write,
+static int proc_dostring_coredump(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int error = proc_dostring(table, write, buffer, lenp, ppos);
diff --git a/fs/dcache.c b/fs/dcache.c
index 8bdc278a0205..3d8daaecb6d1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -177,7 +177,7 @@ static long get_nr_dentry_negative(void)
return sum < 0 ? 0 : sum;
}
-static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
+static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
dentry_stat.nr_dentry = get_nr_dentry();
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index b9575957a7c2..d45ef541d848 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -48,7 +48,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
iput(toput_inode);
}
-int drop_caches_sysctl_handler(struct ctl_table *table, int write,
+int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int ret;
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 8be60797ea2f..1b7eba38ba1e 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -21,38 +21,32 @@ void erofs_put_metabuf(struct erofs_buf *buf)
if (!buf->page)
return;
erofs_unmap_metabuf(buf);
- put_page(buf->page);
+ folio_put(page_folio(buf->page));
buf->page = NULL;
}
-/*
- * Derive the block size from inode->i_blkbits to make compatible with
- * anonymous inode in fscache mode.
- */
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
enum erofs_kmap_type type)
{
pgoff_t index = offset >> PAGE_SHIFT;
- struct page *page = buf->page;
- struct folio *folio;
- unsigned int nofs_flag;
+ struct folio *folio = NULL;
- if (!page || page->index != index) {
+ if (buf->page) {
+ folio = page_folio(buf->page);
+ if (folio_file_page(folio, index) != buf->page)
+ erofs_unmap_metabuf(buf);
+ }
+ if (!folio || !folio_contains(folio, index)) {
erofs_put_metabuf(buf);
-
- nofs_flag = memalloc_nofs_save();
- folio = read_cache_folio(buf->mapping, index, NULL, NULL);
- memalloc_nofs_restore(nofs_flag);
+ folio = read_mapping_folio(buf->mapping, index, NULL);
if (IS_ERR(folio))
return folio;
-
- /* should already be PageUptodate, no need to lock page */
- page = folio_file_page(folio, index);
- buf->page = page;
}
+ buf->page = folio_file_page(folio, index);
+
if (buf->kmap_type == EROFS_NO_KMAP) {
if (type == EROFS_KMAP)
- buf->base = kmap_local_page(page);
+ buf->base = kmap_local_page(buf->page);
buf->kmap_type = type;
} else if (buf->kmap_type != type) {
DBG_BUGON(1);
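Note: the erofs_bread() rework keeps the metadata buffer folio-based and only goes back to the page cache when the requested offset falls outside the folio it already holds. A compilable userspace analog of that caching scheme; the block size, struct names and bread() helper are illustrative, not the kernel API:

#include <stdio.h>
#include <string.h>

#define BLKSZ 8

struct metabuf {
	long index;          /* block currently cached, -1 when empty */
	char data[BLKSZ];
};

/* Re-read only when the offset lands in a different block. */
static const char *bread(struct metabuf *buf, FILE *f, long offset)
{
	long index = offset / BLKSZ;

	if (buf->index != index) {
		if (fseek(f, index * BLKSZ, SEEK_SET) != 0)
			return NULL;
		memset(buf->data, 0, BLKSZ);
		if (fread(buf->data, 1, BLKSZ, f) == 0)
			return NULL;
		buf->index = index;
	}
	return buf->data + offset % BLKSZ;
}

int main(void)
{
	char backing[] = "0123456789abcdefghijklmnop";
	FILE *f = fmemopen(backing, sizeof(backing) - 1, "r");
	struct metabuf buf = { .index = -1 };

	if (!f)
		return 1;
	/* Offsets 3 and 5 hit the cached block; 10 forces a re-read. */
	printf("%c %c %c\n", *bread(&buf, f, 3), *bread(&buf, f, 5),
	       *bread(&buf, f, 10));
	fclose(f);
	return 0;
}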
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 06a722b85a45..40666815046f 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -188,7 +188,7 @@ again:
!rq->partial_decoding);
buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
rq->inputsize -= buf.in_size;
- buf.in = dctx.kin + rq->pageofs_in,
+ buf.in = dctx.kin + rq->pageofs_in;
dctx.bounce = strm->bounce;
do {
dctx.avail_out = buf.out_size - buf.out_pos;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 5f6439a63af7..43c09aae2afc 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -334,14 +334,29 @@ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
unsigned int query_flags)
{
struct inode *const inode = d_inode(path->dentry);
+ bool compressed =
+ erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout);
- if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
+ if (compressed)
stat->attributes |= STATX_ATTR_COMPRESSED;
-
stat->attributes |= STATX_ATTR_IMMUTABLE;
stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
STATX_ATTR_IMMUTABLE);
+ /*
+ * Return the DIO alignment restrictions if requested.
+ *
+ * In EROFS, STATX_DIOALIGN is not supported in ondemand mode and
+ * compressed files, so in these cases we report no DIO support.
+ */
+ if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
+ stat->result_mask |= STATX_DIOALIGN;
+ if (!erofs_is_fscache_mode(inode->i_sb) && !compressed) {
+ stat->dio_mem_align =
+ bdev_logical_block_size(inode->i_sb->s_bdev);
+ stat->dio_offset_align = stat->dio_mem_align;
+ }
+ }
generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
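Note: with the erofs_getattr() change, regular uncompressed files on a block device now report their direct-I/O alignment through STATX_DIOALIGN. A small userspace check of that reporting; it needs a reasonably recent kernel and glibc that expose the STATX_DIOALIGN mask and stx_dio_* fields:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (statx(AT_FDCWD, argv[1], 0, STATX_DIOALIGN, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("dio mem align %u, dio offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	else
		printf("no O_DIRECT alignment reported\n");
	return 0;
}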
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 35268263aaed..32ce5b35e1df 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -576,6 +576,21 @@ static const struct export_operations erofs_export_ops = {
.get_parent = erofs_get_parent,
};
+static void erofs_set_sysfs_name(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ if (erofs_is_fscache_mode(sb)) {
+ if (sbi->domain_id)
+ super_set_sysfs_name_generic(sb, "%s,%s",sbi->domain_id,
+ sbi->fsid);
+ else
+ super_set_sysfs_name_generic(sb, "%s", sbi->fsid);
+ return;
+ }
+ super_set_sysfs_name_id(sb);
+}
+
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
@@ -643,6 +658,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_POSIXACL;
else
sb->s_flags &= ~SB_POSIXACL;
+ erofs_set_sysfs_name(sb);
#ifdef CONFIG_EROFS_FS_ZIP
xa_init(&sbi->managed_pslots);
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index b80f612867c2..9b53883e5caf 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -38,11 +38,13 @@ void *z_erofs_get_gbuf(unsigned int requiredpages)
{
struct z_erofs_gbuf *gbuf;
+ migrate_disable();
gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
spin_lock(&gbuf->lock);
/* check if the buffer is too small */
if (requiredpages > gbuf->nrpages) {
spin_unlock(&gbuf->lock);
+ migrate_enable();
/* (for sparse checker) pretend gbuf->lock is still taken */
__acquire(gbuf->lock);
return NULL;
@@ -57,6 +59,7 @@ void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
DBG_BUGON(gbuf->ptr != ptr);
spin_unlock(&gbuf->lock);
+ migrate_enable();
}
int z_erofs_gbuf_growsize(unsigned int nrpages)
diff --git a/fs/exec.c b/fs/exec.c
index a47d0e4c54f6..a126e3d1cacb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
#ifdef CONFIG_SYSCTL
-static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -2236,5 +2236,5 @@ fs_initcall(init_fs_exec_sysctls);
#endif /* CONFIG_SYSCTL */
#ifdef CONFIG_EXEC_KUNIT_TEST
-#include "exec_test.c"
+#include "tests/exec_kunit.c"
#endif
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 55d444bec5c0..bdd96329dddd 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1186,6 +1186,11 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
ckpt->next_free_nid = cpu_to_le32(last_nid);
+
+ /* update user_block_counts */
+ sbi->last_valid_block_count = sbi->total_valid_block_count;
+ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+ percpu_counter_set(&sbi->rf_node_block_count, 0);
}
static bool __need_flush_quota(struct f2fs_sb_info *sbi)
@@ -1575,11 +1580,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk += NR_CURSEG_NODE_TYPE;
}
- /* update user_block_counts */
- sbi->last_valid_block_count = sbi->total_valid_block_count;
- percpu_counter_set(&sbi->alloc_valid_block_count, 0);
- percpu_counter_set(&sbi->rf_node_block_count, 0);
-
/* Here, we have one bio having CP pack except cp pack 2 page */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
/* Wait for all dirty meta pages to be submitted for IO */
@@ -1718,6 +1718,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
f2fs_restore_inmem_curseg(sbi);
+ f2fs_reinit_atgc_curseg(sbi);
stat_inc_cp_count(sbi);
stop:
unblock_operations(sbi);
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 1ef82a546391..990b93689b46 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1100,7 +1100,7 @@ retry:
struct bio *bio = NULL;
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
- &last_block_in_bio, false, true);
+ &last_block_in_bio, NULL, true);
f2fs_put_rpages(cc);
f2fs_destroy_compress_ctx(cc, true);
if (ret)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b9b0debc6b3d..6457e5bca9c9 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -925,6 +925,7 @@ alloc_new:
#ifdef CONFIG_BLK_DEV_ZONED
static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
{
+ struct block_device *bdev = sbi->sb->s_bdev;
int devi = 0;
if (f2fs_is_multi_device(sbi)) {
@@ -935,8 +936,9 @@ static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
return false;
}
blkaddr -= FDEV(devi).start_blk;
+ bdev = FDEV(devi).bdev;
}
- return bdev_is_zoned(FDEV(devi).bdev) &&
+ return bdev_is_zoned(bdev) &&
f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
(blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
}
@@ -2067,12 +2069,17 @@ static inline loff_t f2fs_readpage_limit(struct inode *inode)
return i_size_read(inode);
}
+static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac)
+{
+ return rac ? REQ_RAHEAD : 0;
+}
+
static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
unsigned nr_pages,
struct f2fs_map_blocks *map,
struct bio **bio_ret,
sector_t *last_block_in_bio,
- bool is_readahead)
+ struct readahead_control *rac)
{
struct bio *bio = *bio_ret;
const unsigned blocksize = blks_to_bytes(inode, 1);
@@ -2148,7 +2155,7 @@ submit_and_realloc:
}
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
- is_readahead ? REQ_RAHEAD : 0, index,
+ f2fs_ra_op_flags(rac), index,
false);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
@@ -2178,7 +2185,7 @@ out:
#ifdef CONFIG_F2FS_FS_COMPRESSION
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
- bool is_readahead, bool for_write)
+ struct readahead_control *rac, bool for_write)
{
struct dnode_of_data dn;
struct inode *inode = cc->inode;
@@ -2301,7 +2308,7 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
- is_readahead ? REQ_RAHEAD : 0,
+ f2fs_ra_op_flags(rac),
page->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
@@ -2399,7 +2406,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
- rac != NULL, false);
+ rac, false);
f2fs_destroy_compress_ctx(&cc, false);
if (ret)
goto set_error_page;
@@ -2449,7 +2456,7 @@ next_page:
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
- rac != NULL, false);
+ rac, false);
f2fs_destroy_compress_ctx(&cc, false);
}
}
@@ -2601,7 +2608,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (IS_NOQUOTA(inode))
return true;
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_used_in_atomic_write(inode))
return true;
/* rewrite low ratio compress data w/ OPU mode to avoid fragmentation */
if (f2fs_compressed_file(inode) &&
@@ -2688,7 +2695,7 @@ got_it:
}
/* wait for GCed page writeback via META_MAPPING */
- if (fio->post_read)
+ if (fio->meta_gc)
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
/*
@@ -2783,7 +2790,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.submitted = 0,
.compr_blocks = compr_blocks,
.need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
- .post_read = f2fs_post_read_required(inode) ? 1 : 0,
+ .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
.io_type = io_type,
.io_wbc = wbc,
.bio = bio,
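Note: the f2fs data.c changes thread the struct readahead_control pointer itself instead of a derived boolean, and a small helper turns its presence into the request flag. A compilable sketch of that helper shape; the struct and the REQ_RAHEAD value here are stand-ins for the kernel definitions:

#include <stdio.h>

struct readahead_control { int dummy; };
#define REQ_RAHEAD 0x800   /* illustrative flag value */

/* Same idea as f2fs_ra_op_flags(): derive the flag from whether a readahead
 * context was passed, instead of carrying a separate bool. */
static int ra_op_flags(struct readahead_control *rac)
{
	return rac ? REQ_RAHEAD : 0;
}

int main(void)
{
	struct readahead_control rac;

	printf("with rac:    %#x\n", ra_op_flags(&rac));
	printf("without rac: %#x\n", ra_op_flags(NULL));
	return 0;
}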
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 48048fa36427..fd1fc06359ee 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -19,33 +19,23 @@
#include "node.h"
#include <trace/events/f2fs.h>
-bool sanity_check_extent_cache(struct inode *inode)
+bool sanity_check_extent_cache(struct inode *inode, struct page *ipage)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
- struct extent_tree *et = fi->extent_tree[EX_READ];
- struct extent_info *ei;
-
- if (!et)
- return true;
+ struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
+ struct extent_info ei;
- ei = &et->largest;
- if (!ei->len)
- return true;
+ get_read_extent_info(&ei, i_ext);
- /* Let's drop, if checkpoint got corrupted. */
- if (is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) {
- ei->len = 0;
- et->largest_updated = true;
+ if (!ei.len)
return true;
- }
- if (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE) ||
- !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+ if (!f2fs_is_valid_blkaddr(sbi, ei.blk, DATA_GENERIC_ENHANCE) ||
+ !f2fs_is_valid_blkaddr(sbi, ei.blk + ei.len - 1,
DATA_GENERIC_ENHANCE)) {
f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
__func__, inode->i_ino,
- ei->blk, ei->fofs, ei->len);
+ ei.blk, ei.fofs, ei.len);
return false;
}
return true;
@@ -394,24 +384,22 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
if (!__may_extent_tree(inode, EX_READ)) {
/* drop largest read extent */
- if (i_ext && i_ext->len) {
+ if (i_ext->len) {
f2fs_wait_on_page_writeback(ipage, NODE, true, true);
i_ext->len = 0;
set_page_dirty(ipage);
}
- goto out;
+ set_inode_flag(inode, FI_NO_EXTENT);
+ return;
}
et = __grab_extent_tree(inode, EX_READ);
- if (!i_ext || !i_ext->len)
- goto out;
-
get_read_extent_info(&ei, i_ext);
write_lock(&et->lock);
- if (atomic_read(&et->node_cnt))
- goto unlock_out;
+ if (atomic_read(&et->node_cnt) || !ei.len)
+ goto skip;
en = __attach_extent_node(sbi, et, &ei, NULL,
&et->root.rb_root.rb_node, true);
@@ -423,11 +411,13 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
list_add_tail(&en->list, &eti->extent_list);
spin_unlock(&eti->extent_lock);
}
-unlock_out:
+skip:
+ /* Let's drop, if checkpoint got corrupted. */
+ if (f2fs_cp_error(sbi)) {
+ et->largest.len = 0;
+ et->largest_updated = true;
+ }
write_unlock(&et->lock);
-out:
- if (!F2FS_I(inode)->extent_tree[EX_READ])
- set_inode_flag(inode, FI_NO_EXTENT);
}
void f2fs_init_age_extent_tree(struct inode *inode)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 8a9d910aa552..ac19c61f0c3e 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -803,6 +803,7 @@ enum {
FI_COW_FILE, /* indicate COW file */
FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
FI_ATOMIC_REPLACE, /* indicate atomic replace */
+ FI_OPENED_FILE, /* indicate file has been opened */
FI_MAX, /* max flag, never be used */
};
@@ -842,7 +843,11 @@ struct f2fs_inode_info {
struct task_struct *atomic_write_task; /* store atomic write task */
struct extent_tree *extent_tree[NR_EXTENT_CACHES];
/* cached extent_tree entry */
- struct inode *cow_inode; /* copy-on-write inode for atomic write */
+ union {
+ struct inode *cow_inode; /* copy-on-write inode for atomic write */
+ struct inode *atomic_inode;
+ /* point to atomic_inode, available only for cow_inode */
+ };
/* avoid racing between foreground op and gc */
struct f2fs_rwsem i_gc_rwsem[2];
@@ -1210,7 +1215,7 @@ struct f2fs_io_info {
unsigned int in_list:1; /* indicate fio is in io_list */
unsigned int is_por:1; /* indicate IO is from recovery or not */
unsigned int encrypted:1; /* indicate file is encrypted */
- unsigned int post_read:1; /* require post read */
+ unsigned int meta_gc:1; /* require meta inode GC */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
struct bio **bio; /* bio for ipu */
@@ -3222,21 +3227,15 @@ static inline bool f2fs_need_compress_data(struct inode *inode)
return false;
}
-static inline unsigned int addrs_per_inode(struct inode *inode)
+static inline unsigned int addrs_per_page(struct inode *inode,
+ bool is_inode)
{
- unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
- get_inline_xattr_addrs(inode);
+ unsigned int addrs = is_inode ? (CUR_ADDRS_PER_INODE(inode) -
+ get_inline_xattr_addrs(inode)) : DEF_ADDRS_PER_BLOCK;
- if (!f2fs_compressed_file(inode))
- return addrs;
- return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
-}
-
-static inline unsigned int addrs_per_block(struct inode *inode)
-{
- if (!f2fs_compressed_file(inode))
- return DEF_ADDRS_PER_BLOCK;
- return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
+ if (f2fs_compressed_file(inode))
+ return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
+ return addrs;
}
static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
@@ -3706,6 +3705,7 @@ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
+int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
@@ -4163,7 +4163,7 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
* inline.c
*/
bool f2fs_may_inline_data(struct inode *inode);
-bool f2fs_sanity_check_inline_data(struct inode *inode);
+bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
@@ -4204,7 +4204,7 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
-bool sanity_check_extent_cache(struct inode *inode);
+bool sanity_check_extent_cache(struct inode *inode, struct page *ipage);
void f2fs_init_extent_tree(struct inode *inode);
void f2fs_drop_extent_tree(struct inode *inode);
void f2fs_destroy_extent_node(struct inode *inode);
@@ -4275,6 +4275,16 @@ static inline bool f2fs_post_read_required(struct inode *inode)
f2fs_compressed_file(inode);
}
+static inline bool f2fs_used_in_atomic_write(struct inode *inode)
+{
+ return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
+}
+
+static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
+{
+ return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
+}
+
/*
* compress.c
*/
@@ -4310,7 +4320,7 @@ void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
- bool is_readahead, bool for_write);
+ struct readahead_control *rac, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task);
@@ -4401,22 +4411,18 @@ static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
- F2FS_I(inode)->i_compress_algorithm =
- F2FS_OPTION(sbi).compress_algorithm;
- F2FS_I(inode)->i_log_cluster_size =
- F2FS_OPTION(sbi).compress_log_size;
- F2FS_I(inode)->i_compress_flag =
- F2FS_OPTION(sbi).compress_chksum ?
- BIT(COMPRESS_CHKSUM) : 0;
- F2FS_I(inode)->i_cluster_size =
- BIT(F2FS_I(inode)->i_log_cluster_size);
- if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
- F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
+ fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
+ fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
+ fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
+ BIT(COMPRESS_CHKSUM) : 0;
+ fi->i_cluster_size = BIT(fi->i_log_cluster_size);
+ if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
+ fi->i_compress_algorithm == COMPRESS_ZSTD) &&
F2FS_OPTION(sbi).compress_level)
- F2FS_I(inode)->i_compress_level =
- F2FS_OPTION(sbi).compress_level;
- F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+ fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ fi->i_flags |= F2FS_COMPR_FL;
set_inode_flag(inode, FI_COMPRESSED_FILE);
stat_inc_compr_inode(inode);
inc_compr_inode_stat(inode);
@@ -4431,15 +4437,15 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- f2fs_down_write(&F2FS_I(inode)->i_sem);
+ f2fs_down_write(&fi->i_sem);
if (!f2fs_compressed_file(inode)) {
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
return true;
}
if (f2fs_is_mmap_file(inode) ||
(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
return false;
}
@@ -4448,7 +4454,7 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
clear_inode_flag(inode, FI_COMPRESSED_FILE);
f2fs_mark_inode_dirty_sync(inode, true);
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
return true;
}
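Note: in the f2fs.h diff above, addrs_per_inode() and addrs_per_block() are folded into addrs_per_page(), which rounds the usable address count down to a cluster boundary for compressed files. A sketch of that rounding; the counts and cluster size are illustrative and the ALIGN_DOWN here is a simplified version of the kernel macro:

#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

int main(void)
{
	unsigned int cluster = 16;        /* illustrative cluster size */
	unsigned int inode_addrs = 923;   /* illustrative per-inode count */
	unsigned int block_addrs = 1018;  /* illustrative per-block count */

	printf("inode: %u usable addrs\n", ALIGN_DOWN(inode_addrs, cluster));
	printf("block: %u usable addrs\n", ALIGN_DOWN(block_addrs, cluster));
	return 0;
}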
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c1ad9b278c47..168f08507004 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -554,6 +554,42 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
+static int finish_preallocate_blocks(struct inode *inode)
+{
+ int ret;
+
+ inode_lock(inode);
+ if (is_inode_flag_set(inode, FI_OPENED_FILE)) {
+ inode_unlock(inode);
+ return 0;
+ }
+
+ if (!file_should_truncate(inode)) {
+ set_inode_flag(inode, FI_OPENED_FILE);
+ inode_unlock(inode);
+ return 0;
+ }
+
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ filemap_invalidate_lock(inode->i_mapping);
+
+ truncate_setsize(inode, i_size_read(inode));
+ ret = f2fs_truncate(inode);
+
+ filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+ if (!ret)
+ set_inode_flag(inode, FI_OPENED_FILE);
+
+ inode_unlock(inode);
+ if (ret)
+ return ret;
+
+ file_dont_truncate(inode);
+ return 0;
+}
+
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
int err = fscrypt_file_open(inode, filp);
@@ -571,7 +607,11 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
filp->f_mode |= FMODE_NOWAIT;
filp->f_mode |= FMODE_CAN_ODIRECT;
- return dquot_file_open(inode, filp);
+ err = dquot_file_open(inode, filp);
+ if (err)
+ return err;
+
+ return finish_preallocate_blocks(inode);
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
@@ -825,6 +865,8 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
return true;
if (f2fs_compressed_file(inode))
return true;
+ if (f2fs_has_inline_data(inode))
+ return true;
/* disallow direct IO if any of devices has unaligned blksize */
if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
@@ -937,6 +979,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
@@ -955,7 +998,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return -EOPNOTSUPP;
if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
!IS_ALIGNED(attr->ia_size,
- F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
+ F2FS_BLK_TO_BYTES(fi->i_cluster_size)))
return -EINVAL;
}
@@ -1009,7 +1052,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
}
- f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
truncate_setsize(inode, attr->ia_size);
@@ -1021,14 +1064,14 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
* larger than i_size.
*/
filemap_invalidate_unlock(inode->i_mapping);
- f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
if (err)
return err;
- spin_lock(&F2FS_I(inode)->i_size_lock);
+ spin_lock(&fi->i_size_lock);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- F2FS_I(inode)->last_disk_size = i_size_read(inode);
- spin_unlock(&F2FS_I(inode)->i_size_lock);
+ fi->last_disk_size = i_size_read(inode);
+ spin_unlock(&fi->i_size_lock);
}
__setattr_copy(idmap, inode, attr);
@@ -1038,7 +1081,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
if (!err)
- inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ inode->i_mode = fi->i_acl_mode;
clear_inode_flag(inode, FI_ACL_MODE);
}
}
@@ -1946,15 +1989,15 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
if (err)
return err;
- f2fs_down_write(&F2FS_I(inode)->i_sem);
+ f2fs_down_write(&fi->i_sem);
if (!f2fs_may_compress(inode) ||
(S_ISREG(inode->i_mode) &&
F2FS_HAS_BLOCKS(inode))) {
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
return -EINVAL;
}
err = set_compress_context(inode);
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
if (err)
return err;
@@ -2139,6 +2182,9 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
set_inode_flag(fi->cow_inode, FI_COW_FILE);
clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+
+ /* Set the COW inode's atomic_inode to the atomic inode */
+ F2FS_I(fi->cow_inode)->atomic_inode = inode;
} else {
/* Reuse the already created COW inode */
ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
@@ -3541,6 +3587,7 @@ next:
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t page_idx = 0, last_idx;
unsigned int released_blocks = 0;
@@ -3578,7 +3625,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
goto out;
- if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+ if (!atomic_read(&fi->i_compr_blocks)) {
ret = -EPERM;
goto out;
}
@@ -3587,7 +3634,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
- f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3613,7 +3660,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
- count = round_up(count, F2FS_I(inode)->i_cluster_size);
+ count = round_up(count, fi->i_cluster_size);
ret = release_compress_blocks(&dn, count);
@@ -3629,7 +3676,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
}
filemap_invalidate_unlock(inode->i_mapping);
- f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
out:
if (released_blocks)
f2fs_update_time(sbi, REQ_TIME);
@@ -3640,14 +3687,14 @@ out:
if (ret >= 0) {
ret = put_user(released_blocks, (u64 __user *)arg);
} else if (released_blocks &&
- atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+ atomic_read(&fi->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
"iblocks=%llu, released=%u, compr_blocks=%u, "
"run fsck to fix.",
__func__, inode->i_ino, inode->i_blocks,
released_blocks,
- atomic_read(&F2FS_I(inode)->i_compr_blocks));
+ atomic_read(&fi->i_compr_blocks));
}
return ret;
@@ -3736,6 +3783,7 @@ next:
static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t page_idx = 0, last_idx;
unsigned int reserved_blocks = 0;
@@ -3761,10 +3809,10 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
goto unlock_inode;
}
- if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
+ if (atomic_read(&fi->i_compr_blocks))
goto unlock_inode;
- f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3790,7 +3838,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
- count = round_up(count, F2FS_I(inode)->i_cluster_size);
+ count = round_up(count, fi->i_cluster_size);
ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
@@ -3805,7 +3853,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
}
filemap_invalidate_unlock(inode->i_mapping);
- f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
if (!ret) {
clear_inode_flag(inode, FI_COMPRESS_RELEASED);
@@ -3821,14 +3869,14 @@ unlock_inode:
if (!ret) {
ret = put_user(reserved_blocks, (u64 __user *)arg);
} else if (reserved_blocks &&
- atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+ atomic_read(&fi->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
"iblocks=%llu, reserved=%u, compr_blocks=%u, "
"run fsck to fix.",
__func__, inode->i_ino, inode->i_blocks,
reserved_blocks,
- atomic_read(&F2FS_I(inode)->i_compr_blocks));
+ atomic_read(&fi->i_compr_blocks));
}
return ret;
@@ -3891,7 +3939,9 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
return -EOPNOTSUPP;
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
@@ -4017,7 +4067,7 @@ out:
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -4052,6 +4102,7 @@ static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_comp_option option;
int ret = 0;
@@ -4071,7 +4122,9 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
option.algorithm >= COMPRESS_MAX)
return -EINVAL;
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
f2fs_down_write(&F2FS_I(inode)->i_sem);
@@ -4090,27 +4143,27 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
goto out;
}
- F2FS_I(inode)->i_compress_algorithm = option.algorithm;
- F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
- F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
+ fi->i_compress_algorithm = option.algorithm;
+ fi->i_log_cluster_size = option.log_cluster_size;
+ fi->i_cluster_size = BIT(option.log_cluster_size);
/* Set default level */
- if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
- F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ if (fi->i_compress_algorithm == COMPRESS_ZSTD)
+ fi->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
else
- F2FS_I(inode)->i_compress_level = 0;
+ fi->i_compress_level = 0;
/* Adjust mount option level */
if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
F2FS_OPTION(sbi).compress_level)
- F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
f2fs_mark_inode_dirty_sync(inode, true);
if (!f2fs_is_compress_backend_ready(inode))
f2fs_warn(sbi, "compression algorithm is successfully set, "
"but current kernel doesn't support this algorithm.");
out:
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -4167,7 +4220,9 @@ static int f2fs_ioc_decompress_file(struct file *filp)
f2fs_balance_fs(sbi, true);
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
if (!f2fs_is_compress_backend_ready(inode)) {
@@ -4222,7 +4277,7 @@ static int f2fs_ioc_decompress_file(struct file *filp)
f2fs_update_time(sbi, REQ_TIME);
out:
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -4244,7 +4299,9 @@ static int f2fs_ioc_compress_file(struct file *filp)
f2fs_balance_fs(sbi, true);
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
if (!f2fs_is_compress_backend_ready(inode)) {
@@ -4300,7 +4357,7 @@ static int f2fs_ioc_compress_file(struct file *filp)
f2fs_update_time(sbi, REQ_TIME);
out:
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
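Note: finish_preallocate_blocks() in the file.c hunk moves the deferred truncation of preallocated blocks from iget time to the first open, latching FI_OPENED_FILE only when the truncation succeeded. A simplified, compilable model of that run-once-on-open pattern; truncate_preallocation() is a stub and the mutex stands in for the inode lock (build with -pthread):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int opened_once;

static int truncate_preallocation(void)
{
	return 0;   /* pretend the truncation succeeded */
}

static int on_open(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!opened_once) {
		ret = truncate_preallocation();
		if (!ret)
			opened_once = 1;   /* latch only on success */
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("first open:  %d\n", on_open());
	printf("second open: %d\n", on_open());
	return 0;
}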
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6066c6eecf41..724bbcb447d3 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1171,7 +1171,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
static int ra_data_block(struct inode *inode, pgoff_t index)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct address_space *mapping = inode->i_mapping;
+ struct address_space *mapping = f2fs_is_cow_file(inode) ?
+ F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
struct f2fs_io_info fio = {
@@ -1260,6 +1261,8 @@ put_page:
static int move_data_block(struct inode *inode, block_t bidx,
int gc_type, unsigned int segno, int off)
{
+ struct address_space *mapping = f2fs_is_cow_file(inode) ?
+ F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.ino = inode->i_ino,
@@ -1282,7 +1285,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
/* do not read out */
- page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
+ page = f2fs_grab_cache_page(mapping, bidx, false);
if (!page)
return -ENOMEM;
@@ -1563,6 +1566,16 @@ next_step:
continue;
}
+ if (f2fs_has_inline_data(inode)) {
+ iput(inode);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_err_ratelimited(sbi,
+ "inode %lx has both inline_data flag and "
+ "data block, nid=%u, ofs_in_node=%u",
+ inode->i_ino, dni.nid, ofs_in_node);
+ continue;
+ }
+
err = f2fs_gc_pinned_control(inode, gc_type, segno);
if (err == -EAGAIN) {
iput(inode);
@@ -1579,7 +1592,7 @@ next_step:
start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
ofs_in_node;
- if (f2fs_post_read_required(inode)) {
+ if (f2fs_meta_inode_gc_required(inode)) {
int err = ra_data_block(inode, start_bidx);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1630,7 +1643,7 @@ next_step:
start_bidx = f2fs_start_bidx_of_node(nofs, inode)
+ ofs_in_node;
- if (f2fs_post_read_required(inode))
+ if (f2fs_meta_inode_gc_required(inode))
err = move_data_block(inode, start_bidx,
gc_type, segno, off);
else
@@ -1638,7 +1651,7 @@ next_step:
segno, off);
if (!err && (gc_type == FG_GC ||
- f2fs_post_read_required(inode)))
+ f2fs_meta_inode_gc_required(inode)))
submitted++;
if (locked) {
@@ -1742,7 +1755,6 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
if (type != GET_SUM_TYPE((&sum->footer))) {
f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
segno, type, GET_SUM_TYPE((&sum->footer)));
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_stop_checkpoint(sbi, false,
STOP_CP_REASON_CORRUPTED_SUMMARY);
goto skip;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 7638d0d7b7ee..cca7d448e55c 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -16,7 +16,7 @@
static bool support_inline_data(struct inode *inode)
{
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_used_in_atomic_write(inode))
return false;
if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
return false;
@@ -33,11 +33,29 @@ bool f2fs_may_inline_data(struct inode *inode)
return !f2fs_post_read_required(inode);
}
-bool f2fs_sanity_check_inline_data(struct inode *inode)
+static bool inode_has_blocks(struct inode *inode, struct page *ipage)
+{
+ struct f2fs_inode *ri = F2FS_INODE(ipage);
+ int i;
+
+ if (F2FS_HAS_BLOCKS(inode))
+ return true;
+
+ for (i = 0; i < DEF_NIDS_PER_INODE; i++) {
+ if (ri->i_nid[i])
+ return true;
+ }
+ return false;
+}
+
+bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage)
{
if (!f2fs_has_inline_data(inode))
return false;
+ if (inode_has_blocks(inode, ipage))
+ return false;
+
if (!support_inline_data(inode))
return true;
@@ -203,8 +221,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
struct page *ipage, *page;
int err = 0;
- if (!f2fs_has_inline_data(inode) ||
- f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
+ if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ if (!f2fs_has_inline_data(inode))
return 0;
err = f2fs_dquot_initialize(inode);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 005dde72aff3..aef57172014f 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -29,6 +29,9 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
if (is_inode_flag_set(inode, FI_NEW_INODE))
return;
+ if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+ return;
+
if (f2fs_inode_dirtied(inode, sync))
return;
@@ -310,10 +313,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
if (!sanity_check_compress_inode(inode, ri))
return false;
}
- } else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
- f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
- __func__, inode->i_ino);
- return false;
}
if (!f2fs_sb_has_extra_attr(sbi)) {
@@ -344,7 +343,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
}
- if (f2fs_sanity_check_inline_data(inode)) {
+ if (f2fs_sanity_check_inline_data(inode, node_page)) {
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
@@ -508,16 +507,16 @@ static int do_read_inode(struct inode *inode)
init_idisk_time(inode);
- /* Need all the flag bits */
- f2fs_init_read_extent_tree(inode, node_page);
- f2fs_init_age_extent_tree(inode);
-
- if (!sanity_check_extent_cache(inode)) {
+ if (!sanity_check_extent_cache(inode, node_page)) {
f2fs_put_page(node_page, 1);
f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
return -EFSCORRUPTED;
}
+ /* Need all the flag bits */
+ f2fs_init_read_extent_tree(inode, node_page);
+ f2fs_init_age_extent_tree(inode);
+
f2fs_put_page(node_page, 1);
stat_inc_inline_xattr(inode);
@@ -610,14 +609,6 @@ make_now:
}
f2fs_set_inode_flags(inode);
- if (file_should_truncate(inode) &&
- !is_sbi_flag_set(sbi, SBI_POR_DOING)) {
- ret = f2fs_truncate(inode);
- if (ret)
- goto bad_inode;
- file_dont_truncate(inode);
- }
-
unlock_new_inode(inode);
trace_f2fs_iget(inode);
return inode;
@@ -645,8 +636,9 @@ retry:
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode *ri;
- struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+ struct extent_tree *et = fi->extent_tree[EX_READ];
f2fs_wait_on_page_writeback(node_page, NODE, true, true);
set_page_dirty(node_page);
@@ -656,7 +648,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
ri = F2FS_INODE(node_page);
ri->i_mode = cpu_to_le16(inode->i_mode);
- ri->i_advise = F2FS_I(inode)->i_advise;
+ ri->i_advise = fi->i_advise;
ri->i_uid = cpu_to_le32(i_uid_read(inode));
ri->i_gid = cpu_to_le32(i_gid_read(inode));
ri->i_links = cpu_to_le32(inode->i_nlink);
@@ -682,58 +674,49 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
if (S_ISDIR(inode->i_mode))
- ri->i_current_depth =
- cpu_to_le32(F2FS_I(inode)->i_current_depth);
+ ri->i_current_depth = cpu_to_le32(fi->i_current_depth);
else if (S_ISREG(inode->i_mode))
- ri->i_gc_failures = cpu_to_le16(F2FS_I(inode)->i_gc_failures);
- ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
- ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
- ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
+ ri->i_gc_failures = cpu_to_le16(fi->i_gc_failures);
+ ri->i_xattr_nid = cpu_to_le32(fi->i_xattr_nid);
+ ri->i_flags = cpu_to_le32(fi->i_flags);
+ ri->i_pino = cpu_to_le32(fi->i_pino);
ri->i_generation = cpu_to_le32(inode->i_generation);
- ri->i_dir_level = F2FS_I(inode)->i_dir_level;
+ ri->i_dir_level = fi->i_dir_level;
if (f2fs_has_extra_attr(inode)) {
- ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
+ ri->i_extra_isize = cpu_to_le16(fi->i_extra_isize);
if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
ri->i_inline_xattr_size =
- cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
+ cpu_to_le16(fi->i_inline_xattr_size);
if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
- F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
- i_projid)) {
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) {
projid_t i_projid;
- i_projid = from_kprojid(&init_user_ns,
- F2FS_I(inode)->i_projid);
+ i_projid = from_kprojid(&init_user_ns, fi->i_projid);
ri->i_projid = cpu_to_le32(i_projid);
}
if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
- F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
- i_crtime)) {
- ri->i_crtime =
- cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
- ri->i_crtime_nsec =
- cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+ ri->i_crtime = cpu_to_le64(fi->i_crtime.tv_sec);
+ ri->i_crtime_nsec = cpu_to_le32(fi->i_crtime.tv_nsec);
}
if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
- F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
i_compress_flag)) {
unsigned short compress_flag;
- ri->i_compr_blocks =
- cpu_to_le64(atomic_read(
- &F2FS_I(inode)->i_compr_blocks));
- ri->i_compress_algorithm =
- F2FS_I(inode)->i_compress_algorithm;
- compress_flag = F2FS_I(inode)->i_compress_flag |
- F2FS_I(inode)->i_compress_level <<
+ ri->i_compr_blocks = cpu_to_le64(
+ atomic_read(&fi->i_compr_blocks));
+ ri->i_compress_algorithm = fi->i_compress_algorithm;
+ compress_flag = fi->i_compress_flag |
+ fi->i_compress_level <<
COMPRESS_LEVEL_OFFSET;
ri->i_compress_flag = cpu_to_le16(compress_flag);
- ri->i_log_cluster_size =
- F2FS_I(inode)->i_log_cluster_size;
+ ri->i_log_cluster_size = fi->i_log_cluster_size;
}
}
@@ -813,8 +796,9 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_abort_atomic_write(inode, true);
- if (fi->cow_inode) {
+ if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) {
clear_inode_flag(fi->cow_inode, FI_COW_FILE);
+ F2FS_I(fi->cow_inode)->atomic_inode = NULL;
iput(fi->cow_inode);
fi->cow_inode = NULL;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 1ecde2b45e99..38b4750475db 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -221,6 +221,7 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
const char *name)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_inode_info *fi;
nid_t ino;
struct inode *inode;
bool nid_free = false;
@@ -241,14 +242,15 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
inode_init_owner(idmap, inode, dir, mode);
+ fi = F2FS_I(inode);
inode->i_ino = ino;
inode->i_blocks = 0;
simple_inode_init_ts(inode);
- F2FS_I(inode)->i_crtime = inode_get_mtime(inode);
+ fi->i_crtime = inode_get_mtime(inode);
inode->i_generation = get_random_u32();
if (S_ISDIR(inode->i_mode))
- F2FS_I(inode)->i_current_depth = 1;
+ fi->i_current_depth = 1;
err = insert_inode_locked(inode);
if (err) {
@@ -258,9 +260,9 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
if (f2fs_sb_has_project_quota(sbi) &&
(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
- F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
+ fi->i_projid = F2FS_I(dir)->i_projid;
else
- F2FS_I(inode)->i_projid = make_kprojid(&init_user_ns,
+ fi->i_projid = make_kprojid(&init_user_ns,
F2FS_DEF_PROJID);
err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
@@ -278,7 +280,7 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
if (f2fs_sb_has_extra_attr(sbi)) {
set_inode_flag(inode, FI_EXTRA_ATTR);
- F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
+ fi->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
}
if (test_opt(sbi, INLINE_XATTR))
@@ -296,15 +298,15 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
f2fs_has_inline_dentry(inode)) {
xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
}
- F2FS_I(inode)->i_inline_xattr_size = xattr_size;
+ fi->i_inline_xattr_size = xattr_size;
- F2FS_I(inode)->i_flags =
+ fi->i_flags =
f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
if (S_ISDIR(inode->i_mode))
- F2FS_I(inode)->i_flags |= F2FS_INDEX_FL;
+ fi->i_flags |= F2FS_INDEX_FL;
- if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
+ if (fi->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
/* Check compression first. */
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 8712e264071f..9756f0f2b7f7 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -280,6 +280,7 @@ static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
static int recover_inode(struct inode *inode, struct page *page)
{
struct f2fs_inode *raw = F2FS_INODE(page);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
char *name;
int err;
@@ -302,12 +303,12 @@ static int recover_inode(struct inode *inode, struct page *page)
i_projid = (projid_t)le32_to_cpu(raw->i_projid);
kprojid = make_kprojid(&init_user_ns, i_projid);
- if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
+ if (!projid_eq(kprojid, fi->i_projid)) {
err = f2fs_transfer_project_quota(inode,
kprojid);
if (err)
return err;
- F2FS_I(inode)->i_projid = kprojid;
+ fi->i_projid = kprojid;
}
}
}
@@ -320,10 +321,10 @@ static int recover_inode(struct inode *inode, struct page *page)
inode_set_mtime(inode, le64_to_cpu(raw->i_mtime),
le32_to_cpu(raw->i_mtime_nsec));
- F2FS_I(inode)->i_advise = raw->i_advise;
- F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
+ fi->i_advise = raw->i_advise;
+ fi->i_flags = le32_to_cpu(raw->i_flags);
f2fs_set_inode_flags(inode);
- F2FS_I(inode)->i_gc_failures = le16_to_cpu(raw->i_gc_failures);
+ fi->i_gc_failures = le16_to_cpu(raw->i_gc_failures);
recover_inline_flags(inode, raw);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a0ce3d080f80..78c3198a6308 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2784,11 +2784,19 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
unsigned short seg_type = curseg->seg_type;
sanity_check_seg_type(sbi, seg_type);
- if (f2fs_need_rand_seg(sbi))
- return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
+ if (__is_large_section(sbi)) {
+ if (f2fs_need_rand_seg(sbi)) {
+ unsigned int hint = GET_SEC_FROM_SEG(sbi, curseg->segno);
- if (__is_large_section(sbi))
+ if (GET_SEC_FROM_SEG(sbi, curseg->segno + 1) != hint)
+ return curseg->segno;
+ return get_random_u32_inclusive(curseg->segno + 1,
+ GET_SEG_FROM_SEC(sbi, hint + 1) - 1);
+ }
return curseg->segno;
+ } else if (f2fs_need_rand_seg(sbi)) {
+ return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
+ }
/* inmem log may not locate on any segment after mount */
if (!curseg->inited)
@@ -2931,12 +2939,12 @@ static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
return ret;
}
-static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
+static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi, bool force)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
int ret = 0;
- if (!sbi->am.atgc_enabled)
+ if (!sbi->am.atgc_enabled && !force)
return 0;
f2fs_down_read(&SM_I(sbi)->curseg_lock);
@@ -2953,9 +2961,30 @@ static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
f2fs_up_read(&SM_I(sbi)->curseg_lock);
return ret;
}
+
int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
{
- return __f2fs_init_atgc_curseg(sbi);
+ return __f2fs_init_atgc_curseg(sbi, false);
+}
+
+int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi)
+{
+ int ret;
+
+ if (!test_opt(sbi, ATGC))
+ return 0;
+ if (sbi->am.atgc_enabled)
+ return 0;
+ if (le64_to_cpu(F2FS_CKPT(sbi)->elapsed_time) <
+ sbi->am.age_threshold)
+ return 0;
+
+ ret = __f2fs_init_atgc_curseg(sbi, true);
+ if (!ret) {
+ sbi->am.atgc_enabled = true;
+ f2fs_info(sbi, "reenabled age threshold GC");
+ }
+ return ret;
}
static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
@@ -3483,7 +3512,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (page_private_gcing(fio->page)) {
if (fio->sbi->am.atgc_enabled &&
(fio->io_type == FS_DATA_IO) &&
- (fio->sbi->gc_mode != GC_URGENT_HIGH))
+ (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
+ __is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !is_inode_flag_set(inode, FI_OPU_WRITE))
return CURSEG_ALL_DATA_ATGC;
else
return CURSEG_COLD_DATA;
@@ -3828,7 +3859,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
goto drop_bio;
}
- if (fio->post_read)
+ if (fio->meta_gc)
f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
stat_inc_inplace_blocks(fio->sbi);
@@ -3998,7 +4029,7 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *cpage;
- if (!f2fs_post_read_required(inode))
+ if (!f2fs_meta_inode_gc_required(inode))
return;
if (!__is_valid_data_blkaddr(blkaddr))
@@ -4017,7 +4048,7 @@ void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
block_t i;
- if (!f2fs_post_read_required(inode))
+ if (!f2fs_meta_inode_gc_required(inode))
return;
for (i = 0; i < len; i++)
@@ -5187,7 +5218,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
}
/* Allocate a new section if it's not new. */
- if (cs->next_blkoff) {
+ if (cs->next_blkoff ||
+ cs->segno != GET_SEG_FROM_SEC(sbi, GET_ZONE_FROM_SEC(sbi, cs_section))) {
unsigned int old_segno = cs->segno, old_blkoff = cs->next_blkoff;
f2fs_allocate_new_section(sbi, type, true);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index e1c0f418aa11..bfc01a521cb9 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -347,7 +347,8 @@ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
unsigned int segno, bool use_section)
{
if (use_section && __is_large_section(sbi)) {
- unsigned int start_segno = START_SEGNO(segno);
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int blocks = 0;
int i;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index df4cf31f93df..3959fd137cc9 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -151,8 +151,6 @@ enum {
Opt_mode,
Opt_fault_injection,
Opt_fault_type,
- Opt_lazytime,
- Opt_nolazytime,
Opt_quota,
Opt_noquota,
Opt_usrquota,
@@ -229,8 +227,6 @@ static match_table_t f2fs_tokens = {
{Opt_mode, "mode=%s"},
{Opt_fault_injection, "fault_injection=%u"},
{Opt_fault_type, "fault_type=%u"},
- {Opt_lazytime, "lazytime"},
- {Opt_nolazytime, "nolazytime"},
{Opt_quota, "quota"},
{Opt_noquota, "noquota"},
{Opt_usrquota, "usrquota"},
@@ -918,12 +914,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
f2fs_info(sbi, "fault_type options not supported");
break;
#endif
- case Opt_lazytime:
- sb->s_flags |= SB_LAZYTIME;
- break;
- case Opt_nolazytime:
- sb->s_flags &= ~SB_LAZYTIME;
- break;
#ifdef CONFIG_QUOTA
case Opt_quota:
case Opt_usrquota:
@@ -4481,6 +4471,7 @@ try_onemore:
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
+ super_set_sysfs_name_bdev(sb);
sb->s_iflags |= SB_I_CGROUPWB;
/* init f2fs-specific super block info */
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 09d3ecfaa4f1..fee7ee45ceaa 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -340,13 +340,13 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
if (!strcmp(a->attr.name, "ckpt_thread_ioprio")) {
struct ckpt_req_control *cprc = &sbi->cprc_info;
int class = IOPRIO_PRIO_CLASS(cprc->ckpt_thread_ioprio);
- int data = IOPRIO_PRIO_DATA(cprc->ckpt_thread_ioprio);
+ int level = IOPRIO_PRIO_LEVEL(cprc->ckpt_thread_ioprio);
if (class != IOPRIO_CLASS_RT && class != IOPRIO_CLASS_BE)
return -EINVAL;
return sysfs_emit(buf, "%s,%d\n",
- class == IOPRIO_CLASS_RT ? "rt" : "be", data);
+ class == IOPRIO_CLASS_RT ? "rt" : "be", level);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -450,7 +450,7 @@ out:
const char *name = strim((char *)buf);
struct ckpt_req_control *cprc = &sbi->cprc_info;
int class;
- long data;
+ long level;
int ret;
if (!strncmp(name, "rt,", 3))
@@ -461,13 +461,13 @@ out:
return -EINVAL;
name += 3;
- ret = kstrtol(name, 10, &data);
+ ret = kstrtol(name, 10, &level);
if (ret)
return ret;
- if (data >= IOPRIO_NR_LEVELS || data < 0)
+ if (level >= IOPRIO_NR_LEVELS || level < 0)
return -EINVAL;
- cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, data);
+ cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, level);
if (test_opt(sbi, MERGE_CHECKPOINT)) {
ret = set_task_ioprio(cprc->f2fs_issue_ckpt,
cprc->ckpt_thread_ioprio);
diff --git a/fs/file_table.c b/fs/file_table.c
index 4f03beed4737..ca7843dde56d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(get_max_files);
/*
* Handle nr_files sysctl
*/
-static int proc_nr_files(struct ctl_table *table, int write, void *buffer,
+static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = get_nr_files();
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 92a5b8283528..b865a3fa52f3 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2413,7 +2413,7 @@ static int __init start_dirtytime_writeback(void)
}
__initcall(start_dirtytime_writeback);
-int dirtytime_interval_handler(struct ctl_table *table, int write,
+int dirtytime_interval_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 0239e3af3945..8b39c15c408c 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -63,9 +63,10 @@ struct hostfs_stat {
struct hostfs_timespec atime, mtime, ctime;
unsigned int blksize;
unsigned long long blocks;
- unsigned int maj;
- unsigned int min;
- dev_t dev;
+ struct {
+ unsigned int maj;
+ unsigned int min;
+ } rdev, dev;
};
extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 3eb747d26924..22df574ca99e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -17,6 +17,7 @@
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/namei.h>
#include "hostfs.h"
#include <init.h>
@@ -455,7 +456,7 @@ static int hostfs_read_folio(struct file *file, struct folio *folio)
if (bytes_read < 0)
ret = bytes_read;
else
- buffer = folio_zero_tail(folio, bytes_read, buffer);
+ buffer = folio_zero_tail(folio, bytes_read, buffer + bytes_read);
kunmap_local(buffer);
folio_end_read(folio, ret == 0);
@@ -532,10 +533,11 @@ static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
static int hostfs_inode_set(struct inode *ino, void *data)
{
struct hostfs_stat *st = data;
- dev_t rdev;
+ dev_t dev, rdev;
/* Reencode maj and min with the kernel encoding.*/
- rdev = MKDEV(st->maj, st->min);
+ rdev = MKDEV(st->rdev.maj, st->rdev.min);
+ dev = MKDEV(st->dev.maj, st->dev.min);
switch (st->mode & S_IFMT) {
case S_IFLNK:
@@ -561,7 +563,7 @@ static int hostfs_inode_set(struct inode *ino, void *data)
return -EIO;
}
- HOSTFS_I(ino)->dev = st->dev;
+ HOSTFS_I(ino)->dev = dev;
ino->i_ino = st->ino;
ino->i_mode = st->mode;
return hostfs_inode_update(ino, st);
@@ -570,8 +572,9 @@ static int hostfs_inode_set(struct inode *ino, void *data)
static int hostfs_inode_test(struct inode *inode, void *data)
{
const struct hostfs_stat *st = data;
+ dev_t dev = MKDEV(st->dev.maj, st->dev.min);
- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == st->dev;
+ return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev;
}
static struct inode *hostfs_iget(struct super_block *sb, char *name)
@@ -927,7 +930,6 @@ static const struct inode_operations hostfs_link_iops = {
static int hostfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct hostfs_fs_info *fsi = sb->s_fs_info;
- const char *host_root = fc->source;
struct inode *root_inode;
int err;
@@ -941,15 +943,6 @@ static int hostfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
- /* NULL is printed as '(null)' by printf(): avoid that. */
- if (fc->source == NULL)
- host_root = "";
-
- fsi->host_root_path =
- kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
- if (fsi->host_root_path == NULL)
- return -ENOMEM;
-
root_inode = hostfs_iget(sb, fsi->host_root_path);
if (IS_ERR(root_inode))
return PTR_ERR(root_inode);
@@ -975,6 +968,58 @@ static int hostfs_fill_super(struct super_block *sb, struct fs_context *fc)
return 0;
}
+enum hostfs_param {
+ Opt_hostfs,
+};
+
+static const struct fs_parameter_spec hostfs_param_specs[] = {
+ fsparam_string_empty("hostfs", Opt_hostfs),
+ {}
+};
+
+static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct hostfs_fs_info *fsi = fc->s_fs_info;
+ struct fs_parse_result result;
+ char *host_root;
+ int opt;
+
+ opt = fs_parse(fc, hostfs_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_hostfs:
+ host_root = param->string;
+ if (!*host_root)
+ host_root = "";
+ fsi->host_root_path =
+ kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
+ if (fsi->host_root_path == NULL)
+ return -ENOMEM;
+ break;
+ }
+
+ return 0;
+}
+
+static int hostfs_parse_monolithic(struct fs_context *fc, void *data)
+{
+ struct hostfs_fs_info *fsi = fc->s_fs_info;
+ char *host_root = (char *)data;
+
+ /* NULL is printed as '(null)' by printf(): avoid that. */
+ if (host_root == NULL)
+ host_root = "";
+
+ fsi->host_root_path =
+ kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
+ if (fsi->host_root_path == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int hostfs_fc_get_tree(struct fs_context *fc)
{
return get_tree_nodev(fc, hostfs_fill_super);
@@ -992,6 +1037,8 @@ static void hostfs_fc_free(struct fs_context *fc)
}
static const struct fs_context_operations hostfs_context_ops = {
+ .parse_monolithic = hostfs_parse_monolithic,
+ .parse_param = hostfs_parse_param,
.get_tree = hostfs_fc_get_tree,
.free = hostfs_fc_free,
};
@@ -1040,4 +1087,5 @@ static void __exit exit_hostfs(void)
module_init(init_hostfs)
module_exit(exit_hostfs)
+MODULE_DESCRIPTION("User-Mode Linux Host filesystem");
MODULE_LICENSE("GPL");
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 840619e39a1a..97e9c40a9448 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -34,9 +34,10 @@ static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
p->mtime.tv_nsec = 0;
p->blksize = buf->st_blksize;
p->blocks = buf->st_blocks;
- p->maj = os_major(buf->st_rdev);
- p->min = os_minor(buf->st_rdev);
- p->dev = buf->st_dev;
+ p->rdev.maj = os_major(buf->st_rdev);
+ p->rdev.min = os_minor(buf->st_rdev);
+ p->dev.maj = os_major(buf->st_dev);
+ p->dev.min = os_minor(buf->st_dev);
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
diff --git a/fs/inode.c b/fs/inode.c
index f356fe2ec2b6..86670941884b 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -107,7 +107,7 @@ long get_nr_dirty_inodes(void)
*/
static struct inodes_stat_t inodes_stat;
-static int proc_nr_inodes(struct ctl_table *table, int write, void *buffer,
+static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
inodes_stat.nr_inodes = get_nr_inodes();
@@ -676,6 +676,16 @@ static void evict(struct inode *inode)
remove_inode_hash(inode);
+ /*
+ * Wake up waiters in __wait_on_freeing_inode().
+ *
+ * Lockless hash lookup may end up finding the inode before we removed
+ * it above, but only lock it *after* we are done with the wakeup below.
+ * In this case the potential waiter cannot safely block.
+ *
+ * The inode being unhashed after the call to remove_inode_hash() is
+ * used as an indicator whether blocking on it is safe.
+ */
spin_lock(&inode->i_lock);
wake_up_bit(&inode->i_state, __I_NEW);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
@@ -888,18 +898,18 @@ long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
return freed;
}
-static void __wait_on_freeing_inode(struct inode *inode, bool locked);
+static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
/*
* Called with the inode lock held.
*/
static struct inode *find_inode(struct super_block *sb,
struct hlist_head *head,
int (*test)(struct inode *, void *),
- void *data, bool locked)
+ void *data, bool is_inode_hash_locked)
{
struct inode *inode = NULL;
- if (locked)
+ if (is_inode_hash_locked)
lockdep_assert_held(&inode_hash_lock);
else
lockdep_assert_not_held(&inode_hash_lock);
@@ -913,7 +923,7 @@ repeat:
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
- __wait_on_freeing_inode(inode, locked);
+ __wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
@@ -936,11 +946,11 @@ repeat:
*/
static struct inode *find_inode_fast(struct super_block *sb,
struct hlist_head *head, unsigned long ino,
- bool locked)
+ bool is_inode_hash_locked)
{
struct inode *inode = NULL;
- if (locked)
+ if (is_inode_hash_locked)
lockdep_assert_held(&inode_hash_lock);
else
lockdep_assert_not_held(&inode_hash_lock);
@@ -954,7 +964,7 @@ repeat:
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
- __wait_on_freeing_inode(inode, locked);
+ __wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
@@ -2287,19 +2297,29 @@ EXPORT_SYMBOL(inode_needs_sync);
* wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
* will DTRT.
*/
-static void __wait_on_freeing_inode(struct inode *inode, bool locked)
+static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
{
wait_queue_head_t *wq;
DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+
+ /*
+ * Handle racing against evict(), see that routine for more details.
+ */
+ if (unlikely(inode_unhashed(inode))) {
+ WARN_ON(is_inode_hash_locked);
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
wq = bit_waitqueue(&inode->i_state, __I_NEW);
prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
- if (locked)
+ if (is_inode_hash_locked)
spin_unlock(&inode_hash_lock);
schedule();
finish_wait(wq, &wait.wq_entry);
- if (locked)
+ if (is_inode_hash_locked)
spin_lock(&inode_hash_lock);
rcu_read_lock();
}
diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig
index 7c96bc107218..560187d61562 100644
--- a/fs/jffs2/Kconfig
+++ b/fs/jffs2/Kconfig
@@ -151,8 +151,9 @@ config JFFS2_RUBIN
RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure.
choice
- prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS
+ prompt "JFFS2 default compression mode"
default JFFS2_CMODE_PRIORITY
+ depends on JFFS2_COMPRESSION_OPTIONS
depends on JFFS2_FS
help
You can set here the default compression mode of JFFS2 from
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index cb3cda1390ad..5713994328cb 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1626,6 +1626,8 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
} else if (rc == -ENOSPC) {
/* search for next smaller log2 block */
l2nb = BLKSTOL2(nblocks) - 1;
+ if (unlikely(l2nb < 0))
+ break;
nblocks = 1LL << l2nb;
} else {
/* Trim any already allocated blocks */
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 031d8f570f58..5d3127ca68a4 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -834,6 +834,8 @@ int dtInsert(tid_t tid, struct inode *ip,
* the full page.
*/
DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
+ if (p->header.freelist == 0)
+ return -EINVAL;
/*
* insert entry for new key
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 2ec35889ad24..1407feccbc2d 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -290,7 +290,7 @@ int diSync(struct inode *ipimap)
int diRead(struct inode *ip)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
- int iagno, ino, extno, rc;
+ int iagno, ino, extno, rc, agno;
struct inode *ipimap;
struct dinode *dp;
struct iag *iagp;
@@ -339,8 +339,11 @@ int diRead(struct inode *ip)
/* get the ag for the iag */
agstart = le64_to_cpu(iagp->agstart);
+ agno = BLKTOAG(agstart, JFS_SBI(ip->i_sb));
release_metapage(mp);
+ if (agno >= MAXAG || agno < 0)
+ return -EIO;
rel_inode = (ino & (INOSPERPAGE - 1));
pageno = blkno >> sbi->l2nbperpage;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 9609349e92e5..270808b6219b 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1600,7 +1600,7 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
mp, sizeof(struct metapage), 0);
print_hex_dump(KERN_ERR, "page: ",
DUMP_PREFIX_ADDRESS, 16,
- sizeof(long), mp->page,
+ sizeof(long), mp->folio,
sizeof(struct page), 0);
} else
print_hex_dump(KERN_ERR, "tblock:",
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 961569c11159..df575a873ec6 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -4,6 +4,7 @@
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
+#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -46,9 +47,9 @@ static inline void __lock_metapage(struct metapage *mp)
do {
set_current_state(TASK_UNINTERRUPTIBLE);
if (metapage_locked(mp)) {
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
io_schedule();
- lock_page(mp->page);
+ folio_lock(mp->folio);
}
} while (trylock_metapage(mp));
__set_current_state(TASK_RUNNING);
@@ -56,7 +57,7 @@ static inline void __lock_metapage(struct metapage *mp)
}
/*
- * Must have mp->page locked
+ * Must have mp->folio locked
*/
static inline void lock_metapage(struct metapage *mp)
{
@@ -75,36 +76,36 @@ static mempool_t *metapage_mempool;
struct meta_anchor {
int mp_count;
atomic_t io_count;
+ blk_status_t status;
struct metapage *mp[MPS_PER_PAGE];
};
-#define mp_anchor(page) ((struct meta_anchor *)page_private(page))
-static inline struct metapage *page_to_mp(struct page *page, int offset)
+static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
- if (!PagePrivate(page))
+ struct meta_anchor *anchor = folio->private;
+
+ if (!anchor)
return NULL;
- return mp_anchor(page)->mp[offset >> L2PSIZE];
+ return anchor->mp[offset >> L2PSIZE];
}
-static inline int insert_metapage(struct page *page, struct metapage *mp)
+static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
struct meta_anchor *a;
int index;
int l2mp_blocks; /* log2 blocks per metapage */
- if (PagePrivate(page))
- a = mp_anchor(page);
- else {
+ a = folio->private;
+ if (!a) {
a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
if (!a)
return -ENOMEM;
- set_page_private(page, (unsigned long)a);
- SetPagePrivate(page);
- kmap(page);
+ folio_attach_private(folio, a);
+ kmap(&folio->page);
}
if (mp) {
- l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
+ l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
a->mp_count++;
a->mp[index] = mp;
@@ -113,10 +114,10 @@ static inline int insert_metapage(struct page *page, struct metapage *mp)
return 0;
}
-static inline void remove_metapage(struct page *page, struct metapage *mp)
+static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
- struct meta_anchor *a = mp_anchor(page);
- int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
+ struct meta_anchor *a = folio->private;
+ int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
int index;
index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
@@ -126,48 +127,53 @@ static inline void remove_metapage(struct page *page, struct metapage *mp)
a->mp[index] = NULL;
if (--a->mp_count == 0) {
kfree(a);
- set_page_private(page, 0);
- ClearPagePrivate(page);
- kunmap(page);
+ folio_detach_private(folio);
+ kunmap(&folio->page);
}
}
-static inline void inc_io(struct page *page)
+static inline void inc_io(struct folio *folio)
{
- atomic_inc(&mp_anchor(page)->io_count);
+ struct meta_anchor *anchor = folio->private;
+
+ atomic_inc(&anchor->io_count);
}
-static inline void dec_io(struct page *page, void (*handler) (struct page *))
+static inline void dec_io(struct folio *folio, blk_status_t status,
+ void (*handler)(struct folio *, blk_status_t))
{
- if (atomic_dec_and_test(&mp_anchor(page)->io_count))
- handler(page);
+ struct meta_anchor *anchor = folio->private;
+
+ if (anchor->status == BLK_STS_OK)
+ anchor->status = status;
+
+ if (atomic_dec_and_test(&anchor->io_count))
+ handler(folio, anchor->status);
}
#else
-static inline struct metapage *page_to_mp(struct page *page, int offset)
+static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
- return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
+ return folio->private;
}
-static inline int insert_metapage(struct page *page, struct metapage *mp)
+static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
if (mp) {
- set_page_private(page, (unsigned long)mp);
- SetPagePrivate(page);
- kmap(page);
+ folio_attach_private(folio, mp);
+ kmap(&folio->page);
}
return 0;
}
-static inline void remove_metapage(struct page *page, struct metapage *mp)
+static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
- set_page_private(page, 0);
- ClearPagePrivate(page);
- kunmap(page);
+ folio_detach_private(folio);
+ kunmap(&folio->page);
}
-#define inc_io(page) do {} while(0)
-#define dec_io(page, handler) handler(page)
+#define inc_io(folio) do {} while(0)
+#define dec_io(folio, status, handler) handler(folio, status)
#endif
@@ -218,12 +224,12 @@ void metapage_exit(void)
kmem_cache_destroy(metapage_cache);
}
-static inline void drop_metapage(struct page *page, struct metapage *mp)
+static inline void drop_metapage(struct folio *folio, struct metapage *mp)
{
if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
test_bit(META_io, &mp->flag))
return;
- remove_metapage(page, mp);
+ remove_metapage(folio, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
@@ -257,23 +263,20 @@ static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
return lblock;
}
-static void last_read_complete(struct page *page)
+static void last_read_complete(struct folio *folio, blk_status_t status)
{
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
+ if (status)
+ printk(KERN_ERR "Read error %d at %#llx\n", status,
+ folio_pos(folio));
+
+ folio_end_read(folio, status == 0);
}
static void metapage_read_end_io(struct bio *bio)
{
- struct page *page = bio->bi_private;
-
- if (bio->bi_status) {
- printk(KERN_ERR "metapage_read_end_io: I/O error\n");
- SetPageError(page);
- }
+ struct folio *folio = bio->bi_private;
- dec_io(page, last_read_complete);
+ dec_io(folio, bio->bi_status, last_read_complete);
bio_put(bio);
}
@@ -299,13 +302,19 @@ static void remove_from_logsync(struct metapage *mp)
LOGSYNC_UNLOCK(log, flags);
}
-static void last_write_complete(struct page *page)
+static void last_write_complete(struct folio *folio, blk_status_t status)
{
struct metapage *mp;
unsigned int offset;
+ if (status) {
+ int err = blk_status_to_errno(status);
+ printk(KERN_ERR "metapage_write_end_io: I/O error\n");
+ mapping_set_error(folio->mapping, err);
+ }
+
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = folio_to_mp(folio, offset);
if (mp && test_bit(META_io, &mp->flag)) {
if (mp->lsn)
remove_from_logsync(mp);
@@ -316,28 +325,25 @@ static void last_write_complete(struct page *page)
* safe unless I have the page locked
*/
}
- end_page_writeback(page);
+ folio_end_writeback(folio);
}
static void metapage_write_end_io(struct bio *bio)
{
- struct page *page = bio->bi_private;
+ struct folio *folio = bio->bi_private;
- BUG_ON(!PagePrivate(page));
+ BUG_ON(!folio->private);
- if (bio->bi_status) {
- printk(KERN_ERR "metapage_write_end_io: I/O error\n");
- SetPageError(page);
- }
- dec_io(page, last_write_complete);
+ dec_io(folio, bio->bi_status, last_write_complete);
bio_put(bio);
}
-static int metapage_writepage(struct page *page, struct writeback_control *wbc)
+static int metapage_write_folio(struct folio *folio,
+ struct writeback_control *wbc, void *unused)
{
struct bio *bio = NULL;
int block_offset; /* block offset of mp within page */
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
int len;
int xlen;
@@ -353,14 +359,13 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
int offset;
int bad_blocks = 0;
- page_start = (sector_t)page->index <<
- (PAGE_SHIFT - inode->i_blkbits);
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ page_start = folio_pos(folio) >> inode->i_blkbits;
+ BUG_ON(!folio_test_locked(folio));
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = folio_to_mp(folio, offset);
if (!mp || !test_bit(META_dirty, &mp->flag))
continue;
@@ -389,22 +394,20 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
continue;
}
/* Not contiguous */
- if (bio_add_page(bio, page, bio_bytes, bio_offset) <
- bio_bytes)
- goto add_failed;
+ bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
/*
* Increment counter before submitting i/o to keep
* count from hitting zero before we're through
*/
- inc_io(page);
+ inc_io(folio);
if (!bio->bi_iter.bi_size)
goto dump_bio;
submit_bio(bio);
nr_underway++;
bio = NULL;
} else
- inc_io(page);
- xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
+ inc_io(folio);
+ xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
pblock = metapage_get_blocks(inode, lblock, &xlen);
if (!pblock) {
printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -420,7 +423,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
- bio->bi_private = page;
+ bio->bi_private = folio;
/* Don't call bio_add_page yet, we may add to this vec */
bio_offset = offset;
@@ -430,8 +433,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
next_block = lblock + len;
}
if (bio) {
- if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
- goto add_failed;
+ bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
if (!bio->bi_iter.bi_size)
goto dump_bio;
@@ -439,50 +441,56 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
nr_underway++;
}
if (redirty)
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
- unlock_page(page);
+ folio_unlock(folio);
if (bad_blocks)
goto err_out;
if (nr_underway == 0)
- end_page_writeback(page);
+ folio_end_writeback(folio);
return 0;
-add_failed:
- /* We should never reach here, since we're only adding one vec */
- printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
- goto skip;
dump_bio:
print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
4, bio, sizeof(*bio), 0);
-skip:
bio_put(bio);
- unlock_page(page);
- dec_io(page, last_write_complete);
+ folio_unlock(folio);
+ dec_io(folio, BLK_STS_OK, last_write_complete);
err_out:
while (bad_blocks--)
- dec_io(page, last_write_complete);
+ dec_io(folio, BLK_STS_OK, last_write_complete);
return -EIO;
}
+static int metapage_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct blk_plug plug;
+ int err;
+
+ blk_start_plug(&plug);
+ err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL);
+ blk_finish_plug(&plug);
+
+ return err;
+}
+
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct bio *bio = NULL;
int block_offset;
- int blocks_per_page = i_blocks_per_page(inode, page);
+ int blocks_per_page = i_blocks_per_folio(inode, folio);
sector_t page_start; /* address of page in fs blocks */
sector_t pblock;
int xlen;
unsigned int len;
int offset;
- BUG_ON(!PageLocked(page));
- page_start = (sector_t)page->index <<
- (PAGE_SHIFT - inode->i_blkbits);
+ BUG_ON(!folio_test_locked(folio));
+ page_start = folio_pos(folio) >> inode->i_blkbits;
block_offset = 0;
while (block_offset < blocks_per_page) {
@@ -490,9 +498,9 @@ static int metapage_read_folio(struct file *fp, struct folio *folio)
pblock = metapage_get_blocks(inode, page_start + block_offset,
&xlen);
if (pblock) {
- if (!PagePrivate(page))
- insert_metapage(page, NULL);
- inc_io(page);
+ if (!folio->private)
+ insert_metapage(folio, NULL);
+ inc_io(folio);
if (bio)
submit_bio(bio);
@@ -501,11 +509,10 @@ static int metapage_read_folio(struct file *fp, struct folio *folio)
bio->bi_iter.bi_sector =
pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
- bio->bi_private = page;
+ bio->bi_private = folio;
len = xlen << inode->i_blkbits;
offset = block_offset << inode->i_blkbits;
- if (bio_add_page(bio, page, len, offset) < len)
- goto add_failed;
+ bio_add_folio_nofail(bio, folio, len, offset);
block_offset += xlen;
} else
block_offset++;
@@ -513,15 +520,9 @@ static int metapage_read_folio(struct file *fp, struct folio *folio)
if (bio)
submit_bio(bio);
else
- unlock_page(page);
+ folio_unlock(folio);
return 0;
-
-add_failed:
- printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
- bio_put(bio);
- dec_io(page, last_read_complete);
- return -EIO;
}
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
@@ -531,7 +532,7 @@ static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
int offset;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(&folio->page, offset);
+ mp = folio_to_mp(folio, offset);
if (!mp)
continue;
@@ -546,7 +547,7 @@ static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
}
if (mp->lsn)
remove_from_logsync(mp);
- remove_metapage(&folio->page, mp);
+ remove_metapage(folio, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
@@ -565,7 +566,7 @@ static void metapage_invalidate_folio(struct folio *folio, size_t offset,
const struct address_space_operations jfs_metapage_aops = {
.read_folio = metapage_read_folio,
- .writepage = metapage_writepage,
+ .writepages = metapage_writepages,
.release_folio = metapage_release_folio,
.invalidate_folio = metapage_invalidate_folio,
.dirty_folio = filemap_dirty_folio,
@@ -579,7 +580,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
int l2bsize;
struct address_space *mapping;
struct metapage *mp = NULL;
- struct page *page;
+ struct folio *folio;
unsigned long page_index;
unsigned long page_offset;
@@ -610,22 +611,22 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
}
if (new && (PSIZE == PAGE_SIZE)) {
- page = grab_cache_page(mapping, page_index);
- if (!page) {
- jfs_err("grab_cache_page failed!");
+ folio = filemap_grab_folio(mapping, page_index);
+ if (IS_ERR(folio)) {
+ jfs_err("filemap_grab_folio failed!");
return NULL;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else {
- page = read_mapping_page(mapping, page_index, NULL);
- if (IS_ERR(page)) {
+ folio = read_mapping_folio(mapping, page_index, NULL);
+ if (IS_ERR(folio)) {
jfs_err("read_mapping_page failed!");
return NULL;
}
- lock_page(page);
+ folio_lock(folio);
}
- mp = page_to_mp(page, page_offset);
+ mp = folio_to_mp(folio, page_offset);
if (mp) {
if (mp->logical_size != size) {
jfs_error(inode->i_sb,
@@ -651,16 +652,16 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
mp = alloc_metapage(GFP_NOFS);
if (!mp)
goto unlock;
- mp->page = page;
+ mp->folio = folio;
mp->sb = inode->i_sb;
mp->flag = 0;
mp->xflag = COMMIT_PAGE;
mp->count = 1;
mp->nohomeok = 0;
mp->logical_size = size;
- mp->data = page_address(page) + page_offset;
+ mp->data = folio_address(folio) + page_offset;
mp->index = lblock;
- if (unlikely(insert_metapage(page, mp))) {
+ if (unlikely(insert_metapage(folio, mp))) {
free_metapage(mp);
goto unlock;
}
@@ -672,28 +673,27 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
memset(mp->data, 0, PSIZE);
}
- unlock_page(page);
+ folio_unlock(folio);
jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
return mp;
unlock:
- unlock_page(page);
+ folio_unlock(folio);
return NULL;
}
void grab_metapage(struct metapage * mp)
{
jfs_info("grab_metapage: mp = 0x%p", mp);
- get_page(mp->page);
- lock_page(mp->page);
+ folio_get(mp->folio);
+ folio_lock(mp->folio);
mp->count++;
lock_metapage(mp);
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
}
-static int metapage_write_one(struct page *page)
+static int metapage_write_one(struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct address_space *mapping = folio->mapping;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
@@ -707,7 +707,7 @@ static int metapage_write_one(struct page *page)
if (folio_clear_dirty_for_io(folio)) {
folio_get(folio);
- ret = metapage_writepage(page, &wbc);
+ ret = metapage_write_folio(folio, &wbc, NULL);
if (ret == 0)
folio_wait_writeback(folio);
folio_put(folio);
@@ -722,71 +722,69 @@ static int metapage_write_one(struct page *page)
void force_metapage(struct metapage *mp)
{
- struct page *page = mp->page;
+ struct folio *folio = mp->folio;
jfs_info("force_metapage: mp = 0x%p", mp);
set_bit(META_forcewrite, &mp->flag);
clear_bit(META_sync, &mp->flag);
- get_page(page);
- lock_page(page);
- set_page_dirty(page);
- if (metapage_write_one(page))
+ folio_get(folio);
+ folio_lock(folio);
+ folio_mark_dirty(folio);
+ if (metapage_write_one(folio))
jfs_error(mp->sb, "metapage_write_one() failed\n");
clear_bit(META_forcewrite, &mp->flag);
- put_page(page);
+ folio_put(folio);
}
void hold_metapage(struct metapage *mp)
{
- lock_page(mp->page);
+ folio_lock(mp->folio);
}
void put_metapage(struct metapage *mp)
{
if (mp->count || mp->nohomeok) {
/* Someone else will release this */
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
return;
}
- get_page(mp->page);
+ folio_get(mp->folio);
mp->count++;
lock_metapage(mp);
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
release_metapage(mp);
}
void release_metapage(struct metapage * mp)
{
- struct page *page = mp->page;
+ struct folio *folio = mp->folio;
jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);
- BUG_ON(!page);
-
- lock_page(page);
+ folio_lock(folio);
unlock_metapage(mp);
assert(mp->count);
if (--mp->count || mp->nohomeok) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return;
}
if (test_bit(META_dirty, &mp->flag)) {
- set_page_dirty(page);
+ folio_mark_dirty(folio);
if (test_bit(META_sync, &mp->flag)) {
clear_bit(META_sync, &mp->flag);
- if (metapage_write_one(page))
+ if (metapage_write_one(folio))
jfs_error(mp->sb, "metapage_write_one() failed\n");
- lock_page(page);
+ folio_lock(folio);
}
} else if (mp->lsn) /* discard_metapage doesn't remove it */
remove_from_logsync(mp);
/* Try to keep metapages from using up too much memory */
- drop_metapage(page, mp);
+ drop_metapage(folio, mp);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
@@ -798,7 +796,6 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
struct address_space *mapping =
JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
struct metapage *mp;
- struct page *page;
unsigned int offset;
/*
@@ -807,11 +804,12 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
*/
for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
lblock += BlocksPerPage) {
- page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
- if (!page)
+ struct folio *folio = filemap_lock_folio(mapping,
+ lblock >> l2BlocksPerPage);
+ if (IS_ERR(folio))
continue;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = folio_to_mp(folio, offset);
if (!mp)
continue;
if (mp->index < addr)
@@ -824,8 +822,8 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
if (mp->lsn)
remove_from_logsync(mp);
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
}
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index 4179f9df4deb..2e5015c2705b 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -24,7 +24,7 @@ struct metapage {
wait_queue_head_t wait;
/* implementation */
- struct page *page;
+ struct folio *folio;
struct super_block *sb;
unsigned int logical_size;
@@ -90,14 +90,14 @@ static inline void discard_metapage(struct metapage *mp)
static inline void metapage_nohomeok(struct metapage *mp)
{
- struct page *page = mp->page;
- lock_page(page);
+ struct folio *folio = mp->folio;
+ folio_lock(folio);
if (!mp->nohomeok++) {
mark_metapage_dirty(mp);
- get_page(page);
- wait_on_page_writeback(page);
+ folio_get(folio);
+ folio_wait_writeback(folio);
}
- unlock_page(page);
+ folio_unlock(folio);
}
/*
@@ -107,7 +107,7 @@ static inline void metapage_nohomeok(struct metapage *mp)
static inline void metapage_wait_for_io(struct metapage *mp)
{
if (test_bit(META_io, &mp->flag))
- wait_on_page_writeback(mp->page);
+ folio_wait_writeback(mp->folio);
}
/*
@@ -116,7 +116,7 @@ static inline void metapage_wait_for_io(struct metapage *mp)
static inline void _metapage_homeok(struct metapage *mp)
{
if (!--mp->nohomeok)
- put_page(mp->page);
+ folio_put(mp->folio);
}
static inline void metapage_homeok(struct metapage *mp)
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 9987055293b3..2999ed5d83f5 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -797,7 +797,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
size_t buf_size)
{
struct jfs_ea_list *ealist;
- struct jfs_ea *ea;
+ struct jfs_ea *ea, *ealist_end;
struct ea_buffer ea_buf;
int xattr_size;
ssize_t size;
@@ -817,9 +817,16 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
goto not_found;
ealist = (struct jfs_ea_list *) ea_buf.xattr;
+ ealist_end = END_EALIST(ealist);
/* Find the named attribute */
- for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
+ for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
+ if (unlikely(ea + 1 > ealist_end) ||
+ unlikely(NEXT_EA(ea) > ealist_end)) {
+ size = -EUCLEAN;
+ goto release;
+ }
+
if ((namelen == ea->namelen) &&
memcmp(name, ea->name, namelen) == 0) {
/* Found it */
@@ -834,6 +841,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
memcpy(data, value, size);
goto release;
}
+ }
not_found:
size = -ENODATA;
release:
@@ -861,7 +869,7 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
ssize_t size = 0;
int xattr_size;
struct jfs_ea_list *ealist;
- struct jfs_ea *ea;
+ struct jfs_ea *ea, *ealist_end;
struct ea_buffer ea_buf;
down_read(&JFS_IP(inode)->xattr_sem);
@@ -876,9 +884,16 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
goto release;
ealist = (struct jfs_ea_list *) ea_buf.xattr;
+ ealist_end = END_EALIST(ealist);
/* compute required size of list */
- for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+ for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
+ if (unlikely(ea + 1 > ealist_end) ||
+ unlikely(NEXT_EA(ea) > ealist_end)) {
+ size = -EUCLEAN;
+ goto release;
+ }
+
if (can_list(ea))
size += name_size(ea) + 1;
}
diff --git a/fs/locks.c b/fs/locks.c
index bdd94c32256f..9afb16e0683f 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2570,8 +2570,9 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
error = do_lock_file_wait(filp, cmd, file_lock);
/*
- * Attempt to detect a close/fcntl race and recover by releasing the
- * lock that was just acquired. There is no need to do that when we're
+ * Detect close/fcntl races and recover by zapping all POSIX locks
+ * associated with this file and our files_struct, just like on
+ * filp_flush(). There is no need to do that when we're
* unlocking though, or for OFD locks.
*/
if (!error && file_lock->c.flc_type != F_UNLCK &&
@@ -2586,9 +2587,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
f = files_lookup_fd_locked(files, fd);
spin_unlock(&files->file_lock);
if (f != filp) {
- file_lock->c.flc_type = F_UNLCK;
- error = do_lock_file_wait(filp, cmd, file_lock);
- WARN_ON_ONCE(error);
+ locks_remove_posix(filp, files);
error = -EBADF;
}
}
diff --git a/fs/namei.c b/fs/namei.c
index 3a4c40e12f78..5512cb10fa89 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3248,9 +3248,9 @@ static inline umode_t vfs_prepare_mode(struct mnt_idmap *idmap,
/**
* vfs_create - create new file
* @idmap: idmap of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
- * @mode: mode of the new file
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child file
+ * @mode: mode of the child file
* @want_excl: whether the file must not yet exist
*
* Create a new file.
@@ -4047,9 +4047,9 @@ EXPORT_SYMBOL(user_path_create);
/**
* vfs_mknod - create device node or file
* @idmap: idmap of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
- * @mode: mode of the new device node or file
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child device node
+ * @mode: mode of the child device node
* @dev: device number of device to create
*
* Create a device node or file.
@@ -4174,9 +4174,9 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
/**
* vfs_mkdir - create directory
* @idmap: idmap of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
- * @mode: mode of the new directory
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child directory
+ * @mode: mode of the child directory
*
* Create a directory.
*
@@ -4256,8 +4256,8 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
/**
* vfs_rmdir - remove directory
* @idmap: idmap of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child directory
*
* Remove a directory.
*
@@ -4537,8 +4537,8 @@ SYSCALL_DEFINE1(unlink, const char __user *, pathname)
/**
* vfs_symlink - create symlink
* @idmap: idmap of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child symlink file
* @oldname: name of the file to link to
*
* Create a symlink.
diff --git a/fs/namespace.c b/fs/namespace.c
index 221db9de4729..328087a4df8a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -70,7 +70,7 @@ static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
/* Don't allow confusion with old 32bit mount ID */
-#define MNT_UNIQUE_ID_OFFSET (1ULL << 32)
+#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static atomic64_t mnt_id_ctr = ATOMIC64_INIT(MNT_UNIQUE_ID_OFFSET);
static struct hlist_head *mount_hashtable __ro_after_init;
diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig
index bec805e0c44c..1b78e8b65ebc 100644
--- a/fs/netfs/Kconfig
+++ b/fs/netfs/Kconfig
@@ -22,6 +22,14 @@ config NETFS_STATS
between CPUs. On the other hand, the stats are very useful for
debugging purposes. Saying 'Y' here is recommended.
+config NETFS_DEBUG
+	bool "Enable dynamic debugging for netfslib and FS-Cache"
+ depends on NETFS
+ help
+ This permits debugging to be dynamically enabled in the local caching
+ management module. If this is set, the debugging output may be
+ enabled by setting bits in /sys/module/netfs/parameters/debug.
+
config FSCACHE
bool "General filesystem local caching manager"
depends on NETFS_SUPPORT
@@ -50,13 +58,3 @@ config FSCACHE_STATS
debugging purposes. Saying 'Y' here is recommended.
See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_DEBUG
- bool "Debug FS-Cache"
- depends on FSCACHE
- help
- This permits debugging to be dynamically enabled in the local caching
- management module. If this is set, the debugging output may be
- enabled by setting bits in /sys/modules/fscache/parameter/debug.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index a6d5d07cd436..a688d4c75d99 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -117,7 +117,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
if (folio->index == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
- kdebug("no unlock");
+ _debug("no unlock");
else
folio_unlock(folio);
}
@@ -204,7 +204,7 @@ void netfs_readahead(struct readahead_control *ractl)
struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
int ret;
- kenter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+ _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
if (readahead_count(ractl) == 0)
return;
@@ -268,7 +268,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
struct folio *sink = NULL;
int ret;
- kenter("%lx", folio->index);
+ _enter("%lx", folio->index);
rreq = netfs_alloc_request(mapping, file,
folio_pos(folio), folio_size(folio),
@@ -508,7 +508,7 @@ retry:
have_folio:
*_folio = folio;
- kleave(" = 0");
+ _leave(" = 0");
return 0;
error_put:
@@ -518,7 +518,7 @@ error:
folio_unlock(folio);
folio_put(folio);
}
- kleave(" = %d", ret);
+ _leave(" = %d", ret);
return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
@@ -536,7 +536,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t flen = folio_size(folio);
int ret;
- kenter("%zx @%llx", flen, start);
+ _enter("%zx @%llx", flen, start);
ret = -ENOMEM;
@@ -567,7 +567,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
error:
- kleave(" = %d", ret);
+ _leave(" = %d", ret);
return ret;
}
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 68a3f1383cee..4726c315453c 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -56,7 +56,7 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
struct netfs_group *group = netfs_folio_group(folio);
loff_t pos = folio_pos(folio);
- kenter("");
+ _enter("");
if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
return NETFS_FLUSH_CONTENT;
@@ -272,12 +272,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
*/
howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
flen, offset, part, maybe_trouble);
- kdebug("howto %u", howto);
+ _debug("howto %u", howto);
switch (howto) {
case NETFS_JUST_PREFETCH:
ret = netfs_prefetch_for_write(file, folio, offset, part);
if (ret < 0) {
- kdebug("prefetch = %zd", ret);
+ _debug("prefetch = %zd", ret);
goto error_folio_unlock;
}
break;
@@ -418,7 +418,7 @@ out:
}
iocb->ki_pos += written;
- kleave(" = %zd [%zd]", written, ret);
+ _leave(" = %zd [%zd]", written, ret);
return written ? written : ret;
error_folio_unlock:
@@ -491,7 +491,7 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct netfs_inode *ictx = netfs_inode(inode);
ssize_t ret;
- kenter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+ _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
@@ -529,7 +529,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
vm_fault_t ret = VM_FAULT_RETRY;
int err;
- kenter("%lx", folio->index);
+ _enter("%lx", folio->index);
sb_start_pagefault(inode->i_sb);
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index b6debac6205f..10a1e4da6bda 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -33,7 +33,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
size_t orig_count = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
- kenter("");
+ _enter("");
if (!orig_count)
return 0; /* Don't update atime */
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index 792ef17bae21..88f2adfab75e 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -37,7 +37,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
size_t len = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
- kenter("");
+ _enter("");
/* We're going to need a bounce buffer if what we transmit is going to
* be different in some way to the source buffer, e.g. because it gets
@@ -45,7 +45,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
*/
// TODO
- kdebug("uw %llx-%llx", start, end);
+ _debug("uw %llx-%llx", start, end);
wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
iocb->ki_flags & IOCB_DIRECT ?
@@ -96,7 +96,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
wreq->cleanup = netfs_cleanup_dio_write;
ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
if (ret < 0) {
- kdebug("begin = %zd", ret);
+ _debug("begin = %zd", ret);
goto out;
}
@@ -143,7 +143,7 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
loff_t pos = iocb->ki_pos;
unsigned long long end = pos + iov_iter_count(from) - 1;
- kenter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
+ _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
diff --git a/fs/netfs/fscache_cache.c b/fs/netfs/fscache_cache.c
index 288a73c3072d..9397ed39b0b4 100644
--- a/fs/netfs/fscache_cache.c
+++ b/fs/netfs/fscache_cache.c
@@ -237,7 +237,7 @@ int fscache_add_cache(struct fscache_cache *cache,
{
int n_accesses;
- kenter("{%s,%s}", ops->name, cache->name);
+ _enter("{%s,%s}", ops->name, cache->name);
BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
@@ -257,7 +257,7 @@ int fscache_add_cache(struct fscache_cache *cache,
up_write(&fscache_addremove_sem);
pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
- kleave(" = 0 [%s]", cache->name);
+ _leave(" = 0 [%s]", cache->name);
return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
diff --git a/fs/netfs/fscache_cookie.c b/fs/netfs/fscache_cookie.c
index 4d1e8bf4c615..bce2492186d0 100644
--- a/fs/netfs/fscache_cookie.c
+++ b/fs/netfs/fscache_cookie.c
@@ -456,7 +456,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
{
struct fscache_cookie *cookie;
- kenter("V=%x", volume->debug_id);
+ _enter("V=%x", volume->debug_id);
if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
return NULL;
@@ -484,7 +484,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
trace_fscache_acquire(cookie);
fscache_stat(&fscache_n_acquires_ok);
- kleave(" = c=%08x", cookie->debug_id);
+ _leave(" = c=%08x", cookie->debug_id);
return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
@@ -505,7 +505,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
bool need_withdraw = false;
- kenter("");
+ _enter("");
if (!cookie->volume->cache_priv) {
fscache_create_volume(cookie->volume, true);
@@ -519,7 +519,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
need_withdraw = true;
- kleave(" [fail]");
+ _leave(" [fail]");
goto out;
}
@@ -572,7 +572,7 @@ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
bool queue = false;
int n_active;
- kenter("c=%08x", cookie->debug_id);
+ _enter("c=%08x", cookie->debug_id);
if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
"Trying to use relinquished cookie\n"))
@@ -636,7 +636,7 @@ again:
spin_unlock(&cookie->lock);
if (queue)
fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
- kleave("");
+ _leave("");
}
EXPORT_SYMBOL(__fscache_use_cookie);
@@ -702,7 +702,7 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
enum fscache_cookie_state state;
bool wake = false;
- kenter("c=%x", cookie->debug_id);
+ _enter("c=%x", cookie->debug_id);
again:
spin_lock(&cookie->lock);
@@ -820,7 +820,7 @@ out:
spin_unlock(&cookie->lock);
if (wake)
wake_up_cookie_state(cookie);
- kleave("");
+ _leave("");
}
static void fscache_cookie_worker(struct work_struct *work)
@@ -867,7 +867,7 @@ static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
spin_unlock(&cookie->lock);
fscache_stat(&fscache_n_cookies_lru_expired);
- kdebug("lru c=%x", cookie->debug_id);
+ _debug("lru c=%x", cookie->debug_id);
__fscache_withdraw_cookie(cookie);
}
@@ -971,7 +971,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
if (retire)
fscache_stat(&fscache_n_relinquishes_retire);
- kenter("c=%08x{%d},%d",
+ _enter("c=%08x{%d},%d",
cookie->debug_id, atomic_read(&cookie->n_active), retire);
if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
@@ -1050,7 +1050,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
{
bool is_caching;
- kenter("c=%x", cookie->debug_id);
+ _enter("c=%x", cookie->debug_id);
fscache_stat(&fscache_n_invalidates);
@@ -1072,7 +1072,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
default:
spin_unlock(&cookie->lock);
- kleave(" [no %u]", cookie->state);
+ _leave(" [no %u]", cookie->state);
return;
case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -1081,7 +1081,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
fallthrough;
case FSCACHE_COOKIE_STATE_CREATING:
spin_unlock(&cookie->lock);
- kleave(" [look %x]", cookie->inval_counter);
+ _leave(" [look %x]", cookie->inval_counter);
return;
case FSCACHE_COOKIE_STATE_ACTIVE:
@@ -1094,7 +1094,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
if (is_caching)
fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
- kleave(" [inv]");
+ _leave(" [inv]");
return;
}
}
diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c
index bf4eaeec44fb..38637e5c9b57 100644
--- a/fs/netfs/fscache_io.c
+++ b/fs/netfs/fscache_io.c
@@ -28,12 +28,12 @@ bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
again:
if (!fscache_cache_is_live(cookie->volume->cache)) {
- kleave(" [broken]");
+ _leave(" [broken]");
return false;
}
state = fscache_cookie_state(cookie);
- kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
switch (state) {
case FSCACHE_COOKIE_STATE_CREATING:
@@ -52,7 +52,7 @@ again:
case FSCACHE_COOKIE_STATE_DROPPED:
case FSCACHE_COOKIE_STATE_RELINQUISHING:
default:
- kleave(" [not live]");
+ _leave(" [not live]");
return false;
}
@@ -92,7 +92,7 @@ again:
spin_lock(&cookie->lock);
state = fscache_cookie_state(cookie);
- kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
switch (state) {
case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -140,7 +140,7 @@ failed:
cres->cache_priv = NULL;
cres->ops = NULL;
fscache_end_cookie_access(cookie, fscache_access_io_not_live);
- kleave(" = -ENOBUFS");
+ _leave(" = -ENOBUFS");
return -ENOBUFS;
}
@@ -224,7 +224,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
if (len == 0)
goto abandon;
- kenter("%llx,%zx", start, len);
+ _enter("%llx,%zx", start, len);
wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
if (!wreq)
diff --git a/fs/netfs/fscache_main.c b/fs/netfs/fscache_main.c
index bf9b33d26e31..42e98bb523e3 100644
--- a/fs/netfs/fscache_main.c
+++ b/fs/netfs/fscache_main.c
@@ -99,7 +99,7 @@ error_wq:
*/
void __exit fscache_exit(void)
{
- kenter("");
+ _enter("");
kmem_cache_destroy(fscache_cookie_jar);
fscache_proc_cleanup();
diff --git a/fs/netfs/fscache_volume.c b/fs/netfs/fscache_volume.c
index 2e2a405ca9b0..cb75c07b5281 100644
--- a/fs/netfs/fscache_volume.c
+++ b/fs/netfs/fscache_volume.c
@@ -264,7 +264,7 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
fscache_see_volume(volume, fscache_volume_new_acquire);
fscache_stat(&fscache_n_volumes);
up_write(&fscache_addremove_sem);
- kleave(" = v=%x", volume->debug_id);
+ _leave(" = v=%x", volume->debug_id);
return volume;
err_vol:
@@ -466,7 +466,7 @@ void fscache_withdraw_volume(struct fscache_volume *volume)
{
int n_accesses;
- kdebug("withdraw V=%x", volume->debug_id);
+ _debug("withdraw V=%x", volume->debug_id);
/* Allow wakeups on dec-to-0 */
n_accesses = atomic_dec_return(&volume->n_accesses);
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 21e46bc9aa49..7773f3d855a9 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -34,6 +34,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
/*
* main.c
*/
+extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
@@ -353,12 +354,42 @@ void fscache_create_volume(struct fscache_volume *volume, bool wait);
* debug tracing
*/
#define dbgprintk(FMT, ...) \
- pr_debug("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+ printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
+#ifdef __KDEBUG
+#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
+#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
+#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
+
+#elif defined(CONFIG_NETFS_DEBUG)
+#define _enter(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kenter(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _leave(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kleave(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _debug(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kdebug(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#else
+#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
+#endif
+
/*
* assertions
*/
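
The restored _enter()/_leave()/_debug() macros give three build-time behaviours: with __KDEBUG they always print, with CONFIG_NETFS_DEBUG they print only while the netfs_debug module parameter declared above is non-zero, and otherwise they collapse into no_printk() so the format arguments are still type-checked without emitting any output. A minimal standalone sketch of the same three-way gating; FORCE_DEBUG and RUNTIME_DEBUG are invented names and the if (0) branch is merely a stand-in for no_printk():

#include <stdio.h>

static unsigned int debug_enabled;      /* stands in for the netfs_debug module parameter */

#define dbgprintk(FMT, ...) printf("[dbg] " FMT "\n", ##__VA_ARGS__)

#ifdef FORCE_DEBUG                      /* plays the role of __KDEBUG: always print */
#define _enter(FMT, ...) dbgprintk("==> %s(" FMT ")", __func__, ##__VA_ARGS__)
#elif defined(RUNTIME_DEBUG)            /* plays the role of CONFIG_NETFS_DEBUG: runtime switch */
#define _enter(FMT, ...) \
do { \
        if (debug_enabled) \
                dbgprintk("==> %s(" FMT ")", __func__, ##__VA_ARGS__); \
} while (0)
#else                                   /* default: arguments type-checked, nothing printed */
#define _enter(FMT, ...) \
do { \
        if (0) \
                dbgprintk("==> %s(" FMT ")", __func__, ##__VA_ARGS__); \
} while (0)
#endif

int main(void)
{
        debug_enabled = 1;
        _enter("x=%d", 42);             /* prints in the first two configurations only */
        return 0;
}
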
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index c7576481c321..c93851b98368 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -130,7 +130,7 @@ static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
if (count == remaining)
return;
- kdebug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+ _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
rreq->debug_id, subreq->debug_index,
iov_iter_count(&subreq->io_iter), subreq->transferred,
subreq->len, rreq->i_size,
@@ -326,7 +326,7 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
struct netfs_io_request *rreq = subreq->rreq;
int u;
- kenter("R=%x[%x]{%llx,%lx},%zd",
+ _enter("R=%x[%x]{%llx,%lx},%zd",
rreq->debug_id, subreq->debug_index,
subreq->start, subreq->flags, transferred_or_error);
@@ -435,7 +435,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
struct netfs_inode *ictx = netfs_inode(rreq->inode);
size_t lsize;
- kenter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+ _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
if (rreq->origin != NETFS_DIO_READ) {
source = netfs_cache_prepare_read(subreq, rreq->i_size);
@@ -518,7 +518,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
subreq->start = rreq->start + rreq->submitted;
subreq->len = io_iter->count;
- kdebug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
+ _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
/* Call out to the cache to find out what it can do with the remaining
@@ -570,7 +570,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
struct iov_iter io_iter;
int ret;
- kenter("R=%x %llx-%llx",
+ _enter("R=%x %llx-%llx",
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
if (rreq->len == 0) {
@@ -593,7 +593,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
atomic_set(&rreq->nr_outstanding, 1);
io_iter = rreq->io_iter;
do {
- kdebug("submit %llx + %llx >= %llx",
+ _debug("submit %llx + %llx >= %llx",
rreq->start, rreq->submitted, rreq->i_size);
if (rreq->origin == NETFS_DIO_READ &&
rreq->start + rreq->submitted >= rreq->i_size)
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index db824c372842..5f0f438e5d21 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -20,6 +20,10 @@ MODULE_LICENSE("GPL");
EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
+unsigned netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
+
static struct kmem_cache *netfs_request_slab;
static struct kmem_cache *netfs_subrequest_slab;
mempool_t netfs_request_pool;
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 172808e83ca8..83e644bd518f 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -26,7 +26,7 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
struct fscache_cookie *cookie = netfs_i_cookie(ictx);
bool need_use = false;
- kenter("");
+ _enter("");
if (!filemap_dirty_folio(mapping, folio))
return false;
@@ -99,7 +99,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
struct netfs_folio *finfo;
size_t flen = folio_size(folio);
- kenter("{%lx},%zx,%zx", folio->index, offset, length);
+ _enter("{%lx},%zx,%zx", folio->index, offset, length);
if (!folio_test_private(folio))
return;
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 488147439fe0..426cf87aaf2e 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -161,7 +161,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
{
struct list_head *next;
- kenter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+ _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
if (list_empty(&stream->subrequests))
return;
@@ -374,7 +374,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
unsigned int notes;
int s;
- kenter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+ _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
trace_netfs_collect(wreq);
trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
@@ -409,7 +409,7 @@ reassess_streams:
front = stream->front;
while (front) {
trace_netfs_collect_sreq(wreq, front);
- //kdebug("sreq [%x] %llx %zx/%zx",
+ //_debug("sreq [%x] %llx %zx/%zx",
// front->debug_index, front->start, front->transferred, front->len);
/* Stall if there may be a discontinuity. */
@@ -598,7 +598,7 @@ reassess_streams:
out:
netfs_put_group_many(wreq->group, wreq->nr_group_rel);
wreq->nr_group_rel = 0;
- kleave(" = %x", notes);
+ _leave(" = %x", notes);
return;
need_retry:
@@ -606,7 +606,7 @@ need_retry:
* that any partially completed op will have had any wholly transferred
* folios removed from it.
*/
- kdebug("retry");
+ _debug("retry");
netfs_retry_writes(wreq);
goto out;
}
@@ -621,7 +621,7 @@ void netfs_write_collection_worker(struct work_struct *work)
size_t transferred;
int s;
- kenter("R=%x", wreq->debug_id);
+ _enter("R=%x", wreq->debug_id);
netfs_see_request(wreq, netfs_rreq_trace_see_work);
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
@@ -684,7 +684,7 @@ void netfs_write_collection_worker(struct work_struct *work)
if (wreq->origin == NETFS_DIO_WRITE)
inode_dio_end(wreq->inode);
- kdebug("finished");
+ _debug("finished");
trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
@@ -744,7 +744,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
struct netfs_io_request *wreq = subreq->rreq;
struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
- kenter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+ _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
switch (subreq->source) {
case NETFS_UPLOAD_TO_SERVER:
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index d7c971df8866..9258d30cffe3 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -99,7 +99,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
if (IS_ERR(wreq))
return wreq;
- kenter("R=%x", wreq->debug_id);
+ _enter("R=%x", wreq->debug_id);
ictx = netfs_inode(wreq->inode);
if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
@@ -122,6 +122,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
wreq->io_streams[1].transferred = LONG_MAX;
if (fscache_resources_valid(&wreq->cache_resources)) {
wreq->io_streams[1].avail = true;
+ wreq->io_streams[1].active = true;
wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write;
}
@@ -159,7 +160,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
subreq->max_nr_segs = INT_MAX;
subreq->stream_nr = stream->stream_nr;
- kenter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
+ _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
refcount_read(&subreq->ref),
@@ -215,7 +216,7 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream,
{
struct netfs_io_request *wreq = subreq->rreq;
- kenter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
+ _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
return netfs_write_subrequest_terminated(subreq, subreq->error, false);
@@ -272,11 +273,11 @@ int netfs_advance_write(struct netfs_io_request *wreq,
size_t part;
if (!stream->avail) {
- kleave("no write");
+ _leave("no write");
return len;
}
- kenter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
+ _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
if (subreq && start != subreq->start + subreq->len) {
netfs_issue_write(wreq, stream);
@@ -288,7 +289,7 @@ int netfs_advance_write(struct netfs_io_request *wreq,
subreq = stream->construct;
part = min(subreq->max_len - subreq->len, len);
- kdebug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
+ _debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
subreq->len += part;
subreq->nr_segs++;
@@ -319,7 +320,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
bool to_eof = false, streamw = false;
bool debug = false;
- kenter("");
+ _enter("");
/* netfs_perform_write() may shift i_size around the page or from out
* of the page to beyond it, but cannot move i_size into or through the
@@ -329,7 +330,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (fpos >= i_size) {
/* mmap beyond eof. */
- kdebug("beyond eof");
+ _debug("beyond eof");
folio_start_writeback(folio);
folio_unlock(folio);
wreq->nr_group_rel += netfs_folio_written_back(folio);
@@ -363,7 +364,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
}
flen -= foff;
- kdebug("folio %zx %zx %zx", foff, flen, fsize);
+ _debug("folio %zx %zx %zx", foff, flen, fsize);
/* Deal with discontinuities in the stream of dirty pages. These can
* arise from a number of sources:
@@ -487,7 +488,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
for (int s = 0; s < NR_IO_STREAMS; s++)
netfs_issue_write(wreq, &wreq->io_streams[s]);
- kleave(" = 0");
+ _leave(" = 0");
return 0;
}
@@ -522,7 +523,7 @@ int netfs_writepages(struct address_space *mapping,
netfs_stat(&netfs_n_wh_writepages);
do {
- kdebug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+ _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
/* It appears we don't have to handle cyclic writeback wrapping. */
WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
@@ -546,14 +547,14 @@ int netfs_writepages(struct address_space *mapping,
mutex_unlock(&ictx->wb_lock);
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- kleave(" = %d", error);
+ _leave(" = %d", error);
return error;
couldnt_start:
netfs_kill_dirty_pages(mapping, wbc, folio);
out:
mutex_unlock(&ictx->wb_lock);
- kleave(" = %d", error);
+ _leave(" = %d", error);
return error;
}
EXPORT_SYMBOL(netfs_writepages);
@@ -590,7 +591,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
struct folio *folio, size_t copied, bool to_page_end,
struct folio **writethrough_cache)
{
- kenter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
+ _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
if (!*writethrough_cache) {
@@ -624,7 +625,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
struct netfs_inode *ictx = netfs_inode(wreq->inode);
int ret;
- kenter("R=%x", wreq->debug_id);
+ _enter("R=%x", wreq->debug_id);
if (writethrough_cache)
netfs_write_folio(wreq, wbc, writethrough_cache);
@@ -657,7 +658,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
loff_t start = wreq->start;
int error = 0;
- kenter("%zx", len);
+ _enter("%zx", len);
if (wreq->origin == NETFS_DIO_WRITE)
inode_dio_begin(wreq->inode);
@@ -665,7 +666,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
while (len) {
// TODO: Prepare content encryption
- kdebug("unbuffered %zx", len);
+ _debug("unbuffered %zx", len);
part = netfs_advance_write(wreq, upload, start, len, false);
start += part;
len -= part;
@@ -684,6 +685,6 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
if (list_empty(&upload->subrequests))
netfs_wake_write_collector(wreq, false);
- kleave(" = %d", error);
+ _leave(" = %d", error);
return error;
}
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 0131d83b912d..c034080c334b 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -51,12 +51,21 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
if (unlikely(!bh))
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
buffer_dirty(bh))) {
- brelse(bh);
- BUG();
+ /*
+ * The block buffer at the specified new address was already
+ * in use. This can happen if it is a virtual block number
+ * and has been reallocated due to corruption of the bitmap
+ * used to manage its allocation state (if not, the buffer
+ * clearing of an abandoned b-tree node is missing somewhere).
+ */
+ nilfs_error(inode->i_sb,
+ "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
+ (unsigned long long)blocknr, inode->i_ino);
+ goto failed;
}
memset(bh->b_data, 0, i_blocksize(inode));
bh->b_bdev = inode->i_sb->s_bdev;
@@ -67,6 +76,12 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
folio_unlock(bh->b_folio);
folio_put(bh->b_folio);
return bh;
+
+failed:
+ folio_unlock(bh->b_folio);
+ folio_put(bh->b_folio);
+ brelse(bh);
+ return ERR_PTR(-EIO);
}
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
@@ -217,8 +232,8 @@ retry:
}
nbh = nilfs_btnode_create_block(btnc, newkey);
- if (!nbh)
- return -ENOMEM;
+ if (IS_ERR(nbh))
+ return PTR_ERR(nbh);
BUG_ON(nbh == obh);
ctxt->newbh = nbh;
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index a139970e4804..862bdf23120e 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -63,8 +63,8 @@ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
struct buffer_head *bh;
bh = nilfs_btnode_create_block(btnc, ptr);
- if (!bh)
- return -ENOMEM;
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
set_buffer_nilfs_volatile(bh);
*bhp = bh;
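
Both nilfs2 hunks move nilfs_btnode_create_block() from returning NULL on failure to returning an ERR_PTR(), so callers can tell the new -EIO inconsistency path apart from -ENOMEM through IS_ERR()/PTR_ERR(). A small userspace sketch of that encode-the-error-in-the-pointer convention, with simplified stand-ins for the err.h helpers:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
static inline void *ERR_PTR(long error)      { return (void *)(intptr_t)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int   IS_ERR(const void *ptr)  { return (uintptr_t)ptr >= (uintptr_t)-4095; }

static void *create_block(int fail_mode)
{
        static int block;

        if (fail_mode == 1)
                return ERR_PTR(-ENOMEM);        /* allocation failure */
        if (fail_mode == 2)
                return ERR_PTR(-EIO);           /* metadata inconsistency, as in the new path */
        return &block;
}

int main(void)
{
        void *bh = create_block(2);

        if (IS_ERR(bh))
                printf("create_block failed: %ld\n", PTR_ERR(bh));      /* prints -5 (EIO) */
        return 0;
}
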
diff --git a/fs/pidfs.c b/fs/pidfs.c
index c9cb14181def..7ffdc88dfb52 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -119,7 +119,7 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct task_struct *task __free(put_task) = NULL;
struct nsproxy *nsp __free(put_nsproxy) = NULL;
struct pid *pid = pidfd_pid(file);
- struct ns_common *ns_common;
+ struct ns_common *ns_common = NULL;
if (arg)
return -EINVAL;
@@ -146,52 +146,73 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
/* Namespaces that hang off nsproxy. */
case PIDFD_GET_CGROUP_NAMESPACE:
- get_cgroup_ns(nsp->cgroup_ns);
- ns_common = to_ns_common(nsp->cgroup_ns);
+ if (IS_ENABLED(CONFIG_CGROUPS)) {
+ get_cgroup_ns(nsp->cgroup_ns);
+ ns_common = to_ns_common(nsp->cgroup_ns);
+ }
break;
case PIDFD_GET_IPC_NAMESPACE:
- get_ipc_ns(nsp->ipc_ns);
- ns_common = to_ns_common(nsp->ipc_ns);
+ if (IS_ENABLED(CONFIG_IPC_NS)) {
+ get_ipc_ns(nsp->ipc_ns);
+ ns_common = to_ns_common(nsp->ipc_ns);
+ }
break;
case PIDFD_GET_MNT_NAMESPACE:
get_mnt_ns(nsp->mnt_ns);
ns_common = to_ns_common(nsp->mnt_ns);
break;
case PIDFD_GET_NET_NAMESPACE:
- ns_common = to_ns_common(nsp->net_ns);
- get_net_ns(ns_common);
+ if (IS_ENABLED(CONFIG_NET_NS)) {
+ ns_common = to_ns_common(nsp->net_ns);
+ get_net_ns(ns_common);
+ }
break;
case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
- get_pid_ns(nsp->pid_ns_for_children);
- ns_common = to_ns_common(nsp->pid_ns_for_children);
+ if (IS_ENABLED(CONFIG_PID_NS)) {
+ get_pid_ns(nsp->pid_ns_for_children);
+ ns_common = to_ns_common(nsp->pid_ns_for_children);
+ }
break;
case PIDFD_GET_TIME_NAMESPACE:
- get_time_ns(nsp->time_ns);
- ns_common = to_ns_common(nsp->time_ns);
+ if (IS_ENABLED(CONFIG_TIME_NS)) {
+ get_time_ns(nsp->time_ns);
+ ns_common = to_ns_common(nsp->time_ns);
+ }
break;
case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
- get_time_ns(nsp->time_ns_for_children);
- ns_common = to_ns_common(nsp->time_ns_for_children);
+ if (IS_ENABLED(CONFIG_TIME_NS)) {
+ get_time_ns(nsp->time_ns_for_children);
+ ns_common = to_ns_common(nsp->time_ns_for_children);
+ }
break;
case PIDFD_GET_UTS_NAMESPACE:
- get_uts_ns(nsp->uts_ns);
- ns_common = to_ns_common(nsp->uts_ns);
+ if (IS_ENABLED(CONFIG_UTS_NS)) {
+ get_uts_ns(nsp->uts_ns);
+ ns_common = to_ns_common(nsp->uts_ns);
+ }
break;
/* Namespaces that don't hang off nsproxy. */
case PIDFD_GET_USER_NAMESPACE:
- rcu_read_lock();
- ns_common = to_ns_common(get_user_ns(task_cred_xxx(task, user_ns)));
- rcu_read_unlock();
+ if (IS_ENABLED(CONFIG_USER_NS)) {
+ rcu_read_lock();
+ ns_common = to_ns_common(get_user_ns(task_cred_xxx(task, user_ns)));
+ rcu_read_unlock();
+ }
break;
case PIDFD_GET_PID_NAMESPACE:
- rcu_read_lock();
- ns_common = to_ns_common(get_pid_ns(task_active_pid_ns(task)));
- rcu_read_unlock();
+ if (IS_ENABLED(CONFIG_PID_NS)) {
+ rcu_read_lock();
+ ns_common = to_ns_common(get_pid_ns(task_active_pid_ns(task)));
+ rcu_read_unlock();
+ }
break;
default:
return -ENOIOCTLCMD;
}
+ if (!ns_common)
+ return -EOPNOTSUPP;
+
/* open_namespace() unconditionally consumes the reference */
return open_namespace(ns_common);
}
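
The pidfd_ioctl() change initialises ns_common to NULL and guards each case with IS_ENABLED(CONFIG_...), so asking for a namespace type the kernel was built without reaches the common return of -EOPNOTSUPP instead of touching a field that is compiled out; IS_ENABLED() is a compile-time constant, so the dead branches are discarded while still being type-checked. A standalone sketch of the same shape, with an invented MY_CONFIG_TIME_NS option and a local IS_ENABLED() stand-in:

#include <stdio.h>
#include <errno.h>

#define MY_CONFIG_TIME_NS 1          /* pretend .config option; flip to 0 to get -EOPNOTSUPP */
#define IS_ENABLED(opt)   (opt)      /* stand-in for the kernel's kconfig.h macro */

struct ns_common { int id; };

static struct ns_common time_ns = { .id = 7 };

static long get_namespace(int cmd)
{
        struct ns_common *ns = NULL;

        switch (cmd) {
        case 1:                                 /* e.g. a "get time namespace" command */
                if (IS_ENABLED(MY_CONFIG_TIME_NS))
                        ns = &time_ns;
                break;
        default:
                return -ENOTTY;
        }

        if (!ns)
                return -EOPNOTSUPP;             /* built without that namespace type */
        return ns->id;
}

int main(void)
{
        printf("%ld\n", get_namespace(1));
        return 0;
}
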
diff --git a/fs/pipe.c b/fs/pipe.c
index 50c8a8596b52..7dff2aa50a6d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1469,7 +1469,7 @@ static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
return 0;
}
-static int proc_dopipe_max_size(struct ctl_table *table, int write,
+static int proc_dopipe_max_size(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_douintvec(table, write, buffer, lenp, ppos,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 775a2e8d600c..5f171ad7b436 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -988,6 +988,7 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
#ifdef CONFIG_64BIT
+ [ilog2(VM_DROPPABLE)] = "dp",
[ilog2(VM_SEALED)] = "sl",
#endif
};
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index a2b256dac36e..7ae885e6d5d7 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2913,7 +2913,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = {
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
-static int do_proc_dqstats(struct ctl_table *table, int write,
+static int do_proc_dqstats(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int type = (unsigned long *)table->data - dqstats.stat;
diff --git a/fs/super.c b/fs/super.c
index 095ba793e10c..38d72a3cf6fc 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -736,6 +736,17 @@ struct super_block *sget_fc(struct fs_context *fc,
struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
int err;
+ /*
+ * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
+ * not set, as the filesystem is likely unprepared to handle it.
+ * This can happen when fsconfig() is called from init_user_ns with
+ * an fs_fd opened in another user namespace.
+ */
+ if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
+ errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
+ return ERR_PTR(-EPERM);
+ }
+
retry:
spin_lock(&sb_lock);
if (test) {
diff --git a/fs/binfmt_elf_test.c b/fs/tests/binfmt_elf_kunit.c
index 11d734fec366..11d734fec366 100644
--- a/fs/binfmt_elf_test.c
+++ b/fs/tests/binfmt_elf_kunit.c
diff --git a/fs/exec_test.c b/fs/tests/exec_kunit.c
index 7c77d039680b..7c77d039680b 100644
--- a/fs/exec_test.c
+++ b/fs/tests/exec_kunit.c
diff --git a/fs/xattr.c b/fs/xattr.c
index f8b643f91a98..7672ce5486c5 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -630,10 +630,9 @@ int do_setxattr(struct mnt_idmap *idmap, struct dentry *dentry,
ctx->kvalue, ctx->size, ctx->flags);
}
-static long
-setxattr(struct mnt_idmap *idmap, struct dentry *d,
- const char __user *name, const void __user *value, size_t size,
- int flags)
+static int path_setxattr(const char __user *pathname,
+ const char __user *name, const void __user *value,
+ size_t size, int flags, unsigned int lookup_flags)
{
struct xattr_name kname;
struct xattr_ctx ctx = {
@@ -643,33 +642,20 @@ setxattr(struct mnt_idmap *idmap, struct dentry *d,
.kname = &kname,
.flags = flags,
};
+ struct path path;
int error;
error = setxattr_copy(name, &ctx);
if (error)
return error;
- error = do_setxattr(idmap, d, &ctx);
-
- kvfree(ctx.kvalue);
- return error;
-}
-
-static int path_setxattr(const char __user *pathname,
- const char __user *name, const void __user *value,
- size_t size, int flags, unsigned int lookup_flags)
-{
- struct path path;
- int error;
-
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
- return error;
+ goto out;
error = mnt_want_write(path.mnt);
if (!error) {
- error = setxattr(mnt_idmap(path.mnt), path.dentry, name,
- value, size, flags);
+ error = do_setxattr(mnt_idmap(path.mnt), path.dentry, &ctx);
mnt_drop_write(path.mnt);
}
path_put(&path);
@@ -677,6 +663,9 @@ retry:
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
+
+out:
+ kvfree(ctx.kvalue);
return error;
}
@@ -697,20 +686,32 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
const void __user *,value, size_t, size, int, flags)
{
- struct fd f = fdget(fd);
- int error = -EBADF;
+ struct xattr_name kname;
+ struct xattr_ctx ctx = {
+ .cvalue = value,
+ .kvalue = NULL,
+ .size = size,
+ .kname = &kname,
+ .flags = flags,
+ };
+ int error;
+ CLASS(fd, f)(fd);
if (!f.file)
- return error;
+ return -EBADF;
+
audit_file(f.file);
+ error = setxattr_copy(name, &ctx);
+ if (error)
+ return error;
+
error = mnt_want_write_file(f.file);
if (!error) {
- error = setxattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, name,
- value, size, flags);
+ error = do_setxattr(file_mnt_idmap(f.file),
+ f.file->f_path.dentry, &ctx);
mnt_drop_write_file(f.file);
}
- fdput(f);
+ kvfree(ctx.kvalue);
return error;
}
@@ -899,9 +900,17 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
* Extended attribute REMOVE operations
*/
static long
-removexattr(struct mnt_idmap *idmap, struct dentry *d,
- const char __user *name)
+removexattr(struct mnt_idmap *idmap, struct dentry *d, const char *name)
{
+ if (is_posix_acl_xattr(name))
+ return vfs_remove_acl(idmap, d, name);
+ return vfs_removexattr(idmap, d, name);
+}
+
+static int path_removexattr(const char __user *pathname,
+ const char __user *name, unsigned int lookup_flags)
+{
+ struct path path;
int error;
char kname[XATTR_NAME_MAX + 1];
@@ -910,25 +919,13 @@ removexattr(struct mnt_idmap *idmap, struct dentry *d,
error = -ERANGE;
if (error < 0)
return error;
-
- if (is_posix_acl_xattr(kname))
- return vfs_remove_acl(idmap, d, kname);
-
- return vfs_removexattr(idmap, d, kname);
-}
-
-static int path_removexattr(const char __user *pathname,
- const char __user *name, unsigned int lookup_flags)
-{
- struct path path;
- int error;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = mnt_want_write(path.mnt);
if (!error) {
- error = removexattr(mnt_idmap(path.mnt), path.dentry, name);
+ error = removexattr(mnt_idmap(path.mnt), path.dentry, kname);
mnt_drop_write(path.mnt);
}
path_put(&path);
@@ -954,15 +951,23 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
{
struct fd f = fdget(fd);
+ char kname[XATTR_NAME_MAX + 1];
int error = -EBADF;
if (!f.file)
return error;
audit_file(f.file);
+
+ error = strncpy_from_user(kname, name, sizeof(kname));
+ if (error == 0 || error == sizeof(kname))
+ error = -ERANGE;
+ if (error < 0)
+ return error;
+
error = mnt_want_write_file(f.file);
if (!error) {
error = removexattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, name);
+ f.file->f_path.dentry, kname);
mnt_drop_write_file(f.file);
}
fdput(f);
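
fremovexattr() now copies the attribute name into a fixed kname[XATTR_NAME_MAX + 1] buffer up front and treats both an empty result and a completely filled buffer as -ERANGE, mirroring the check already done on the path-based variants. A userspace sketch of the same length validation, with copy_name() as a rough stand-in for strncpy_from_user():

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define XATTR_NAME_MAX 255

/*
 * Rough stand-in for strncpy_from_user(): returns the copied length,
 * 0 for an empty string, or the buffer size if the source did not fit.
 */
static long copy_name(char *dst, const char *src, size_t size)
{
        size_t len = strnlen(src, size);

        if (len == size)
                return (long)size;              /* did not fit, no terminating NUL seen */
        memcpy(dst, src, len + 1);
        return (long)len;
}

static int check_xattr_name(const char *user_name)
{
        char kname[XATTR_NAME_MAX + 1];
        long error = copy_name(kname, user_name, sizeof(kname));

        if (error == 0 || error == sizeof(kname))
                return -ERANGE;                 /* empty or over-long attribute name */
        if (error < 0)
                return (int)error;
        printf("name ok: %s\n", kname);
        return 0;
}

int main(void)
{
        check_xattr_name("user.comment");       /* accepted */
        printf("%d\n", check_xattr_name(""));   /* -ERANGE */
        return 0;
}
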
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index a191f6560f98..c84df23b494d 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -11,7 +11,7 @@ static struct ctl_table_header *xfs_table_header;
#ifdef CONFIG_PROC_FS
STATIC int
xfs_stats_clear_proc_handler(
- struct ctl_table *ctl,
+ const struct ctl_table *ctl,
int write,
void *buffer,
size_t *lenp,
@@ -31,7 +31,7 @@ xfs_stats_clear_proc_handler(
STATIC int
xfs_panic_mask_proc_handler(
- struct ctl_table *ctl,
+ const struct ctl_table *ctl,
int write,
void *buffer,
size_t *lenp,
@@ -52,7 +52,7 @@ xfs_panic_mask_proc_handler(
STATIC int
xfs_deprecated_dointvec_minmax(
- struct ctl_table *ctl,
+ const struct ctl_table *ctl,
int write,
void *buffer,
size_t *lenp,
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 5d148343628a..8db5bd382915 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -563,7 +563,7 @@ static inline void *acpi_driver_data(struct acpi_device *d)
}
#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
-#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
+#define to_acpi_driver(d) container_of_const(d, struct acpi_driver, drv)
static inline struct acpi_device *acpi_dev_parent(struct acpi_device *adev)
{
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 677315e51e54..ad6afc5c4918 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -443,29 +443,10 @@
#endif
/*
- * Some symbol definitions will not exist yet during the first pass of the
- * link, but are guaranteed to exist in the final link. Provide preliminary
- * definitions that will be superseded in the final link to avoid having to
- * rely on weak external linkage, which requires a GOT when used in position
- * independent code.
- */
-#define PRELIMINARY_SYMBOL_DEFINITIONS \
- PROVIDE(kallsyms_addresses = .); \
- PROVIDE(kallsyms_offsets = .); \
- PROVIDE(kallsyms_names = .); \
- PROVIDE(kallsyms_num_syms = .); \
- PROVIDE(kallsyms_relative_base = .); \
- PROVIDE(kallsyms_token_table = .); \
- PROVIDE(kallsyms_token_index = .); \
- PROVIDE(kallsyms_markers = .); \
- PROVIDE(kallsyms_seqs_of_names = .);
-
-/*
* Read only Data
*/
#define RO_DATA(align) \
. = ALIGN((align)); \
- PRELIMINARY_SYMBOL_DEFINITIONS \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
diff --git a/include/dt-bindings/i3c/i3c.h b/include/dt-bindings/i3c/i3c.h
new file mode 100644
index 000000000000..373439218bba
--- /dev/null
+++ b/include/dt-bindings/i3c/i3c.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _DT_BINDINGS_I3C_I3C_H
+#define _DT_BINDINGS_I3C_I3C_H
+
+#define I2C_FM (1 << 4)
+#define I2C_FM_PLUS (0 << 4)
+
+#define I2C_FILTER (0 << 5)
+#define I2C_NO_FILTER_HIGH_FREQUENCY (1 << 5)
+#define I2C_NO_FILTER_LOW_FREQUENCY (2 << 5)
+
+#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f0b95c76c707..0687a442fec7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -259,6 +259,12 @@ static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
#endif
+#ifdef CONFIG_RISCV
+void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa);
+#else
+static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { }
+#endif
+
#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index c6d18f50f671..89683f31ae12 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -149,7 +149,7 @@ struct ffa_driver {
struct device_driver driver;
};
-#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
+#define to_ffa_driver(d) container_of_const(d, struct ffa_driver, driver)
static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
{
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index 1539bbd263d2..662b8ae54b6a 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -212,7 +212,7 @@ static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
return container_of(dev, struct auxiliary_device, dev);
}
-static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+static inline const struct auxiliary_driver *to_auxiliary_drv(const struct device_driver *drv)
{
return container_of(drv, struct auxiliary_driver, driver);
}
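
Several headers in this series (auxiliary_bus, eisa, dfl, fsi, greybus and others) switch their to_<bus>_driver() helpers to container_of_const(), so passing a const struct device_driver * yields a const outer pointer instead of silently losing the qualifier. A standalone approximation of the idea using _Generic, specialised to struct device_driver for brevity; the kernel macro is more general than this sketch:

#include <stdio.h>
#include <stddef.h>

/* Trimmed-down stand-ins for the kernel structures. */
struct device_driver { const char *name; };
struct eisa_driver   { int id; struct device_driver driver; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * Const-preserving variant: a const inner pointer yields a const outer
 * pointer, a non-const one keeps the result mutable.
 */
#define container_of_const(ptr, type, member)                                 \
        _Generic((ptr),                                                       \
                const struct device_driver *:                                 \
                        (const type *)container_of((struct device_driver *)(ptr), type, member), \
                default:                                                      \
                        (type *)container_of((struct device_driver *)(ptr), type, member))

#define to_eisa_driver(drv) container_of_const(drv, struct eisa_driver, driver)

int main(void)
{
        struct eisa_driver edrv = { .id = 1, .driver = { .name = "eisa" } };
        const struct device_driver *d = &edrv.driver;
        const struct eisa_driver *back = to_eisa_driver(d);    /* const in, const out */

        printf("%d %s\n", back->id, back->driver.name);
        return 0;
}
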
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 46d4bdc634c0..ba35bbf07798 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -47,12 +47,17 @@ extern unsigned long __sw_hweight64(__u64 w);
__builtin_constant_p(*(const unsigned long *)(addr))) ? \
const##op(nr, addr) : op(nr, addr))
+/*
+ * The following macros are non-atomic versions of their non-underscored
+ * counterparts.
+ */
#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
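
The added comment spells out the convention that the double-underscored bit helpers are the non-atomic versions of set_bit() and friends: cheaper, but only safe when the word cannot be modified concurrently (data still private to one thread, or protected by a lock). A small sketch of that distinction using C11 atomics in place of the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long shared_flags;   /* concurrently updated word */
static unsigned long local_flags;            /* private to one thread */

/* Atomic variant: safe against concurrent updates of the same word. */
static void set_bit_atomic(unsigned int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_or(addr, 1UL << nr);
}

/* Non-atomic variant (the __set_bit() analogue): caller must guarantee exclusion. */
static void set_bit_nonatomic(unsigned int nr, unsigned long *addr)
{
        *addr |= 1UL << nr;
}

int main(void)
{
        set_bit_atomic(0, &shared_flags);
        set_bit_nonatomic(1, &local_flags);
        printf("%lx %lx\n", atomic_load(&shared_flags), local_flags);
        return 0;
}
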
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
index b57118aaa679..79bb80e56790 100644
--- a/include/linux/cdx/cdx_bus.h
+++ b/include/linux/cdx/cdx_bus.h
@@ -211,7 +211,7 @@ struct cdx_driver {
};
#define to_cdx_driver(_drv) \
- container_of(_drv, struct cdx_driver, driver)
+ container_of_const(_drv, struct cdx_driver, driver)
/* Macro to avoid include chaining to get THIS_MODULE */
#define cdx_driver_register(drv) \
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 89f5c34ce4df..51ba681b915a 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -122,6 +122,7 @@ enum cpuhp_state {
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_PREPARE,
+ CPUHP_TMIGR_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 099e8b32dd68..801a7e524113 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1033,53 +1033,17 @@ void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
-static inline void
-set_cpu_possible(unsigned int cpu, bool possible)
-{
- if (possible)
- cpumask_set_cpu(cpu, &__cpu_possible_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_possible_mask);
-}
+#define assign_cpu(cpu, mask, val) \
+ assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-static inline void
-set_cpu_enabled(unsigned int cpu, bool can_be_onlined)
-{
- if (can_be_onlined)
- cpumask_set_cpu(cpu, &__cpu_enabled_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_enabled_mask);
-}
-
-static inline void
-set_cpu_present(unsigned int cpu, bool present)
-{
- if (present)
- cpumask_set_cpu(cpu, &__cpu_present_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_present_mask);
-}
+#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible))
+#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
+#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
+#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
+#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
void set_cpu_online(unsigned int cpu, bool online);
-static inline void
-set_cpu_active(unsigned int cpu, bool active)
-{
- if (active)
- cpumask_set_cpu(cpu, &__cpu_active_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_active_mask);
-}
-
-static inline void
-set_cpu_dying(unsigned int cpu, bool dying)
-{
- if (dying)
- cpumask_set_cpu(cpu, &__cpu_dying_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_dying_mask);
-}
-
/**
* to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
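
The removed set_cpu_*() inline helpers all repeated the same set-or-clear branch, so they become one assign_cpu() wrapper around assign_bit(), with cpumask_check() only validating the CPU index. A standalone sketch of that consolidation; assign_bit() here is a plain non-atomic reimplementation for illustration, not the kernel's:

#include <stdio.h>
#include <stdbool.h>

static unsigned long possible_mask;
static unsigned long present_mask;

/* Non-atomic illustration of assign_bit(): set the bit when val is true, clear it otherwise. */
static void assign_bit(unsigned int nr, unsigned long *addr, bool val)
{
        if (val)
                *addr |= 1UL << nr;
        else
                *addr &= ~(1UL << nr);
}

/* One wrapper replaces the family of near-identical set_cpu_*() helpers. */
#define assign_cpu(cpu, mask, val)      assign_bit((cpu), (mask), (val))
#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &possible_mask, (possible))
#define set_cpu_present(cpu, present)   assign_cpu((cpu), &present_mask, (present))

int main(void)
{
        set_cpu_possible(3, true);
        set_cpu_present(3, true);
        set_cpu_present(3, false);
        printf("possible=%#lx present=%#lx\n", possible_mask, present_mask);
        return 0;
}
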
diff --git a/include/linux/device.h b/include/linux/device.h
index ace039151cb8..34eb20f5966f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1031,13 +1031,6 @@ static inline void device_lock_assert(struct device *dev)
lockdep_assert_held(&dev->mutex);
}
-static inline struct device_node *dev_of_node(struct device *dev)
-{
- if (!IS_ENABLED(CONFIG_OF) || !dev)
- return NULL;
- return dev->of_node;
-}
-
static inline bool dev_has_sync_state(struct device *dev)
{
if (!dev)
@@ -1144,10 +1137,18 @@ void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
int device_offline(struct device *dev);
int device_online(struct device *dev);
+
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
-void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
+void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+
+static inline struct device_node *dev_of_node(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_OF) || !dev)
+ return NULL;
+ return dev->of_node;
+}
static inline int dev_num_vf(struct device *dev)
{
@@ -1176,12 +1177,12 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
-int __must_check device_driver_attach(struct device_driver *drv,
+int __must_check device_driver_attach(const struct device_driver *drv,
struct device *dev);
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __must_check device_attach(struct device *dev);
-int __must_check driver_attach(struct device_driver *drv);
+int __must_check driver_attach(const struct device_driver *drv);
void device_initial_probe(struct device *dev);
int __must_check device_reprobe(struct device *dev);
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 5ef4ec1c36c3..807831d6bf0f 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -81,7 +81,7 @@ struct bus_type {
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;
- int (*match)(struct device *dev, struct device_driver *drv);
+ int (*match)(struct device *dev, const struct device_driver *drv);
int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 7738f458995f..1fc8b68786de 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -146,16 +146,16 @@ struct driver_attribute {
#define DRIVER_ATTR_WO(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-int __must_check driver_create_file(struct device_driver *driver,
+int __must_check driver_create_file(const struct device_driver *driver,
const struct driver_attribute *attr);
-void driver_remove_file(struct device_driver *driver,
+void driver_remove_file(const struct device_driver *driver,
const struct driver_attribute *attr);
int driver_set_override(struct device *dev, const char **override,
const char *s, size_t len);
int __must_check driver_for_each_device(struct device_driver *drv, struct device *start,
void *data, int (*fn)(struct device *dev, void *));
-struct device *driver_find_device(struct device_driver *drv,
+struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data));
@@ -165,7 +165,7 @@ struct device *driver_find_device(struct device_driver *drv,
* @drv: the driver we're iterating
* @name: name of the device to match
*/
-static inline struct device *driver_find_device_by_name(struct device_driver *drv,
+static inline struct device *driver_find_device_by_name(const struct device_driver *drv,
const char *name)
{
return driver_find_device(drv, NULL, name, device_match_name);
@@ -178,7 +178,7 @@ static inline struct device *driver_find_device_by_name(struct device_driver *dr
* @np: of_node pointer to match.
*/
static inline struct device *
-driver_find_device_by_of_node(struct device_driver *drv,
+driver_find_device_by_of_node(const struct device_driver *drv,
const struct device_node *np)
{
return driver_find_device(drv, NULL, np, device_match_of_node);
@@ -203,13 +203,13 @@ driver_find_device_by_fwnode(struct device_driver *drv,
* @drv: the driver we're iterating
* @devt: devt pointer to match.
*/
-static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
+static inline struct device *driver_find_device_by_devt(const struct device_driver *drv,
dev_t devt)
{
return driver_find_device(drv, NULL, &devt, device_match_devt);
}
-static inline struct device *driver_find_next_device(struct device_driver *drv,
+static inline struct device *driver_find_next_device(const struct device_driver *drv,
struct device *start)
{
return driver_find_device(drv, start, NULL, device_match_any);
@@ -223,14 +223,14 @@ static inline struct device *driver_find_next_device(struct device_driver *drv,
* @adev: ACPI_COMPANION device to match.
*/
static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv,
+driver_find_device_by_acpi_dev(const struct device_driver *drv,
const struct acpi_device *adev)
{
return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
}
#else
static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
+driver_find_device_by_acpi_dev(const struct device_driver *drv, const void *adev)
{
return NULL;
}
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
index 0a7a00a0ee7f..1f02db0c1897 100644
--- a/include/linux/dfl.h
+++ b/include/linux/dfl.h
@@ -71,7 +71,7 @@ struct dfl_driver {
};
#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
-#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+#define to_dfl_drv(d) container_of_const(d, struct dfl_driver, drv)
/*
* use a macro to avoid include chaining to get THIS_MODULE.
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 2b5923909f96..464331c4c4a7 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -93,7 +93,7 @@ struct dio_driver {
struct device_driver driver;
};
-#define to_dio_driver(drv) container_of(drv, struct dio_driver, driver)
+#define to_dio_driver(drv) container_of_const(drv, struct dio_driver, driver)
/* DIO/DIO-II boards all have the following 8bit registers.
* These are offsets from the base of the device.
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 9fc03068cabc..b137fdb56093 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1608,7 +1608,8 @@ int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
- struct dma_chan *chan);
+ struct dma_chan *chan,
+ const char *name);
void dma_async_device_channel_unregister(struct dma_device *device,
struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
index b012e30afebd..f98200cae637 100644
--- a/include/linux/eisa.h
+++ b/include/linux/eisa.h
@@ -60,7 +60,7 @@ struct eisa_driver {
struct device_driver driver;
};
-#define to_eisa_driver(drv) container_of(drv,struct eisa_driver, driver)
+#define to_eisa_driver(drv) container_of_const(drv,struct eisa_driver, driver)
/* These external functions are only available when EISA support is enabled. */
#ifdef CONFIG_EISA
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 41d1d71c36ff..01bee2b289c2 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -259,15 +259,14 @@ struct node_footer {
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
+#define ADDRS_PER_INODE(inode) addrs_per_page(inode, true)
/* Address Pointers in a Direct Block */
#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
-#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
+#define ADDRS_PER_BLOCK(inode) addrs_per_page(inode, false)
/* Node IDs in an Indirect Block */
#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
-#define ADDRS_PER_PAGE(page, inode) \
- (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
+#define ADDRS_PER_PAGE(page, inode) (addrs_per_page(inode, IS_INODE(page)))
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 00abe0e5d602..1cca14cf5652 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -462,9 +462,8 @@ struct fw_iso_packet {
/* rx: Sync bit, wait for matching sy */
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
- u32 header_length:8; /* Length of immediate header */
- /* tx: Top of 1394 isoch. data_block */
- u32 header[] __counted_by(header_length);
+ u32 header_length:8; /* Size of immediate header */
+ u32 header[]; /* tx: Top of 1394 isoch. data_block */
};
#define FW_ISO_CONTEXT_TRANSMIT 0
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index f026f8926d79..aae1b85ffc10 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -98,6 +98,10 @@ static inline bool firmware_request_builtin(struct firmware *fw,
#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context));
int firmware_request_nowarn(const struct firmware **fw, const char *name,
struct device *device);
int firmware_request_platform(const struct firmware **fw, const char *name,
@@ -123,6 +127,14 @@ static inline int request_firmware(const struct firmware **fw,
return -EINVAL;
}
+static inline int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return -EINVAL;
+}
+
static inline int firmware_request_nowarn(const struct firmware **fw,
const char *name,
struct device *device)
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 3df8c54868df..8c5eef808788 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -44,7 +44,7 @@ struct fsi_driver {
};
#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
-#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
+#define to_fsi_drv(drvp) container_of_const(drvp, struct fsi_driver, drv)
extern int fsi_driver_register(struct fsi_driver *fsi_drv);
extern void fsi_driver_unregister(struct fsi_driver *fsi_drv);
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index a1b3de87a3d1..083c860fd28e 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -56,7 +56,7 @@ struct fsl_mc_driver {
};
#define to_fsl_mc_driver(_drv) \
- container_of(_drv, struct fsl_mc_driver, driver)
+ container_of_const(_drv, struct fsl_mc_driver, driver)
/**
* enum fsl_mc_pool_type - Types of allocatable MC bus resources
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 51575b76818e..fd5e84d0ec47 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -471,7 +471,7 @@ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
extern int stack_tracer_enabled;
-int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
@@ -1175,7 +1175,7 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
-int tracepoint_printk_sysctl(struct ctl_table *table, int write,
+int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#else /* CONFIG_TRACING */
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 07e370113b2b..86d62fdafd7a 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -58,7 +58,7 @@ struct gameport_driver {
bool ignore;
};
-#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver)
+#define to_gameport_driver(d) container_of_const(d, struct gameport_driver, driver)
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
diff --git a/include/linux/greybus.h b/include/linux/greybus.h
index 634c9511cf78..4d58e27ceaf6 100644
--- a/include/linux/greybus.h
+++ b/include/linux/greybus.h
@@ -64,7 +64,7 @@ struct greybus_driver {
struct device_driver driver;
};
-#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
+#define to_greybus_driver(d) container_of_const(d, struct greybus_driver, driver)
static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
{
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cff002be83eb..e25d9ebfdf89 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -74,14 +74,20 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
/*
- * Mask of all large folio orders supported for file THP.
+ * Mask of all large folio orders supported for file THP. Folios in a DAX
+ * file are never split and the MAX_PAGECACHE_ORDER limit does not apply to
+ * them.
*/
-#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DAX \
+ (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DEFAULT \
+ ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
/*
* Mask of all large folio orders supported for THP.
*/
-#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+#define THP_ORDERS_ALL \
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
#define TVA_IN_PF (1 << 1) /* Page fault handler */
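
THP_ORDERS_ALL_FILE is split into a DAX mask (PMD and PUD orders only, since DAX folios are never split) and a default file mask covering every order from 1 up to MAX_PAGECACHE_ORDER, which is what (BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0) evaluates to. A quick worked example of that bit arithmetic, with MAX_PAGECACHE_ORDER taken as 8 purely for illustration:

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define MAX_PAGECACHE_ORDER 8     /* illustrative value only */

int main(void)
{
        unsigned long file_default = (BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0);

        /* BIT(9) - 1 = 0x1ff selects orders 0..8; clearing bit 0 drops order 0. */
        printf("mask=%#lx\n", file_default);            /* 0x1fe */
        for (int order = 0; order <= MAX_PAGECACHE_ORDER; order++)
                if (file_default & BIT(order))
                        printf("order %d allowed\n", order);
        return 0;
}
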
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index bfe7c1f1ac6d..f0231dbc4777 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
+int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
@@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
+static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
+{
+ return 0;
+}
+
static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
return 0;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5e39baa7f6cb..22c22fb91042 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1330,11 +1330,7 @@ struct hv_device {
#define device_to_hv_device(d) container_of_const(d, struct hv_device, device)
-
-static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
-{
- return container_of(d, struct hv_driver, driver);
-}
+#define drv_to_hv_drv(d) container_of_const(d, struct hv_driver, driver)
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e9cc14b1f9a1..07e33bbc9256 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -30,7 +30,6 @@ extern const struct device_type i2c_client_type;
/* --- General options ------------------------------------------------ */
struct i2c_msg;
-struct i2c_algorithm;
struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
@@ -304,7 +303,7 @@ struct i2c_driver {
u32 flags;
};
-#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
+#define to_i2c_driver(d) container_of_const(d, struct i2c_driver, driver)
/**
* struct i2c_client - represent an I2C slave device
@@ -512,16 +511,15 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
#endif /* I2C_BOARDINFO */
/**
- * struct i2c_algorithm - represent I2C transfer method
- * @xfer: Issue a set of i2c transactions to the given I2C adapter
- * defined by the msgs array, with num messages available to transfer via
- * the adapter specified by adap.
- * @xfer_atomic: same as @xfer. Yet, only using atomic context
- * so e.g. PMICs can be accessed very late before shutdown. Optional.
- * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
+ * struct i2c_algorithm - represent I2C transfer methods
+ * @xfer: Transfer a given number of messages defined by the msgs array via
+ * the specified adapter.
+ * @xfer_atomic: Same as @xfer. Yet, only using atomic context so e.g. PMICs
+ * can be accessed very late before shutdown. Optional.
+ * @smbus_xfer: Issue SMBus transactions to the given I2C adapter. If this
* is not present, then the bus layer will try and convert the SMBus calls
* into I2C transfers instead.
- * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context
+ * @smbus_xfer_atomic: Same as @smbus_xfer. Yet, only using atomic context
* so e.g. PMICs can be accessed very late before shutdown. Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
* from the ``I2C_FUNC_*`` flags.
@@ -533,8 +531,6 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* @reg_slave: deprecated, use @reg_target
* @unreg_slave: deprecated, use @unreg_target
*
- *
- * The following structs are for those who like to implement new bus drivers:
* i2c_algorithm is the interface to a class of hardware solutions which can
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
@@ -550,9 +546,6 @@ struct i2c_algorithm {
* to NULL. If an adapter algorithm can do SMBus access, set
* smbus_xfer. If set to NULL, the SMBus protocol is simulated
* using common I2C messages.
- *
- * xfer should return the number of messages successfully
- * processed, or a negative value on error
*/
union {
int (*xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index e119f11948ef..0a8a44ac2f02 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -183,10 +183,7 @@ struct i3c_driver {
const struct i3c_device_id *id_table;
};
-static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
-{
- return container_of(drv, struct i3c_driver, driver);
-}
+#define drv_to_i3cdrv(__drv) container_of_const(__drv, struct i3c_driver, driver)
struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 0ca27dd86956..074f632868d9 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -33,6 +33,7 @@ enum {
struct i3c_master_controller;
struct i3c_bus;
struct i3c_device;
+extern const struct bus_type i3c_bus_type;
/**
* struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 3bb6198d1523..3315005df117 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -404,7 +404,7 @@ struct io_ring_ctx {
spinlock_t napi_lock; /* napi_list lock */
/* napi busy poll default timeout */
- unsigned int napi_busy_poll_to;
+ ktime_t napi_busy_poll_dt;
bool napi_prefer_busy_poll;
bool napi_enabled;
@@ -461,7 +461,6 @@ enum {
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
REQ_F_POLL_NO_LAZY_BIT,
- REQ_F_CANCEL_SEQ_BIT,
REQ_F_CAN_POLL_BIT,
REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
@@ -536,8 +535,6 @@ enum {
REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
/* don't use lazy poll wake for this request */
REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
- /* cancel sequence is set and valid */
- REQ_F_CANCEL_SEQ = IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
/* file is pollable */
REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
/* buffer list was empty after selection of buffer */
diff --git a/include/linux/maple.h b/include/linux/maple.h
index 9aae44efcfd4..3be4e567473c 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -97,7 +97,7 @@ int maple_add_packet(struct maple_device *mdev, u32 function,
void maple_clear_dev(struct maple_device *mdev);
#define to_maple_dev(n) container_of(n, struct maple_device, dev)
-#define to_maple_driver(n) container_of(n, struct maple_driver, drv)
+#define to_maple_driver(n) container_of_const(n, struct maple_driver, drv)
#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 0b971b24a804..4ab2691f51a6 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -94,10 +94,7 @@ struct mcb_driver {
void (*shutdown)(struct mcb_device *mdev);
};
-static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv)
-{
- return container_of(drv, struct mcb_driver, driver);
-}
+#define to_mcb_driver(__drv) container_of_const(__drv, struct mcb_driver, driver)
static inline void *mcb_get_drvdata(struct mcb_device *dev)
{
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 68f8d2e970d4..efeca5bd7600 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -31,7 +31,7 @@ struct mdio_device {
struct mii_bus *bus;
char modalias[MDIO_NAME_SIZE];
- int (*bus_match)(struct device *dev, struct device_driver *drv);
+ int (*bus_match)(struct device *dev, const struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
void (*device_remove)(struct mdio_device *mdiodev);
@@ -57,11 +57,8 @@ struct mdio_driver_common {
};
#define MDIO_DEVICE_FLAG_PHY 1
-static inline struct mdio_driver_common *
-to_mdio_common_driver(const struct device_driver *driver)
-{
- return container_of(driver, struct mdio_driver_common, driver);
-}
+#define to_mdio_common_driver(__drv_c) container_of_const(__drv_c, struct mdio_driver_common, \
+ driver)
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -80,12 +77,8 @@ struct mdio_driver {
void (*shutdown)(struct mdio_device *mdiodev);
};
-static inline struct mdio_driver *
-to_mdio_driver(const struct device_driver *driver)
-{
- return container_of(to_mdio_common_driver(driver), struct mdio_driver,
- mdiodrv);
-}
+#define to_mdio_driver(__drv_m) container_of_const(to_mdio_common_driver(__drv_m), \
+ struct mdio_driver, mdiodrv)
/* device driver data */
static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
@@ -105,7 +98,7 @@ void mdio_device_remove(struct mdio_device *mdiodev);
void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
-int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
+int mdio_device_bus_match(struct device *dev, const struct device_driver *drv);
static inline void mdio_device_get(struct mdio_device *mdiodev)
{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7e2eb091049a..0e5bf25d324f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -109,6 +109,7 @@ struct mem_cgroup_per_node {
/* Fields which get updated often at the end. */
struct lruvec lruvec;
+ CACHELINE_PADDING(_pad2_);
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter;
};
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index fabd6ed8d258..059dc94d20bb 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -528,7 +528,7 @@ struct mhi_driver {
struct device_driver driver;
};
-#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_driver(drv) container_of_const(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
/**
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index 11bf3212f782..7b40fc8cbe77 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -221,7 +221,7 @@ struct mhi_ep_driver {
};
#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
-#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+#define to_mhi_ep_driver(drv) container_of_const(drv, struct mhi_ep_driver, driver)
/*
* module_mhi_ep_driver() - Helper macro for drivers that don't do
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 2ec559284a9f..a7ef65f78933 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -270,4 +270,11 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+/*
+ * Use these carefully: no type checking, and uses the arguments
+ * multiple times. Use for obvious constants only.
+ */
+#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
+#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
+
#endif /* _LINUX_MINMAX_H */
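To make the caveat above concrete, a safe use is a pure-constant expression such as the hypothetical definition below; anything with side effects would be evaluated twice.

/* Fine: both arguments are compile-time constants. */
#define FOO_BUF_LEN	MIN_T(size_t, PAGE_SIZE, 2048)

/* Not fine: foo_next_len() would be called twice. */
/* #define FOO_BAD	MIN_T(size_t, foo_next_len(), 2048) */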
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d044e737dba..c4b238a20b76 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -204,11 +204,11 @@ extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
-int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
+int overcommit_ratio_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
-int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
+int overcommit_kbytes_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
-int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
+int overcommit_policy_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -407,6 +407,13 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_64BIT
+#define VM_DROPPABLE_BIT 40
+#define VM_DROPPABLE BIT(VM_DROPPABLE_BIT)
+#else
+#define VM_DROPPABLE VM_NONE
+#endif
+
+#ifdef CONFIG_64BIT
/* VM is sealed, in vm_flags */
#define VM_SEALED _BITUL(63)
#endif
@@ -3130,21 +3137,7 @@ extern void reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void free_reserved_page(struct page *page)
-{
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
-
- if (ref) {
- set_codetag_empty(ref);
- put_page_tag_ref(ref);
- }
- }
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- adjust_managed_page_count(page, 1);
-}
+void free_reserved_page(struct page *page);
#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
@@ -3847,7 +3840,7 @@ extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
-int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+int drop_caches_sysctl_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 4213d8993cd8..88ecc5e9f523 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -174,6 +174,12 @@ extern void cleanup_module(void);
#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
/*
+ * Weak module dependencies. See man modprobe.d for details.
+ * Example: MODULE_WEAKDEP("module-foo")
+ */
+#define MODULE_WEAKDEP(_weakdep) MODULE_INFO(weakdep, _weakdep)
+
+/*
* MODULE_FILE is used for generating modules.builtin
* So, make it no-op when this is being built as a module
*/
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
index ac577699edfd..dfa4800306ee 100644
--- a/include/linux/moxtet.h
+++ b/include/linux/moxtet.h
@@ -61,13 +61,8 @@ struct moxtet_driver {
struct device_driver driver;
};
-static inline struct moxtet_driver *
-to_moxtet_driver(struct device_driver *drv)
-{
- if (!drv)
- return NULL;
- return container_of(drv, struct moxtet_driver, driver);
-}
+#define to_moxtet_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct moxtet_driver, driver) : NULL )
extern int __moxtet_register_driver(struct module *owner,
struct moxtet_driver *mdrv);
diff --git a/include/linux/nd.h b/include/linux/nd.h
index b9771ba1ef87..fa099e295f78 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -84,11 +84,7 @@ struct nd_device_driver {
void (*notify)(struct device *dev, enum nvdimm_event event);
};
-static inline struct nd_device_driver *to_nd_device_driver(
- struct device_driver *drv)
-{
- return container_of(drv, struct nd_device_driver, drv);
-};
+#define to_nd_device_driver(__drv) container_of_const(__drv, struct nd_device_driver, drv)
/**
* struct nd_namespace_common - core infrastructure of a namespace
diff --git a/include/linux/of.h b/include/linux/of.h
index 13cf7a43b473..85b60ac9eec5 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -430,11 +430,9 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
/*
- * struct property *prop;
- * const __be32 *p;
* u32 u;
*
- * of_property_for_each_u32(np, "propname", prop, p, u)
+ * of_property_for_each_u32(np, "propname", u)
* printk("U32 value: %x\n", u);
*/
const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
@@ -1431,11 +1429,12 @@ static inline int of_property_read_s32(const struct device_node *np,
err == 0; \
err = of_phandle_iterator_next(it))
-#define of_property_for_each_u32(np, propname, prop, p, u) \
- for (prop = of_find_property(np, propname, NULL), \
- p = of_prop_next_u32(prop, NULL, &u); \
- p; \
- p = of_prop_next_u32(prop, p, &u))
+#define of_property_for_each_u32(np, propname, u) \
+ for (struct {struct property *prop; const __be32 *item; } _it = \
+ {of_find_property(np, propname, NULL), \
+ of_prop_next_u32(_it.prop, NULL, &u)}; \
+ _it.item; \
+ _it.item = of_prop_next_u32(_it.prop, _it.item, &u))
#define of_property_for_each_string(np, propname, prop, s) \
for (prop = of_find_property(np, propname, NULL), \
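For comparison with the kernel-doc snippet above, a caller of the new three-argument iterator only declares the value variable; the property pointer and cursor now live inside the macro. The function and property name below are hypothetical:

static void foo_list_rates(struct device_node *np)
{
	u32 rate;

	of_property_for_each_u32(np, "clock-rates", rate)
		pr_info("supported rate: %u\n", rate);
}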
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 547e82cdc89a..fc6b9c87cb0a 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -41,13 +41,13 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
+#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#elif defined(CONFIG_TRANSPARENT_HUGEPAGE)
-#define pageblock_order min_t(unsigned int, HPAGE_PMD_ORDER, MAX_PAGE_ORDER)
+#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, MAX_PAGE_ORDER)
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 483a191bb4df..d9c7edb6422b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1537,10 +1537,4 @@ unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
return folio_size(folio) >> inode->i_blkbits;
}
-
-static inline
-unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
-{
- return i_blocks_per_folio(inode, page_folio(page));
-}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 0639d4dc8986..18a3aeb62ae4 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -107,8 +107,7 @@ struct pci_epf_driver {
const struct pci_epf_device_id *id_table;
};
-#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \
- driver))
+#define to_pci_epf_driver(drv) container_of_const((drv), struct pci_epf_driver, driver)
/**
* struct pci_epf_bar - represents the BAR of EPF device
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9e36b6c1810e..4cf89a4b4cbc 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -959,10 +959,8 @@ struct pci_driver {
bool driver_managed_dma;
};
-static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
-{
- return drv ? container_of(drv, struct pci_driver, driver) : NULL;
-}
+#define to_pci_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
/**
* PCI_DEVICE - macro used to describe a specific PCI device
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 65ece0d5b4b6..1a8942277dda 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1582,11 +1582,11 @@ extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
+int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int perf_event_max_stack_handler(struct ctl_table *table, int write,
+int perf_event_max_stack_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
/* Access to perf_event_open(2) syscall. */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 04ae5c811cfb..6b7d40d49129 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1187,7 +1187,7 @@ struct phy_driver {
int (*led_polarity_set)(struct phy_device *dev, int index,
unsigned long modes);
};
-#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
+#define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
#define PHY_ANY_ID "MATCH ANY PHY"
diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h
index 816a4cd3ccb5..96843aab4d1e 100644
--- a/include/linux/platform_data/i2c-mux-gpio.h
+++ b/include/linux/platform_data/i2c-mux-gpio.h
@@ -19,6 +19,7 @@
* position
* @n_values: Number of multiplexer positions (busses to instantiate)
* @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
+ * @settle_time: Delay to wait when a new bus is selected
*/
struct i2c_mux_gpio_platform_data {
int parent;
@@ -26,6 +27,7 @@ struct i2c_mux_gpio_platform_data {
const unsigned *values;
int n_values;
unsigned idle;
+ u32 settle_time;
};
#endif /* _LINUX_I2C_MUX_GPIO_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 7a41c72c1959..d422db6eec63 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -237,15 +237,14 @@ struct platform_driver {
int (*probe)(struct platform_device *);
/*
- * Traditionally the remove callback returned an int which however is
- * ignored by the driver core. This led to wrong expectations by driver
- * authors who thought returning an error code was a valid error
- * handling strategy. To convert to a callback returning void, new
- * drivers should implement .remove_new() until the conversion it done
- * that eventually makes .remove() return void.
+ * .remove_new() is a relic from a prototype conversion of .remove().
+ * New drivers are supposed to implement .remove(). Once all drivers are
+ * converted to not use .remove_new any more, it will be dropped.
*/
- int (*remove)(struct platform_device *);
- void (*remove_new)(struct platform_device *);
+ union {
+ void (*remove)(struct platform_device *);
+ void (*remove_new)(struct platform_device *);
+ };
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
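In practice the union means a newly written platform driver simply provides a void-returning .remove(); a hedged sketch (driver name and callbacks are hypothetical):

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* release resources; there is nothing meaningful to return */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name = "foo",
	},
};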
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 7f2ff95d2deb..b7a7158aaf65 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -383,7 +383,7 @@ struct pnp_driver {
struct device_driver driver;
};
-#define to_pnp_driver(drv) container_of(drv, struct pnp_driver, driver)
+#define to_pnp_driver(drv) container_of_const(drv, struct pnp_driver, driver)
struct pnp_card_driver {
struct list_head global_list;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index c852cc882501..72dc7e45c90c 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -309,18 +309,11 @@ struct power_supply {
#endif
#ifdef CONFIG_LEDS_TRIGGERS
- struct led_trigger *charging_full_trig;
- char *charging_full_trig_name;
+ struct led_trigger *trig;
struct led_trigger *charging_trig;
- char *charging_trig_name;
struct led_trigger *full_trig;
- char *full_trig_name;
- struct led_trigger *online_trig;
- char *online_trig_name;
struct led_trigger *charging_blink_full_solid_trig;
- char *charging_blink_full_solid_trig_name;
struct led_trigger *charging_orange_full_green_trig;
- char *charging_orange_full_green_trig_name;
#endif
};
@@ -743,7 +736,7 @@ struct power_supply_battery_info {
int overvoltage_limit_uv;
int constant_charge_current_max_ua;
int constant_charge_voltage_max_uv;
- struct power_supply_maintenance_charge_table *maintenance_charge;
+ const struct power_supply_maintenance_charge_table *maintenance_charge;
int maintenance_charge_size;
int alert_low_temp_charge_current_ua;
int alert_low_temp_charge_voltage_uv;
@@ -762,9 +755,9 @@ struct power_supply_battery_info {
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
struct power_supply_resistance_temp_table *resist_table;
int resist_table_size;
- struct power_supply_vbat_ri_table *vbat2ri_discharging;
+ const struct power_supply_vbat_ri_table *vbat2ri_discharging;
int vbat2ri_discharging_size;
- struct power_supply_vbat_ri_table *vbat2ri_charging;
+ const struct power_supply_vbat_ri_table *vbat2ri_charging;
int vbat2ri_charging_size;
int bti_resistance_ohm;
int bti_resistance_tolerance;
@@ -817,7 +810,7 @@ power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table
int table_len, int temp);
extern int power_supply_vbat2ri(struct power_supply_battery_info *info,
int vbat_uv, bool charging);
-extern struct power_supply_maintenance_charge_table *
+extern const struct power_supply_maintenance_charge_table *
power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index);
extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
int resistance);
@@ -831,7 +824,7 @@ extern int power_supply_set_battery_charged(struct power_supply *psy);
static inline bool
power_supply_supports_maintenance_charging(struct power_supply_battery_info *info)
{
- struct power_supply_maintenance_charge_table *mt;
+ const struct power_supply_maintenance_charge_table *mt;
mt = power_supply_get_maintenance_charging_setting(info, 0);
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 7239976698e4..b937cefcb31c 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -196,7 +196,7 @@ void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
-void console_replay_all(void);
+void console_try_replay_all(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -276,7 +276,7 @@ static inline void dump_stack(void)
static inline void printk_trigger_flush(void)
{
}
-static inline void console_replay_all(void)
+static inline void console_try_replay_all(void)
{
}
#endif
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 2cd637268b4f..3c29f40f3c94 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -465,7 +465,7 @@ struct rio_driver {
struct device_driver driver;
};
-#define to_rio_driver(drv) container_of(drv,struct rio_driver, driver)
+#define to_rio_driver(drv) container_of_const(drv,struct rio_driver, driver)
union rio_pw_msg {
struct {
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 3a9bb5b9a9e8..688466a0e816 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -945,7 +945,7 @@ struct scmi_device {
struct scmi_handle *handle;
};
-#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
+#define to_scmi_dev(d) container_of_const(d, struct scmi_device, dev)
struct scmi_device_id {
u8 protocol_id;
diff --git a/include/linux/security.h b/include/linux/security.h
index de3af33e6ff5..1390f1efb4f0 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -228,7 +228,7 @@ struct request_sock;
#define LSM_UNSAFE_NO_NEW_PRIVS 4
#ifdef CONFIG_MMU
-extern int mmap_min_addr_handler(struct ctl_table *table, int write,
+extern int mmap_min_addr_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 7ca41af93b37..bf2191f25350 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -80,7 +80,7 @@ struct serio_driver {
struct device_driver driver;
};
-#define to_serio_driver(d) container_of(d, struct serio_driver, driver)
+#define to_serio_driver(d) container_of_const(d, struct serio_driver, driver)
int serio_open(struct serio *serio, struct serio_driver *drv);
void serio_close(struct serio *serio);
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
index 3042385b7b40..a4608d9a9684 100644
--- a/include/linux/slimbus.h
+++ b/include/linux/slimbus.h
@@ -91,7 +91,7 @@ struct slim_driver {
struct device_driver driver;
const struct slim_device_id *id_table;
};
-#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
+#define to_slim_driver(d) container_of_const(d, struct slim_driver, driver)
/**
* struct slim_val_inf - Slimbus value or information element
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index 7161a3183eda..a532d1e4b1f4 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -162,7 +162,7 @@ struct apr_driver {
};
typedef struct apr_driver gpr_driver_t;
-#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
+#define to_apr_driver(d) container_of_const(d, struct apr_driver, driver)
/*
* use a macro to avoid include chaining to get THIS_MODULE
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index 0943bf419e11..f946e3beca21 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -15,4 +15,6 @@ phys_addr_t qcom_smem_virt_to_phys(void *p);
int qcom_smem_get_soc_id(u32 *id);
int qcom_smem_get_feature_code(u32 *code);
+int qcom_smem_bust_hwspin_lock_by_host(unsigned int host);
+
#endif
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index f411c176536d..ce1a3790d6fb 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -660,5 +660,7 @@
/* For Tensor GS101 */
#define GS101_SYSIP_DAT0 (0x810)
#define GS101_SYSTEM_CONFIGURATION (0x3A00)
+#define GS101_PHY_CTRL_USB20 (0x3EB0)
+#define GS101_PHY_CTRL_USBDP (0x3EB4)
#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 13e96d8b7423..94fc1b57c57b 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -903,6 +903,7 @@ struct sdw_master_ops {
* meaningful if multi_link is set. If set to 1, hardware-based
* synchronization will be used even if a stream only uses a single
* SoundWire segment.
+ * @stream_refcount: number of streams currently using this bus
*/
struct sdw_bus {
struct device *dev;
@@ -933,6 +934,7 @@ struct sdw_bus {
u32 bank_switch_timeout;
bool multi_link;
int hw_sync_min_links;
+ int stream_refcount;
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 8e78417156e3..d537587b4499 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -182,6 +182,11 @@
#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE BIT(2)
#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS GENMASK(4, 3)
#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE BIT(5)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLSS BIT(6)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLDS GENMASK(11, 7)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_DODSE2 GENMASK(13, 12)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_DOAISE2 BIT(14)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLDE BIT(15)
/**
* struct sdw_intel_stream_params_data: configuration passed during
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index 693320b4f5c2..d405935a45fe 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -13,7 +13,7 @@ static inline int is_sdw_slave(const struct device *dev)
return dev->type == &sdw_slave_type;
}
-#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
+#define drv_to_sdw_driver(_drv) container_of_const(_drv, struct sdw_driver, driver)
#define sdw_register_driver(drv) \
__sdw_register_driver(drv, THIS_MODULE)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d7a16e0adf44..e4f3f3d30a03 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -351,10 +351,8 @@ struct spi_driver {
struct device_driver driver;
};
-static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
-{
- return drv ? container_of(drv, struct spi_driver, driver) : NULL;
-}
+#define to_spi_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct spi_driver, driver) : NULL )
extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index a2257380c3f1..e1fb11e0f12c 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -325,7 +325,7 @@ struct ssb_driver {
struct device_driver drv;
};
-#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv)
+#define drv_to_ssb_drv(_drv) container_of_const(_drv, struct ssb_driver, drv)
extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner);
#define ssb_driver_register(drv) \
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 54fbec062772..aa4c6d44aaa0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -61,31 +61,31 @@ extern const int sysctl_vals[];
extern const unsigned long sysctl_long_vals[];
-typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
+typedef int proc_handler(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_dobool(struct ctl_table *table, int write, void *buffer,
+int proc_dostring(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dobool(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer,
+int proc_dointvec(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_douintvec(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_minmax(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_douintvec_minmax(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer,
+int proc_dou8vec_minmax(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
+int proc_dointvec_jiffies(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *,
+int proc_dointvec_userhz_jiffies(const struct ctl_table *, int, void *, size_t *,
loff_t *);
-int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *,
+int proc_dointvec_ms_jiffies(const struct ctl_table *, int, void *, size_t *,
loff_t *);
-int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *,
+int proc_doulongvec_minmax(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int, void *,
size_t *, loff_t *);
-int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
+int proc_do_large_bitmap(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_do_static_key(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
/*
@@ -287,7 +287,7 @@ static inline bool sysctl_is_alias(char *param)
}
#endif /* CONFIG_SYSCTL */
-int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+int sysctl_max_threads(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
#endif /* _LINUX_SYSCTL_H */
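All of the handlers above now take a const table pointer; a custom handler written against the new prototype looks like the sketch below (names are hypothetical), typically delegating to one of the proc_do*() helpers:

static int foo_sysctl_handler(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	/* the table entry itself can be read but no longer modified here */
	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}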
diff --git a/include/linux/tc.h b/include/linux/tc.h
index 1638660abf5e..8416bae9b126 100644
--- a/include/linux/tc.h
+++ b/include/linux/tc.h
@@ -108,7 +108,7 @@ struct tc_driver {
struct device_driver driver;
};
-#define to_tc_driver(drv) container_of(drv, struct tc_driver, driver)
+#define to_tc_driver(drv) container_of_const(drv, struct tc_driver, driver)
/*
* Return TURBOchannel clock frequency in Hz.
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 786b9ae6cf4d..a54c203000ed 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -298,6 +298,6 @@ struct tee_client_driver {
};
#define to_tee_client_driver(d) \
- container_of(d, struct tee_client_driver, driver)
+ container_of_const(d, struct tee_client_driver, driver)
#endif /*__TEE_DRV_H*/
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 3064314f4832..d8e4105a2f21 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -5,6 +5,7 @@
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
+#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
@@ -138,13 +139,26 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
return raw_copy_to_user(to, from, n);
}
-#ifdef INLINE_COPY_FROM_USER
+/*
+ * Architectures that #define INLINE_COPY_TO_USER use this function
+ * directly in the normal copy_to/from_user(), the other ones go
+ * through an extern _copy_to/from_user(), which expands the same code
+ * here.
+ *
+ * Rust code always uses the extern definition.
+ */
static inline __must_check unsigned long
-_copy_from_user(void *to, const void __user *from, unsigned long n)
+_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
@@ -153,14 +167,11 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
memset(to + (n - res), 0, res);
return res;
}
-#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
-#endif
-#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
-_copy_to_user(void __user *to, const void *from, unsigned long n)
+_inline_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (should_fail_usercopy())
@@ -171,25 +182,32 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
-#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
-#endif
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (check_copy_size(to, n, false))
- n = _copy_from_user(to, from, n);
- return n;
+ if (!check_copy_size(to, n, false))
+ return n;
+#ifdef INLINE_COPY_FROM_USER
+ return _inline_copy_from_user(to, from, n);
+#else
+ return _copy_from_user(to, from, n);
+#endif
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (check_copy_size(from, n, true))
- n = _copy_to_user(to, from, n);
- return n;
+ if (!check_copy_size(from, n, true))
+ return n;
+
+#ifdef INLINE_COPY_TO_USER
+ return _inline_copy_to_user(to, from, n);
+#else
+ return _copy_to_user(to, from, n);
+#endif
}
#ifndef copy_mc_to_kernel
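Callers are unaffected by the inline/extern split described in the new comment; a routine copy_from_user() user still looks like this hedged sketch (struct and function are hypothetical):

#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_config {
	u32 mode;
	u32 flags;
};

static long foo_set_config(struct foo_config __user *uarg)
{
	struct foo_config cfg;

	if (copy_from_user(&cfg, uarg, sizeof(cfg)))
		return -EFAULT;
	/* act on cfg.mode / cfg.flags here */
	return 0;
}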
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 05d59f74fc88..a12bcf042551 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -218,6 +218,9 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
{
vm_flags &= __VM_UFFD_FLAGS;
+ if (vm_flags & VM_DROPPABLE)
+ return false;
+
if ((vm_flags & VM_UFFD_MINOR) &&
(!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
return false;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 96fea920873b..ecc5cb7b8c91 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -209,10 +209,7 @@ struct virtio_driver {
int (*restore)(struct virtio_device *dev);
};
-static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
-{
- return container_of(drv, struct virtio_driver, driver);
-}
+#define drv_to_virtio(__drv) container_of_const(__drv, struct virtio_driver, driver)
/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_virtio_driver(drv) \
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 16b0cfa80502..23cd17942036 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -17,7 +17,7 @@ extern int sysctl_stat_interval;
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
-int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
+int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos);
#endif
@@ -301,7 +301,7 @@ void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
struct ctl_table;
-int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
+int vmstat_refresh(const struct ctl_table *, int write, void *buffer, size_t *lenp,
loff_t *ppos);
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 112d806ddbe4..1a54676d843a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -350,7 +350,7 @@ extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int laptop_mode;
-int dirtytime_interval_handler(struct ctl_table *table, int write,
+int dirtytime_interval_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index db7416ed6057..f36c8d39553d 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -52,7 +52,7 @@ struct zorro_driver {
struct device_driver driver;
};
-#define to_zorro_driver(drv) container_of(drv, struct zorro_driver, driver)
+#define to_zorro_driver(drv) container_of_const(drv, struct zorro_driver, driver)
#define zorro_for_each_dev(dev) \
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 9bbdf6eaa942..7a533d5b1d59 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -486,7 +486,7 @@ void igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0d28172193fa..a44f262a7384 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -412,12 +412,12 @@ void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
-int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write,
void *buffer,
size_t *lenp, loff_t *ppos);
-int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
diff --git a/include/net/netfilter/nf_hooks_lwtunnel.h b/include/net/netfilter/nf_hooks_lwtunnel.h
index 52e27920f829..cef7a4eb8f97 100644
--- a/include/net/netfilter/nf_hooks_lwtunnel.h
+++ b/include/net/netfilter/nf_hooks_lwtunnel.h
@@ -2,6 +2,6 @@
#include <linux/types.h>
#ifdef CONFIG_SYSCTL
-int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index fb3399e4cd29..bd1243657c01 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -495,7 +495,7 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
extern int iscsi_flashnode_bus_match(struct device *dev,
- struct device_driver *drv);
+ const struct device_driver *drv);
extern struct device *
iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
int (*fn)(struct device *dev, void *data));
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index 2fc641cb1982..882b849b9255 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -73,10 +73,7 @@ static inline struct ac97_codec_device *to_ac97_device(struct device *d)
return container_of(d, struct ac97_codec_device, dev);
}
-static inline struct ac97_codec_driver *to_ac97_driver(struct device_driver *d)
-{
- return container_of(d, struct ac97_codec_driver, driver);
-}
+#define to_ac97_driver(__drv) container_of_const(__drv, struct ac97_codec_driver, driver)
#if IS_ENABLED(CONFIG_AC97_BUS_NEW)
int snd_ac97_codec_driver_register(struct ac97_codec_driver *drv);
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
index 99c41bfc7827..00fd4d449ff3 100644
--- a/include/sound/tas2781-tlv.h
+++ b/include/sound/tas2781-tlv.h
@@ -16,11 +16,11 @@
#define __TAS2781_TLV_H__
static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
-static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
-static const DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
/* pow(10, db/20) * pow(2,30) */
-static const unsigned char tas2563_dvc_table[][4] = {
+static const __maybe_unused unsigned char tas2563_dvc_table[][4] = {
{ 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */
{ 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */
{ 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index e46d6e82765e..b63d211bd141 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -165,6 +165,12 @@ IF_HAVE_PG_ARCH_X(arch_3)
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif
+#ifdef CONFIG_64BIT
+# define IF_HAVE_VM_DROPPABLE(flag, name) {flag, name},
+#else
+# define IF_HAVE_VM_DROPPABLE(flag, name)
+#endif
+
#define __def_vmaflag_names \
{VM_READ, "read" }, \
{VM_WRITE, "write" }, \
@@ -197,6 +203,7 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
{VM_MIXEDMAP, "mixedmap" }, \
{VM_HUGEPAGE, "hugepage" }, \
{VM_NOHUGEPAGE, "nohugepage" }, \
+IF_HAVE_VM_DROPPABLE(VM_DROPPABLE, "droppable" ) \
{VM_MERGEABLE, "mergeable" } \
#define show_vma_flags(flags) \
diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h
index 79f19e76a80b..47db5eaf2f9a 100644
--- a/include/trace/events/timer_migration.h
+++ b/include/trace/events/timer_migration.h
@@ -43,7 +43,7 @@ TRACE_EVENT(tmigr_connect_child_parent,
__field( unsigned int, lvl )
__field( unsigned int, numa_node )
__field( unsigned int, num_children )
- __field( u32, childmask )
+ __field( u32, groupmask )
),
TP_fast_assign(
@@ -52,11 +52,11 @@ TRACE_EVENT(tmigr_connect_child_parent,
__entry->lvl = child->parent->level;
__entry->numa_node = child->parent->numa_node;
__entry->num_children = child->parent->num_children;
- __entry->childmask = child->childmask;
+ __entry->groupmask = child->groupmask;
),
- TP_printk("group=%p childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
- __entry->child, __entry->childmask, __entry->parent,
+ TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->child, __entry->groupmask, __entry->parent,
__entry->lvl, __entry->numa_node, __entry->num_children)
);
@@ -72,7 +72,7 @@ TRACE_EVENT(tmigr_connect_cpu_parent,
__field( unsigned int, lvl )
__field( unsigned int, numa_node )
__field( unsigned int, num_children )
- __field( u32, childmask )
+ __field( u32, groupmask )
),
TP_fast_assign(
@@ -81,11 +81,11 @@ TRACE_EVENT(tmigr_connect_cpu_parent,
__entry->lvl = tmc->tmgroup->level;
__entry->numa_node = tmc->tmgroup->numa_node;
__entry->num_children = tmc->tmgroup->num_children;
- __entry->childmask = tmc->childmask;
+ __entry->groupmask = tmc->groupmask;
),
- TP_printk("cpu=%d childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
- __entry->cpu, __entry->childmask, __entry->parent,
+ TP_printk("cpu=%d groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->cpu, __entry->groupmask, __entry->parent,
__entry->lvl, __entry->numa_node, __entry->num_children)
);
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index d31698410410..42ec5ddaab8d 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -41,6 +41,10 @@
*/
#define XDP_UMEM_TX_SW_CSUM (1 << 1)
+/* Request to reserve tx_metadata_len bytes of per-chunk metadata.
+ */
+#define XDP_UMEM_TX_METADATA_LEN (1 << 2)
+
struct sockaddr_xdp {
__u16 sxdp_family;
__u16 sxdp_flags;
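A userspace sketch of the new opt-in (the helper and field values are illustrative, error handling elided): reserving per-chunk metadata now requires both a tx_metadata_len and the XDP_UMEM_TX_METADATA_LEN flag.

#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int foo_register_umem(int xsk_fd, void *area, __u64 size)
{
	struct xdp_umem_reg reg = {
		.addr = (__u64)(uintptr_t)area,
		.len = size,
		.chunk_size = 4096,
		.tx_metadata_len = sizeof(struct xsk_tx_metadata),
		.flags = XDP_UMEM_TX_METADATA_LEN,
	};

	return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg));
}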
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index a246e11988d5..e89d00528f2f 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -17,6 +17,7 @@
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+#define MAP_DROPPABLE 0x08 /* Zero memory under memory pressure. */
/*
* Huge page size encoding when MAP_HUGETLB is specified, and a huge page
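A userspace sketch of the new flag (size and naming are arbitrary; assumes a libc/uapi header that already exposes MAP_DROPPABLE): memory mapped this way may be zeroed by the kernel under memory pressure, so it only suits regenerable data such as caches.

#include <stddef.h>
#include <sys/mman.h>

static void *foo_alloc_droppable(size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE | MAP_DROPPABLE, -1, 0);

	return p == MAP_FAILED ? NULL : p;
}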
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index e744c23582eb..1dd047ec98a1 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -20,7 +20,7 @@
/* Add to (or subtract from) the entropy count. (Superuser only.) */
#define RNDADDTOENTCNT _IOW( 'R', 0x01, int )
-/* Get the contents of the entropy pool. (Superuser only.) */
+/* Get the contents of the entropy pool. (Superuser only.) (Removed in 2.6.9-rc2.) */
#define RNDGETPOOL _IOR( 'R', 0x02, int [2] )
/*
@@ -55,4 +55,19 @@ struct rand_pool_info {
#define GRND_RANDOM 0x0002
#define GRND_INSECURE 0x0004
+/**
+ * struct vgetrandom_opaque_params - arguments for allocating memory for vgetrandom
+ *
+ * @size_of_opaque_state: Size of each state that is to be passed to vgetrandom().
+ * @mmap_prot: Value of the prot argument in mmap(2).
+ * @mmap_flags: Value of the flags argument in mmap(2).
+ * @reserved: Reserved for future use.
+ */
+struct vgetrandom_opaque_params {
+ __u32 size_of_opaque_state;
+ __u32 mmap_prot;
+ __u32 mmap_flags;
+ __u32 reserved[13];
+};
+
#endif /* _UAPI_LINUX_RANDOM_H */
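A hedged userspace sketch of how these parameters are meant to be used (error handling elided; the vgetrandom function pointer stands for the resolved vDSO getrandom symbol, and the ~0UL query convention is this editor's reading of the vDSO getrandom interface, not something stated in this hunk): querying the parameters first, then mmap()ing per-thread opaque states with them.

#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <linux/random.h>

/* Signature of the vDSO getrandom entry point, resolved by the caller. */
typedef ssize_t (*vgetrandom_fn)(void *buf, size_t len, unsigned int flags,
				 void *opaque_state, size_t opaque_len);

static void *foo_alloc_states(vgetrandom_fn vgetrandom, size_t nr_threads)
{
	struct vgetrandom_opaque_params params;

	if (vgetrandom(NULL, 0, 0, &params, ~0UL))
		return NULL;

	return mmap(NULL, nr_threads * params.size_of_opaque_state,
		    params.mmap_prot, params.mmap_flags, -1, 0);
}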
diff --git a/include/uapi/linux/um_timetravel.h b/include/uapi/linux/um_timetravel.h
index ca3238222b6d..546a690b0346 100644
--- a/include/uapi/linux/um_timetravel.h
+++ b/include/uapi/linux/um_timetravel.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2023 Intel Corporation
*/
#ifndef _UAPI_LINUX_UM_TIMETRAVEL_H
#define _UAPI_LINUX_UM_TIMETRAVEL_H
@@ -50,6 +39,36 @@ struct um_timetravel_msg {
__u64 time;
};
+/* max number of file descriptors that can be sent/received in a message */
+#define UM_TIMETRAVEL_MAX_FDS 2
+
+/**
+ * enum um_timetravel_shared_mem_fds - fds sent in ACK message for START message
+ */
+enum um_timetravel_shared_mem_fds {
+ /**
+ * @UM_TIMETRAVEL_SHARED_MEMFD: Index of the shared memory file
+ * descriptor in the control message
+ */
+ UM_TIMETRAVEL_SHARED_MEMFD,
+ /**
+ * @UM_TIMETRAVEL_SHARED_LOGFD: Index of the logging file descriptor
+ * in the control message
+ */
+ UM_TIMETRAVEL_SHARED_LOGFD,
+ UM_TIMETRAVEL_SHARED_MAX_FDS,
+};
+
+/**
+ * enum um_timetravel_start_ack - ack-time mask for start message
+ */
+enum um_timetravel_start_ack {
+ /**
+ * @UM_TIMETRAVEL_START_ACK_ID: client ID that controller allocated.
+ */
+ UM_TIMETRAVEL_START_ACK_ID = 0xffff,
+};
+
/**
* enum um_timetravel_ops - Operation codes
*/
@@ -57,7 +76,9 @@ enum um_timetravel_ops {
/**
* @UM_TIMETRAVEL_ACK: response (ACK) to any previous message,
* this usually doesn't carry any data in the 'time' field
- * unless otherwise specified below
+ * unless otherwise specified below; note: when using shared
+ * memory there is no ACK for WAIT and RUN messages, for more
+ * info see &struct um_timetravel_schedshm.
*/
UM_TIMETRAVEL_ACK = 0,
@@ -123,6 +144,147 @@ enum um_timetravel_ops {
* the simulation.
*/
UM_TIMETRAVEL_GET_TOD = 8,
+
+ /**
+ * @UM_TIMETRAVEL_BROADCAST: Send/Receive a broadcast message.
+ * This message can be used to sync all components in the system
+ * with a single message: if the calendar gets the message, the
+ * calendar broadcasts the message to all components, and if a
+ * component receives it, it should act based on it, e.g. print a
+ * message to its log system.
+ * (calendar <-> host)
+ */
+ UM_TIMETRAVEL_BROADCAST = 9,
+};
+
+/* version of struct um_timetravel_schedshm */
+#define UM_TIMETRAVEL_SCHEDSHM_VERSION 2
+
+/**
+ * enum um_timetravel_schedshm_cap - time travel capabilities of every client
+ *
+ * These flags must be set immediately after processing the ACK to
+ * the START message, before sending any message to the controller.
+ */
+enum um_timetravel_schedshm_cap {
+ /**
+ * @UM_TIMETRAVEL_SCHEDSHM_CAP_TIME_SHARE: client can read the current time
+ * and update its internal time request in shared memory, can read
+ * free_until, sends no ACK on RUN, and doesn't expect an ACK on
+ * WAIT.
+ */
+ UM_TIMETRAVEL_SCHEDSHM_CAP_TIME_SHARE = 0x1,
+};
+
+/**
+ * enum um_timetravel_schedshm_flags - time travel flags of every client
+ */
+enum um_timetravel_schedshm_flags {
+ /**
+ * @UM_TIMETRAVEL_SCHEDSHM_FLAGS_REQ_RUN: client has a request to run.
+ * It's set by client when it has a request to run, if (and only
+ * if) the @running_id points to a client that is able to use
+ * shared memory, i.e. has %UM_TIMETRAVEL_SCHEDSHM_CAP_TIME_SHARE
+ * (this includes the client itself). Otherwise, a message must
+ * be used.
+ */
+ UM_TIMETRAVEL_SCHEDSHM_FLAGS_REQ_RUN = 0x1,
+};
+
+/**
+ * DOC: Time travel shared memory overview
+ *
+ * The main purpose of the shared memory is to avoid all time travel messages
+ * that don't need any action; for example, the current time can be held in
+ * shared memory without any client having to send a UM_TIMETRAVEL_GET message
+ * just to learn the time.
+ *
+ * Since the memory is shared between all clients and the controller, and the
+ * controller creates the shared memory space, all time values are absolute to
+ * controller time. So the first time a client connects in shared memory mode,
+ * it should take the current_time value from shared memory and keep it as an
+ * internal offset to shared memory times. Once shared memory is initialized,
+ * any interaction with the controller must happen in the controller time
+ * domain, including messages (for clients not using shared memory, the
+ * controller will handle an offset and make them think they start at time zero).
+ *
+ * Along with the shared memory file descriptor, a logging file descriptor is
+ * sent to the client so that all logs related to shared memory end up in one
+ * place. Note: to have all logs synced into the log file at write time, the
+ * file should be flushed (fflush) after writing to it.
+ *
+ * To avoid memory corruption, the structure fields below document, for each
+ * field, who may write to it and at what time.
+ *
+ * To avoid having to pack this struct, all fields in it must be naturally aligned
+ * (i.e. aligned to their size).
+ */
+
+/**
+ * union um_timetravel_schedshm_client - UM time travel client struct
+ *
+ * Every entity using the shared memory, including the controller, has a place
+ * in the um_timetravel_schedshm clients array that holds info related to the
+ * client using the shared memory; it can be set only by the client after it
+ * gets the shared memory fd.
+ *
+ * @capa: bit fields with client capabilities see
+ * &enum um_timetravel_schedshm_cap, set by client once after getting the
+ * shared memory file descriptor.
+ * @flags: bit fields for flags see &enum um_timetravel_schedshm_flags for doc.
+ * @req_time: request time to run, set by client on every request it needs.
+ * @name: unique id sent to the controller by client with START message.
+ */
+union um_timetravel_schedshm_client {
+ struct {
+ __u32 capa;
+ __u32 flags;
+ __u64 req_time;
+ __u64 name;
+ };
+ char reserve[128]; /* reserved for future usage */
};
+/**
+ * struct um_timetravel_schedshm - UM time travel shared memory struct
+ *
+ * @hdr: header fields:
+ * @version: Current version struct UM_TIMETRAVEL_SCHEDSHM_VERSION,
+ * set by controller once at init, clients must check this after mapping
+ * and work without shared memory if they cannot handle the indicated
+ * version.
+ * @len: Length of all the memory including the header (@hdr); clients should,
+ * once per connection, first mmap the header and use the length (@len) to
+ * remap the entire area. This supports a dynamic struct size, letting the
+ * number of clients vary based on controller support.
+ * @free_until: Stores the next request to run by any client, in order for the
+ * current client to know how long it can still run. A client needs to (at
+ * least) reload this value immediately after communicating with any other
+ * client, since the controller will update this field when a new request
+ * is made by any client. Clients must also update this value when they
+ * insert/update their own request in the shared memory while not running
+ * themselves and the new request is earlier than the current value.
+ * @current_time: Current time, can only be set by the client in running state
+ * (indicated by @running_id), though that client may only run until @free_until,
+ * so it must remain smaller than @free_until.
+ * @running_id: The current client in state running, set before a client is
+ * notified that it's now running.
+ * @max_clients: size of @clients array, set once at init by the controller.
+ * @clients: clients array see &union um_timetravel_schedshm_client for doc,
+ * set only by client.
+ */
+struct um_timetravel_schedshm {
+ union {
+ struct {
+ __u32 version;
+ __u32 len;
+ __u64 free_until;
+ __u64 current_time;
+ __u16 running_id;
+ __u16 max_clients;
+ };
+ char hdr[4096]; /* align to 4K page size */
+ };
+ union um_timetravel_schedshm_client clients[];
+};
#endif /* _UAPI_LINUX_UM_TIMETRAVEL_H */
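A client-side sketch of the mapping dance described in the overview and in @len above (illustration only; error handling elided, and the memfd is assumed to come from the START ACK control message): mmap the fixed-size header first, check the version, then remap to the advertised length.

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>
#include <linux/um_timetravel.h>

static struct um_timetravel_schedshm *foo_map_schedshm(int memfd)
{
	struct um_timetravel_schedshm *shm;

	shm = mmap(NULL, sizeof(shm->hdr), PROT_READ | PROT_WRITE,
		   MAP_SHARED, memfd, 0);
	if (shm == MAP_FAILED || shm->version != UM_TIMETRAVEL_SCHEDSHM_VERSION)
		return NULL;	/* fall back to message-only operation */

	return mremap(shm, sizeof(shm->hdr), shm->len, MREMAP_MAYMOVE);
}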
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 7647e0946f50..b85f24cac3f5 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -117,6 +117,16 @@ struct vdso_data {
struct arch_vdso_data arch_data;
};
+/**
+ * struct vdso_rng_data - vdso RNG state information
+ * @generation: counter representing the number of RNG reseeds
+ * @is_ready: boolean signaling whether the RNG is initialized
+ */
+struct vdso_rng_data {
+ u64 generation;
+ u8 is_ready;
+};
+
/*
* We use the hidden visibility to prevent the compiler from generating a GOT
* relocation. Not only is going through a GOT useless (the entry couldn't and
@@ -128,6 +138,7 @@ struct vdso_data {
*/
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
+extern struct vdso_rng_data _vdso_rng_data __attribute__((visibility("hidden")));
/**
* union vdso_data_store - Generic vDSO data page
diff --git a/include/vdso/getrandom.h b/include/vdso/getrandom.h
new file mode 100644
index 000000000000..a8b7c14b0ae0
--- /dev/null
+++ b/include/vdso/getrandom.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _VDSO_GETRANDOM_H
+#define _VDSO_GETRANDOM_H
+
+#include <linux/types.h>
+
+#define CHACHA_KEY_SIZE 32
+#define CHACHA_BLOCK_SIZE 64
+
+/**
+ * struct vgetrandom_state - State used by vDSO getrandom().
+ *
+ * @batch: One and a half ChaCha20 blocks of buffered RNG output.
+ *
+ * @key: Key to be used for generating next batch.
+ *
+ * @batch_key: Union of the prior two members, which is exactly two full
+ * ChaCha20 blocks in size, so that @batch and @key can be filled
+ * together.
+ *
+ * @generation: Snapshot of @rng_info->generation in the vDSO data page at
+ * the time @key was generated.
+ *
+ * @pos: Offset into @batch of the next available random byte.
+ *
+ * @in_use: Reentrancy guard for reusing a state within the same thread
+ * due to signal handlers.
+ */
+struct vgetrandom_state {
+ union {
+ struct {
+ u8 batch[CHACHA_BLOCK_SIZE * 3 / 2];
+ u32 key[CHACHA_KEY_SIZE / sizeof(u32)];
+ };
+ u8 batch_key[CHACHA_BLOCK_SIZE * 2];
+ };
+ u64 generation;
+ u8 pos;
+ bool in_use;
+};
+
+#endif /* _VDSO_GETRANDOM_H */
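A hedged illustration, assuming only the structures declared above: the generation snapshot lets a vDSO getrandom() implementation detect a reseed. The helper name is illustrative, not the in-tree code.

static bool vgetrandom_batch_is_fresh(const struct vgetrandom_state *state,
                                      const struct vdso_rng_data *rng)
{
        /* A reseed bumps generation in the vDSO data page; a mismatch means
         * the cached key and batch must be regenerated before use. */
        return rng->is_ready && state->generation == rng->generation;
}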
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index ac22cf08c09f..3f90bdd387b6 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -124,10 +124,7 @@ struct xenbus_driver {
void (*reclaim_memory)(struct xenbus_device *dev);
};
-static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
-{
- return container_of(drv, struct xenbus_driver, driver);
-}
+#define to_xenbus_driver(__drv) container_of_const(__drv, struct xenbus_driver, driver)
int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
struct module *owner,
diff --git a/init/Kconfig b/init/Kconfig
index 4b81a49a25c4..a465ea9525bd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1803,24 +1803,6 @@ config KALLSYMS_ABSOLUTE_PERCPU
depends on KALLSYMS
default X86_64 && SMP
-config KALLSYMS_BASE_RELATIVE
- bool
- depends on KALLSYMS
- default y
- help
- Instead of emitting them as absolute values in the native word size,
- emit the symbol references in the kallsyms table as 32-bit entries,
- each containing a relative value in the range [base, base + U32_MAX]
- or, when KALLSYMS_ABSOLUTE_PERCPU is in effect, each containing either
- an absolute value in the range [0, S32_MAX] or a relative value in the
- range [base, base + S32_MAX], where base is the lowest relative symbol
- address encountered in the image.
-
- On 64-bit builds, this reduces the size of the address table by 50%,
- but more importantly, it results in entries whose values are build
- time constants, and no relocation pass is required at runtime to fix
- up the entries based on the runtime load address of the kernel.
-
# end of the "standard kernel features (expert users)" menu
config ARCH_HAS_MEMBARRIER_CALLBACKS
@@ -1942,7 +1924,10 @@ config RUSTC_VERSION_TEXT
config BINDGEN_VERSION_TEXT
string
depends on RUST
- default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version || echo n)
+ # The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
+ # (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
+ # the minimum version is upgraded past that (0.69.1 already fixed the issue).
+ default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version workaround-for-0.69.0 || echo n)
#
# Place an empty function call at each tracepoint site. Can be
diff --git a/init/Makefile b/init/Makefile
index ab71cedc5fd6..10b652d33e87 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -52,7 +52,7 @@ CFLAGS_version.o := -include $(obj)/utsversion-tmp.h
# Build version-timestamp.c with final UTS_VERSION
#
-include/generated/utsversion.h: build-version-auto = $(shell $(src)/build-version)
+include/generated/utsversion.h: build-version-auto = $(shell $(srctree)/scripts/build-version)
include/generated/utsversion.h: build-timestamp-auto = $(shell LC_ALL=C date)
include/generated/utsversion.h: FORCE
$(call filechk,uts_version)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8e6faa942a6f..3942db160f18 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1849,7 +1849,7 @@ fail:
} while (1);
/* avoid locking problems by failing it from a clean context */
- if (ret < 0)
+ if (ret)
io_req_task_queue_fail(req, ret);
}
@@ -2416,12 +2416,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (uts) {
struct timespec64 ts;
+ ktime_t dt;
if (get_timespec64(&ts, uts))
return -EFAULT;
- iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
- io_napi_adjust_timeout(ctx, &iowq, &ts);
+ dt = timespec64_to_ktime(ts);
+ iowq.timeout = ktime_add(dt, ktime_get());
+ io_napi_adjust_timeout(ctx, &iowq, dt);
}
if (sig) {
@@ -3031,8 +3033,11 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
bool loop = false;
io_uring_drop_tctx_refs(current);
+ if (!tctx_inflight(tctx, !cancel_all))
+ break;
+
/* read completions before cancelations */
- inflight = tctx_inflight(tctx, !cancel_all);
+ inflight = tctx_inflight(tctx, false);
if (!inflight)
break;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index e1ce908f0679..c2acf6180845 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -43,7 +43,7 @@ struct io_wait_queue {
ktime_t timeout;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int napi_busy_poll_to;
+ ktime_t napi_busy_poll_dt;
bool napi_prefer_busy_poll;
#endif
};
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 29fa9285a33d..7fd9badcfaf8 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -110,10 +110,10 @@ static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
if (spin_trylock(&ctx->msg_lock)) {
req = io_alloc_cache_get(&ctx->msg_cache);
spin_unlock(&ctx->msg_lock);
+ if (req)
+ return req;
}
- if (req)
- return req;
- return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN);
+ return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
}
static int io_msg_data_remote(struct io_kiocb *req)
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 762254a7ff3f..4fd6bb331e1e 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -33,6 +33,12 @@ static struct io_napi_entry *io_napi_hash_find(struct hlist_head *hash_list,
return NULL;
}
+static inline ktime_t net_to_ktime(unsigned long t)
+{
+ /* napi approximating usecs, reverse busy_loop_current_time */
+ return ns_to_ktime(t << 10);
+}
+
void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock)
{
struct hlist_head *hash_list;
@@ -102,14 +108,14 @@ static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
__io_napi_remove_stale(ctx);
}
-static inline bool io_napi_busy_loop_timeout(unsigned long start_time,
- unsigned long bp_usec)
+static inline bool io_napi_busy_loop_timeout(ktime_t start_time,
+ ktime_t bp)
{
- if (bp_usec) {
- unsigned long end_time = start_time + bp_usec;
- unsigned long now = busy_loop_current_time();
+ if (bp) {
+ ktime_t end_time = ktime_add(start_time, bp);
+ ktime_t now = net_to_ktime(busy_loop_current_time());
- return time_after(now, end_time);
+ return ktime_after(now, end_time);
}
return true;
@@ -124,7 +130,8 @@ static bool io_napi_busy_loop_should_end(void *data,
return true;
if (io_should_wake(iowq) || io_has_work(iowq->ctx))
return true;
- if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
+ if (io_napi_busy_loop_timeout(net_to_ktime(start_time),
+ iowq->napi_busy_poll_dt))
return true;
return false;
@@ -181,10 +188,12 @@ static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
*/
void io_napi_init(struct io_ring_ctx *ctx)
{
+ u64 sys_dt = READ_ONCE(sysctl_net_busy_poll) * NSEC_PER_USEC;
+
INIT_LIST_HEAD(&ctx->napi_list);
spin_lock_init(&ctx->napi_lock);
ctx->napi_prefer_busy_poll = false;
- ctx->napi_busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
+ ctx->napi_busy_poll_dt = ns_to_ktime(sys_dt);
}
/*
@@ -217,11 +226,13 @@ void io_napi_free(struct io_ring_ctx *ctx)
int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
{
const struct io_uring_napi curr = {
- .busy_poll_to = ctx->napi_busy_poll_to,
+ .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt),
.prefer_busy_poll = ctx->napi_prefer_busy_poll
};
struct io_uring_napi napi;
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ return -EINVAL;
if (copy_from_user(&napi, arg, sizeof(napi)))
return -EFAULT;
if (napi.pad[0] || napi.pad[1] || napi.pad[2] || napi.resv)
@@ -230,7 +241,7 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
if (copy_to_user(arg, &curr, sizeof(curr)))
return -EFAULT;
- WRITE_ONCE(ctx->napi_busy_poll_to, napi.busy_poll_to);
+ WRITE_ONCE(ctx->napi_busy_poll_dt, napi.busy_poll_to * NSEC_PER_USEC);
WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi.prefer_busy_poll);
WRITE_ONCE(ctx->napi_enabled, true);
return 0;
@@ -247,14 +258,14 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
{
const struct io_uring_napi curr = {
- .busy_poll_to = ctx->napi_busy_poll_to,
+ .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt),
.prefer_busy_poll = ctx->napi_prefer_busy_poll
};
if (arg && copy_to_user(arg, &curr, sizeof(curr)))
return -EFAULT;
- WRITE_ONCE(ctx->napi_busy_poll_to, 0);
+ WRITE_ONCE(ctx->napi_busy_poll_dt, 0);
WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
WRITE_ONCE(ctx->napi_enabled, false);
return 0;
@@ -271,25 +282,14 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
* the NAPI timeout accordingly.
*/
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
- struct timespec64 *ts)
+ ktime_t to_wait)
{
- unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
-
- if (ts) {
- struct timespec64 poll_to_ts;
-
- poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
- if (timespec64_compare(ts, &poll_to_ts) < 0) {
- s64 poll_to_ns = timespec64_to_ns(ts);
- if (poll_to_ns > 0) {
- u64 val = poll_to_ns + 999;
- do_div(val, 1000);
- poll_to = val;
- }
- }
- }
+ ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
+
+ if (to_wait)
+ poll_dt = min(poll_dt, to_wait);
- iowq->napi_busy_poll_to = poll_to;
+ iowq->napi_busy_poll_dt = poll_dt;
}
/*
@@ -318,7 +318,7 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
LIST_HEAD(napi_list);
bool is_stale = false;
- if (!READ_ONCE(ctx->napi_busy_poll_to))
+ if (!READ_ONCE(ctx->napi_busy_poll_dt))
return 0;
if (list_empty_careful(&ctx->napi_list))
return 0;
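A hedged illustration of the unit handling these napi changes standardize on (not part of the patch): user-supplied busy-poll timeouts in microseconds become ktime_t nanosecond deltas, while busy_loop_current_time(), which is roughly nanoseconds shifted right by 10, is shifted back up by net_to_ktime() so both sides compare as ktime_t.

static ktime_t example_napi_busy_poll_dt(unsigned int busy_poll_to_us)
{
        /* mirrors io_register_napi(): usecs from userspace -> ktime_t ns */
        return ns_to_ktime((u64)busy_poll_to_us * NSEC_PER_USEC);
}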
diff --git a/io_uring/napi.h b/io_uring/napi.h
index 6fc0393d0dbe..88f1c21d5548 100644
--- a/io_uring/napi.h
+++ b/io_uring/napi.h
@@ -18,7 +18,7 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
- struct io_wait_queue *iowq, struct timespec64 *ts);
+ struct io_wait_queue *iowq, ktime_t to_wait);
void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
@@ -29,11 +29,11 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
- struct timespec64 *ts)
+ ktime_t to_wait)
{
if (!io_napi(ctx))
return;
- __io_napi_adjust_timeout(ctx, iowq, ts);
+ __io_napi_adjust_timeout(ctx, iowq, to_wait);
}
static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
@@ -55,7 +55,7 @@ static inline void io_napi_add(struct io_kiocb *req)
struct io_ring_ctx *ctx = req->ctx;
struct socket *sock;
- if (!READ_ONCE(ctx->napi_busy_poll_to))
+ if (!READ_ONCE(ctx->napi_busy_poll_dt))
return;
sock = sock_from_file(req->file);
@@ -88,7 +88,7 @@ static inline void io_napi_add(struct io_kiocb *req)
}
static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
- struct timespec64 *ts)
+ ktime_t to_wait)
{
}
static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 1c9bf07499b1..9973876d91b0 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -639,7 +639,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
bool cancel_all)
- __must_hold(&req->ctx->timeout_lock)
+ __must_hold(&head->ctx->timeout_lock)
{
struct io_kiocb *req;
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index a54163a83968..8391c7c7c1ec 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -265,7 +265,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
req_set_fail(req);
io_req_uring_cleanup(req, issue_flags);
io_req_set_res(req, ret, 0);
- return ret < 0 ? ret : IOU_OK;
+ return IOU_OK;
}
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 9465b0ae470b..54318e0b4557 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -17,7 +17,7 @@
#include <linux/cred.h>
#include "util.h"
-static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
+static int proc_ipc_dointvec_minmax_orphans(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ipc_namespace *ns =
@@ -33,7 +33,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
return err;
}
-static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
+static int proc_ipc_auto_msgmni(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
@@ -48,7 +48,7 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
}
-static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
+static int proc_ipc_sem_dointvec(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ipc_namespace *ns =
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0719192a3482..bf6c5f685ea2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5983,7 +5983,7 @@ const struct bpf_prog_ops bpf_syscall_prog_ops = {
};
#ifdef CONFIG_SYSCTL
-static int bpf_stats_handler(struct ctl_table *table, int write,
+static int bpf_stats_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct static_key *key = (struct static_key *)table->data;
@@ -6018,7 +6018,7 @@ void __weak unpriv_ebpf_notify(int new_state)
{
}
-static int bpf_unpriv_handler(struct ctl_table *table, int write,
+static int bpf_unpriv_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret, unpriv_enable = *(int *)table->data;
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 10b454554ab0..137ba73f56fc 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -144,7 +144,7 @@ kdb_bt(int argc, const char **argv)
kdb_ps_suppressed();
/* Run the active tasks first */
for_each_online_cpu(cpu) {
- p = kdb_curr_task(cpu);
+ p = curr_task(cpu);
if (kdb_bt1(p, mask, btaprompt))
return 0;
}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 3131334d7a81..6a77f1c779c4 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -206,7 +206,7 @@ char kdb_getchar(void)
*/
static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
{
- kdb_printf("\r%s", kdb_prompt_str);
+ kdb_printf("\r%s", prompt);
if (cp > buffer)
kdb_printf("%.*s", (int)(cp - buffer), buffer);
}
@@ -362,7 +362,7 @@ poll_again:
if (i >= dtab_count)
kdb_printf("...");
kdb_printf("\n");
- kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", kdb_prompt_str);
kdb_printf("%s", buffer);
if (cp != lastchar)
kdb_position_cursor(kdb_prompt_str, buffer, cp);
@@ -453,7 +453,7 @@ char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
{
if (prompt && kdb_prompt_str != prompt)
strscpy(kdb_prompt_str, prompt, CMD_BUFLEN);
- kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", kdb_prompt_str);
kdb_nextline = 1; /* Prompt and input resets line number */
return kdb_read(buffer, bufsize);
}
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 664bae55f2c9..f5f7d7fb5936 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -155,16 +155,6 @@ static char *__env[31] = {
static const int __nenv = ARRAY_SIZE(__env);
-struct task_struct *kdb_curr_task(int cpu)
-{
- struct task_struct *p = curr_task(cpu);
-#ifdef _TIF_MCA_INIT
- if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
- p = krp->p;
-#endif
- return p;
-}
-
/*
* Update the permissions flags (kdb_cmd_enabled) to match the
* current lockdown state.
@@ -1228,7 +1218,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
char *cmdbuf;
int diag;
struct task_struct *kdb_current =
- kdb_curr_task(raw_smp_processor_id());
+ curr_task(raw_smp_processor_id());
KDB_DEBUG_STATE("kdb_local 1", reason);
@@ -2278,7 +2268,7 @@ void kdb_ps_suppressed(void)
unsigned long cpu;
const struct task_struct *p, *g;
for_each_online_cpu(cpu) {
- p = kdb_curr_task(cpu);
+ p = curr_task(cpu);
if (kdb_task_state(p, "-"))
++idle;
}
@@ -2314,7 +2304,7 @@ void kdb_ps1(const struct task_struct *p)
kdb_task_has_cpu(p), kdb_process_cpu(p),
kdb_task_state_char(p),
(void *)(&p->thread),
- p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
+ p == curr_task(raw_smp_processor_id()) ? '*' : ' ',
p->comm);
if (kdb_task_has_cpu(p)) {
if (!KDB_TSK(cpu)) {
@@ -2350,7 +2340,7 @@ static int kdb_ps(int argc, const char **argv)
for_each_online_cpu(cpu) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
- p = kdb_curr_task(cpu);
+ p = curr_task(cpu);
if (kdb_task_state(p, mask))
kdb_ps1(p);
}
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 548fd4059bf9..d2520d72b1f5 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -210,8 +210,6 @@ extern void kdb_gdb_state_pass(char *buf);
#define KDB_TSK(cpu) kgdb_info[cpu].task
#define KDB_TSKREGS(cpu) kgdb_info[cpu].debuggerinfo
-extern struct task_struct *kdb_curr_task(int);
-
#define kdb_task_has_cpu(p) (task_curr(p))
#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL)
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index e039b0f99a0b..dead51de8eb5 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -44,7 +44,7 @@ void delayacct_init(void)
}
#ifdef CONFIG_PROC_SYSCTL
-static int sysctl_delayacct(struct ctl_table *table, int write, void *buffer,
+static int sysctl_delayacct(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int state = delayacct_on;
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 81de84318ccc..b1c18058d55f 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -67,8 +67,8 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
{
struct dma_devres match_data = { size, vaddr, dma_handle };
- dma_free_coherent(dev, size, vaddr, dma_handle);
WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
+ dma_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dmam_free_coherent);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 8d57255e5b29..8a47e52a454f 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -270,7 +270,7 @@ exit_put:
* Used for sysctl_perf_event_max_stack and
* sysctl_perf_event_max_contexts_per_stack.
*/
-int perf_event_max_stack_handler(struct ctl_table *table, int write,
+int perf_event_max_stack_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *value = table->data;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index af2e3a06b239..aa3450bdc227 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -450,7 +450,7 @@ static void update_perf_cpu_limits(void)
static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
-int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
+int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -474,7 +474,7 @@ int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
-int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -9328,21 +9328,19 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
int i;
- if (prog->aux->func_cnt == 0) {
- perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
- (u64)(unsigned long)prog->bpf_func,
- prog->jited_len, unregister,
- prog->aux->ksym.name);
- } else {
- for (i = 0; i < prog->aux->func_cnt; i++) {
- struct bpf_prog *subprog = prog->aux->func[i];
-
- perf_event_ksymbol(
- PERF_RECORD_KSYMBOL_TYPE_BPF,
- (u64)(unsigned long)subprog->bpf_func,
- subprog->jited_len, unregister,
- subprog->aux->ksym.name);
- }
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
+ (u64)(unsigned long)prog->bpf_func,
+ prog->jited_len, unregister,
+ prog->aux->ksym.name);
+
+ for (i = 1; i < prog->aux->func_cnt; i++) {
+ struct bpf_prog *subprog = prog->aux->func[i];
+
+ perf_event_ksymbol(
+ PERF_RECORD_KSYMBOL_TYPE_BPF,
+ (u64)(unsigned long)subprog->bpf_func,
+ subprog->jited_len, unregister,
+ subprog->aux->ksym.name);
}
}
diff --git a/kernel/fork.c b/kernel/fork.c
index a8362c26ebcb..cc760491f201 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -3404,7 +3404,7 @@ int unshare_files(void)
return 0;
}
-int sysctl_max_threads(struct ctl_table *table, int write,
+int sysctl_max_threads(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 6ca859715d8a..959d99583d1c 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -239,7 +239,7 @@ static long hung_timeout_jiffies(unsigned long last_checked,
/*
* Process updating of timeout sysctl
*/
-static int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+static int proc_dohung_task_timeout_secs(const struct ctl_table *table, int write,
void *buffer,
size_t *lenp, loff_t *ppos)
{
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 98b9622d372e..fb2c77368d18 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -148,9 +148,6 @@ static unsigned int get_symbol_offset(unsigned long pos)
unsigned long kallsyms_sym_address(int idx)
{
- if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
- return kallsyms_addresses[idx];
-
/* values are unsigned offsets if --absolute-percpu is not in effect */
if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
return kallsyms_relative_base + (u32)kallsyms_offsets[idx];
@@ -325,7 +322,7 @@ static unsigned long get_symbol_pos(unsigned long addr,
unsigned long symbol_start = 0, symbol_end = 0;
unsigned long i, low, high, mid;
- /* Do a binary search on the sorted kallsyms_addresses array. */
+ /* Do a binary search on the sorted kallsyms_offsets array. */
low = 0;
high = kallsyms_num_syms;
diff --git a/kernel/kallsyms_internal.h b/kernel/kallsyms_internal.h
index 85480274fc8f..9633782f8250 100644
--- a/kernel/kallsyms_internal.h
+++ b/kernel/kallsyms_internal.h
@@ -4,12 +4,6 @@
#include <linux/types.h>
-/*
- * These will be re-linked against their real values during the second link
- * stage. Preliminary values must be provided in the linker script using the
- * PROVIDE() directive so that the first link stage can complete successfully.
- */
-extern const unsigned long kallsyms_addresses[];
extern const int kallsyms_offsets[];
extern const u8 kallsyms_names[];
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 9112d69d68b0..c0caa14880c3 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -888,7 +888,7 @@ struct kimage *kexec_crash_image;
static int kexec_load_disabled;
#ifdef CONFIG_SYSCTL
-static int kexec_limit_handler(struct ctl_table *table, int write,
+static int kexec_limit_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct kexec_load_limit *limit = table->data;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6a76a8100073..e85de37d9e1e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -939,7 +939,7 @@ static void unoptimize_all_kprobes(void)
static DEFINE_MUTEX(kprobe_sysctl_mutex);
static int sysctl_kprobes_optimization;
-static int proc_kprobes_optimization_handler(struct ctl_table *table,
+static int proc_kprobes_optimization_handler(const struct ctl_table *table,
int write, void *buffer,
size_t *length, loff_t *ppos)
{
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 84c53285f499..7a75eab9c179 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -65,7 +65,7 @@ static struct latency_record latency_record[MAXLR];
int latencytop_enabled;
#ifdef CONFIG_SYSCTL
-static int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
+static int sysctl_latencytop(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int err;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 52426665eecc..3c21c31796db 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -346,6 +346,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
* /sys/kernel/livepatch/<patch>/enabled
* /sys/kernel/livepatch/<patch>/transition
* /sys/kernel/livepatch/<patch>/force
+ * /sys/kernel/livepatch/<patch>/replace
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/patched
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
@@ -401,7 +402,7 @@ static ssize_t enabled_show(struct kobject *kobj,
struct klp_patch *patch;
patch = container_of(kobj, struct klp_patch, kobj);
- return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
+ return sysfs_emit(buf, "%d\n", patch->enabled);
}
static ssize_t transition_show(struct kobject *kobj,
@@ -410,8 +411,7 @@ static ssize_t transition_show(struct kobject *kobj,
struct klp_patch *patch;
patch = container_of(kobj, struct klp_patch, kobj);
- return snprintf(buf, PAGE_SIZE-1, "%d\n",
- patch == klp_transition_patch);
+ return sysfs_emit(buf, "%d\n", patch == klp_transition_patch);
}
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
@@ -443,13 +443,24 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
return count;
}
+static ssize_t replace_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct klp_patch *patch;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ return sysfs_emit(buf, "%d\n", patch->replace);
+}
+
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
+static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
&transition_kobj_attr.attr,
&force_kobj_attr.attr,
+ &replace_kobj_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(klp_patch);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index bdf0087d6442..d70ab49d5b4a 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -261,7 +261,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
}
#ifdef CONFIG_CHECKPOINT_RESTORE
-static int pid_ns_ctl_handler(struct ctl_table *table, int write,
+static int pid_ns_ctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct pid_namespace *pid_ns = task_active_pid_ns(current);
diff --git a/kernel/pid_sysctl.h b/kernel/pid_sysctl.h
index fe9fb991dc42..18ecaef6be41 100644
--- a/kernel/pid_sysctl.h
+++ b/kernel/pid_sysctl.h
@@ -5,7 +5,7 @@
#include <linux/pid_namespace.h>
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
-static int pid_mfd_noexec_dointvec_minmax(struct ctl_table *table,
+static int pid_mfd_noexec_dointvec_minmax(const struct ctl_table *table,
int write, void *buf, size_t *lenp, loff_t *ppos)
{
struct pid_namespace *ns = task_active_pid_ns(current);
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 6c2afee5ef62..19dcc5832651 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -8,7 +8,7 @@
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
void __init printk_sysctl_init(void);
-int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
+int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#else
#define printk_sysctl_init() do { } while (0)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 7d91593f0ecf..054c0e7784fd 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -197,7 +197,7 @@ __setup("printk.devkmsg=", control_devkmsg);
char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
-int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
+int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char old_str[DEVKMSG_STR_MAX_SIZE];
@@ -4372,15 +4372,15 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
/**
- * console_replay_all - replay kernel log on consoles
+ * console_try_replay_all - try to replay kernel log on consoles
*
* Try to obtain lock on console subsystem and replay all
* available records in printk buffer on the consoles.
* Does nothing if lock is not obtained.
*
- * Context: Any context.
+ * Context: Any, except for NMI.
*/
-void console_replay_all(void)
+void console_try_replay_all(void)
{
if (console_trylock()) {
__console_rewind_all();
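A hedged usage sketch with a hypothetical caller (not part of the patch): per the updated kernel-doc, the renamed helper may be called from any context except NMI and silently does nothing when the console lock cannot be taken.

static void example_replay_kernel_log(void)
{
        if (in_nmi())
                return;                 /* not allowed from NMI context */
        console_try_replay_all();       /* best effort; may do nothing */
}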
diff --git a/kernel/printk/sysctl.c b/kernel/printk/sysctl.c
index 3e47dedce9e5..f5072dc85f7a 100644
--- a/kernel/printk/sysctl.c
+++ b/kernel/printk/sysctl.c
@@ -11,7 +11,7 @@
static const int ten_thousand = 10000;
-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (write && !capable(CAP_SYS_ADMIN))
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ae5ef3013a55..a9f655025607 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1806,7 +1806,7 @@ static void uclamp_sync_util_min_rt_default(void)
uclamp_update_util_min_rt_default(p);
}
-static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
+static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
bool update_root_tg = false;
@@ -4392,7 +4392,7 @@ static void reset_memory_tiering(void)
}
}
-static int sysctl_numa_balancing(struct ctl_table *table, int write,
+static int sysctl_numa_balancing(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
@@ -4461,7 +4461,7 @@ out:
__setup("schedstats=", setup_schedstats);
#ifdef CONFIG_PROC_SYSCTL
-static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
+static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 63e49c8ffc4d..310523c1b9e3 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -26,9 +26,9 @@ int sysctl_sched_rt_runtime = 950000;
#ifdef CONFIG_SYSCTL
static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
-static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
+static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
static struct ctl_table sched_rt_sysctls[] = {
{
@@ -2952,7 +2952,7 @@ static void sched_rt_do_global(void)
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
-static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int old_period, old_runtime;
@@ -2991,7 +2991,7 @@ undo:
return ret;
}
-static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
+static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 784a0be81e84..76504b776d03 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -285,7 +285,7 @@ void rebuild_sched_domains_energy(void)
}
#ifdef CONFIG_PROC_SYSCTL
-static int sched_energy_aware_handler(struct ctl_table *table, int write,
+static int sched_energy_aware_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret, state;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index dc51e521bc1d..385d48293a5f 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -2431,7 +2431,7 @@ static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
return audit_seccomp_actions_logged(new, old, !ret);
}
-static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
+static int seccomp_actions_logged_handler(const struct ctl_table *ro_table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index 0f9712584913..39fd620a7db6 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -21,7 +21,7 @@
static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
#ifdef CONFIG_SYSCTL
-static int stack_erasing_sysctl(struct ctl_table *table, int write,
+static int stack_erasing_sysctl(const struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret = 0;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e4421594fc25..79e6cb1d5c48 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -256,7 +256,7 @@ static bool proc_first_pos_non_zero_ignore(loff_t *ppos,
*
* Returns 0 on success.
*/
-int proc_dostring(struct ctl_table *table, int write,
+int proc_dostring(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (write)
@@ -702,7 +702,7 @@ int do_proc_douintvec(const struct ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_dobool(struct ctl_table *table, int write, void *buffer,
+int proc_dobool(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table tmp;
@@ -739,7 +739,7 @@ int proc_dobool(struct ctl_table *table, int write, void *buffer,
*
* Returns 0 on success.
*/
-int proc_dointvec(struct ctl_table *table, int write, void *buffer,
+int proc_dointvec(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
@@ -758,7 +758,7 @@ int proc_dointvec(struct ctl_table *table, int write, void *buffer,
*
* Returns 0 on success.
*/
-int proc_douintvec(struct ctl_table *table, int write, void *buffer,
+int proc_douintvec(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
return do_proc_douintvec(table, write, buffer, lenp, ppos,
@@ -769,7 +769,7 @@ int proc_douintvec(struct ctl_table *table, int write, void *buffer,
* Taint values can only be increased
* This means we can safely use a temporary.
*/
-static int proc_taint(struct ctl_table *table, int write,
+static int proc_taint(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
@@ -864,7 +864,7 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
*
* Returns 0 on success or -EINVAL on write when the range check fails.
*/
-int proc_dointvec_minmax(struct ctl_table *table, int write,
+int proc_dointvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct do_proc_dointvec_minmax_conv_param param = {
@@ -933,7 +933,7 @@ static int do_proc_douintvec_minmax_conv(unsigned long *lvalp,
*
* Returns 0 on success or -ERANGE on write when the range check fails.
*/
-int proc_douintvec_minmax(struct ctl_table *table, int write,
+int proc_douintvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct do_proc_douintvec_minmax_conv_param param = {
@@ -961,7 +961,7 @@ int proc_douintvec_minmax(struct ctl_table *table, int write,
*
* Returns 0 on success or an error on write when the range check fails.
*/
-int proc_dou8vec_minmax(struct ctl_table *table, int write,
+int proc_dou8vec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table tmp;
@@ -998,7 +998,7 @@ int proc_dou8vec_minmax(struct ctl_table *table, int write,
EXPORT_SYMBOL_GPL(proc_dou8vec_minmax);
#ifdef CONFIG_MAGIC_SYSRQ
-static int sysrq_sysctl_handler(struct ctl_table *table, int write,
+static int sysrq_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int tmp, ret;
@@ -1115,7 +1115,7 @@ static int do_proc_doulongvec_minmax(const struct ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_doulongvec_minmax(struct ctl_table *table, int write,
+int proc_doulongvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l);
@@ -1138,7 +1138,7 @@ int proc_doulongvec_minmax(struct ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
+int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_doulongvec_minmax(table, write, buffer,
@@ -1259,14 +1259,14 @@ static int do_proc_dointvec_ms_jiffies_minmax_conv(bool *negp, unsigned long *lv
*
* Returns 0 on success.
*/
-int proc_dointvec_jiffies(struct ctl_table *table, int write,
+int proc_dointvec_jiffies(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table,write,buffer,lenp,ppos,
do_proc_dointvec_jiffies_conv,NULL);
}
-int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
+int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct do_proc_dointvec_minmax_conv_param param = {
@@ -1292,7 +1292,7 @@ int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
+int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table, write, buffer, lenp, ppos,
@@ -1315,14 +1315,14 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void *buffer,
+int proc_dointvec_ms_jiffies(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table, write, buffer, lenp, ppos,
do_proc_dointvec_ms_jiffies_conv, NULL);
}
-static int proc_do_cad_pid(struct ctl_table *table, int write, void *buffer,
+static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct pid *new_pid;
@@ -1361,7 +1361,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, void *buffer,
*
* Returns 0 on success.
*/
-int proc_do_large_bitmap(struct ctl_table *table, int write,
+int proc_do_large_bitmap(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err = 0;
@@ -1493,85 +1493,85 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
#else /* CONFIG_PROC_SYSCTL */
-int proc_dostring(struct ctl_table *table, int write,
+int proc_dostring(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dobool(struct ctl_table *table, int write,
+int proc_dobool(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec(struct ctl_table *table, int write,
+int proc_dointvec(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_douintvec(struct ctl_table *table, int write,
+int proc_douintvec(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_minmax(struct ctl_table *table, int write,
+int proc_dointvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_douintvec_minmax(struct ctl_table *table, int write,
+int proc_douintvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dou8vec_minmax(struct ctl_table *table, int write,
+int proc_dou8vec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_jiffies(struct ctl_table *table, int write,
+int proc_dointvec_jiffies(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
+int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
+int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_ms_jiffies(struct ctl_table *table, int write,
+int proc_dointvec_ms_jiffies(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_doulongvec_minmax(struct ctl_table *table, int write,
+int proc_doulongvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
+int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_do_large_bitmap(struct ctl_table *table, int write,
+int proc_do_large_bitmap(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
@@ -1580,7 +1580,7 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
#endif /* CONFIG_PROC_SYSCTL */
#if defined(CONFIG_SYSCTL)
-int proc_do_static_key(struct ctl_table *table, int write,
+int proc_do_static_key(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct static_key *key = (struct static_key *)table->data;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 48288dd4a102..64b0d8a0aa0f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -289,7 +289,7 @@ static void timers_update_migration(void)
}
#ifdef CONFIG_SYSCTL
-static int timer_migration_handler(struct ctl_table *table, int write,
+static int timer_migration_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 84413114db5c..8d57f7686bb0 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -475,9 +475,54 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
return bitmap_weight(&active, BIT_CNT) <= 1;
}
-typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, void *);
+/**
+ * struct tmigr_walk - data required for walking the hierarchy
+ * @nextexp: Next CPU event expiry information which is handed into
+ * the timer migration code by the timer code
+ * (get_next_timer_interrupt())
+ * @firstexp: Contains the first event expiry information when the
+ * hierarchy is completely idle. When the CPU itself was the
+ * last one going idle, this information makes sure that the
+ * CPU will be back in time. When using this value in the
+ * remote expiry case, firstexp is stored in the per CPU
+ * tmigr_cpu struct of the CPU which expires remote timers.
+ * It is updated in the top level group only. Be aware that
+ * a new top level of the hierarchy may appear between the
+ * 'top level call' in tmigr_update_events() and the check
+ * for the parent group in walk_groups(). Then @firstexp
+ * might contain a value != KTIME_MAX even if it was not the
+ * final top level. This is not a problem, as the worst
+ * outcome is a CPU which might wake up a little early.
+ * @evt: Pointer to tmigr_event which needs to be queued (of idle
+ * child group)
+ * @childmask: groupmask of child group
+ * @remote: Is set, when the new timer path is executed in
+ * tmigr_handle_remote_cpu()
+ * @basej: timer base in jiffies
+ * @now: timer base monotonic
+ * @check: is set if there is a need to handle remote timers;
+ * required in tmigr_requires_handle_remote() only
+ * @tmc_active: this flag indicates whether the CPU which triggers
+ * the hierarchy walk is !idle in the timer migration
+ * hierarchy. When the CPU is idle and the whole hierarchy is
+ * idle, only the first event of the top level has to be
+ * considered.
+ */
+struct tmigr_walk {
+ u64 nextexp;
+ u64 firstexp;
+ struct tmigr_event *evt;
+ u8 childmask;
+ bool remote;
+ unsigned long basej;
+ u64 now;
+ bool check;
+ bool tmc_active;
+};
+
+typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
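A hedged sketch (purely illustrative, not part of the patch): with struct tmigr_walk now passed directly, an up_f callback no longer needs the void-pointer cast; returning true stops the walk at that level.

static bool example_noop_up(struct tmigr_group *group,
                            struct tmigr_group *child,
                            struct tmigr_walk *data)
{
        return true;    /* stop walking the hierarchy at this group */
}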
-static void __walk_groups(up_f up, void *data,
+static void __walk_groups(up_f up, struct tmigr_walk *data,
struct tmigr_cpu *tmc)
{
struct tmigr_group *child = NULL, *group = tmc->tmgroup;
@@ -490,64 +535,17 @@ static void __walk_groups(up_f up, void *data,
child = group;
group = group->parent;
+ data->childmask = child->groupmask;
} while (group);
}
-static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc)
+static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
{
lockdep_assert_held(&tmc->lock);
__walk_groups(up, data, tmc);
}
-/**
- * struct tmigr_walk - data required for walking the hierarchy
- * @nextexp: Next CPU event expiry information which is handed into
- * the timer migration code by the timer code
- * (get_next_timer_interrupt())
- * @firstexp: Contains the first event expiry information when last
- * active CPU of hierarchy is on the way to idle to make
- * sure CPU will be back in time.
- * @evt: Pointer to tmigr_event which needs to be queued (of idle
- * child group)
- * @childmask: childmask of child group
- * @remote: Is set, when the new timer path is executed in
- * tmigr_handle_remote_cpu()
- */
-struct tmigr_walk {
- u64 nextexp;
- u64 firstexp;
- struct tmigr_event *evt;
- u8 childmask;
- bool remote;
-};
-
-/**
- * struct tmigr_remote_data - data required for remote expiry hierarchy walk
- * @basej: timer base in jiffies
- * @now: timer base monotonic
- * @firstexp: returns expiry of the first timer in the idle timer
- * migration hierarchy to make sure the timer is handled in
- * time; it is stored in the per CPU tmigr_cpu struct of
- * CPU which expires remote timers
- * @childmask: childmask of child group
- * @check: is set if there is the need to handle remote timers;
- * required in tmigr_requires_handle_remote() only
- * @tmc_active: this flag indicates, whether the CPU which triggers
- * the hierarchy walk is !idle in the timer migration
- * hierarchy. When the CPU is idle and the whole hierarchy is
- * idle, only the first event of the top level has to be
- * considered.
- */
-struct tmigr_remote_data {
- unsigned long basej;
- u64 now;
- u64 firstexp;
- u8 childmask;
- bool check;
- bool tmc_active;
-};
-
/*
* Returns the next event of the timerqueue @group->events
*
@@ -618,10 +616,9 @@ static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
static bool tmigr_active_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
union tmigr_state curstate, newstate;
- struct tmigr_walk *data = ptr;
bool walk_done;
u8 childmask;
@@ -649,8 +646,7 @@ static bool tmigr_active_up(struct tmigr_group *group,
} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state));
- if ((walk_done == false) && group->parent)
- data->childmask = group->childmask;
+ trace_tmigr_group_set_cpu_active(group, newstate, childmask);
/*
* The group is active (again). The group event might be still queued
@@ -666,8 +662,6 @@ static bool tmigr_active_up(struct tmigr_group *group,
*/
group->groupevt.ignore = true;
- trace_tmigr_group_set_cpu_active(group, newstate, childmask);
-
return walk_done;
}
@@ -675,7 +669,7 @@ static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
{
struct tmigr_walk data;
- data.childmask = tmc->childmask;
+ data.childmask = tmc->groupmask;
trace_tmigr_cpu_active(tmc);
@@ -860,10 +854,8 @@ unlock:
static bool tmigr_new_timer_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
- struct tmigr_walk *data = ptr;
-
return tmigr_update_events(group, child, data);
}
@@ -995,9 +987,8 @@ unlock:
static bool tmigr_handle_remote_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
- struct tmigr_remote_data *data = ptr;
struct tmigr_event *evt;
unsigned long jif;
u8 childmask;
@@ -1034,12 +1025,10 @@ again:
}
/*
- * Update of childmask for the next level and keep track of the expiry
- * of the first event that needs to be handled (group->next_expiry was
- * updated by tmigr_next_expired_groupevt(), next was set by
- * tmigr_handle_remote_cpu()).
+ * Keep track of the expiry of the first event that needs to be handled
+ * (group->next_expiry was updated by tmigr_next_expired_groupevt(),
+ * next was set by tmigr_handle_remote_cpu()).
*/
- data->childmask = group->childmask;
data->firstexp = group->next_expiry;
raw_spin_unlock_irq(&group->lock);
@@ -1055,12 +1044,12 @@ again:
void tmigr_handle_remote(void)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- struct tmigr_remote_data data;
+ struct tmigr_walk data;
if (tmigr_is_not_available(tmc))
return;
- data.childmask = tmc->childmask;
+ data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX;
/*
@@ -1068,7 +1057,7 @@ void tmigr_handle_remote(void)
* in tmigr_handle_remote_up() anyway. Keep this check to speed up the
* return when nothing has to be done.
*/
- if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
+ if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
/*
* If this CPU was an idle migrator, make sure to clear its wakeup
* value so it won't chase timers that have already expired elsewhere.
@@ -1097,9 +1086,8 @@ void tmigr_handle_remote(void)
static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
- struct tmigr_remote_data *data = ptr;
u8 childmask;
childmask = data->childmask;
@@ -1118,7 +1106,7 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
* group before reading the next_expiry value.
*/
if (group->parent && !data->tmc_active)
- goto out;
+ return false;
/*
* The lock is required on 32bit architectures to read the variable
@@ -1143,9 +1131,6 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
raw_spin_unlock(&group->lock);
}
-out:
- /* Update of childmask for the next level */
- data->childmask = group->childmask;
return false;
}
@@ -1157,7 +1142,7 @@ out:
bool tmigr_requires_handle_remote(void)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- struct tmigr_remote_data data;
+ struct tmigr_walk data;
unsigned long jif;
bool ret = false;
@@ -1165,7 +1150,7 @@ bool tmigr_requires_handle_remote(void)
return ret;
data.now = get_jiffies_update(&jif);
- data.childmask = tmc->childmask;
+ data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX;
data.tmc_active = !tmc->idle;
data.check = false;
@@ -1230,14 +1215,13 @@ u64 tmigr_cpu_new_timer(u64 nextexp)
if (nextexp != tmc->cpuevt.nextevt.expires ||
tmc->cpuevt.ignore) {
ret = tmigr_new_timer(tmc, nextexp);
+ /*
+ * Make sure the reevaluation of timers in idle path
+ * will not miss an event.
+ */
+ WRITE_ONCE(tmc->wakeup, ret);
}
}
- /*
- * Make sure the reevaluation of timers in idle path will not miss an
- * event.
- */
- WRITE_ONCE(tmc->wakeup, ret);
-
trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
raw_spin_unlock(&tmc->lock);
return ret;
@@ -1245,10 +1229,9 @@ u64 tmigr_cpu_new_timer(u64 nextexp)
static bool tmigr_inactive_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
union tmigr_state curstate, newstate, childstate;
- struct tmigr_walk *data = ptr;
bool walk_done;
u8 childmask;
@@ -1299,9 +1282,10 @@ static bool tmigr_inactive_up(struct tmigr_group *group,
WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active));
- if (atomic_try_cmpxchg(&group->migr_state, &curstate.state,
- newstate.state))
+ if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)) {
+ trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
break;
+ }
/*
* The memory barrier is paired with the cmpxchg() in
@@ -1317,22 +1301,6 @@ static bool tmigr_inactive_up(struct tmigr_group *group,
/* Event Handling */
tmigr_update_events(group, child, data);
- if (group->parent && (walk_done == false))
- data->childmask = group->childmask;
-
- /*
- * data->firstexp was set by tmigr_update_events() and contains the
- * expiry of the first global event which needs to be handled. It
- * differs from KTIME_MAX if:
- * - group is the top level group and
- * - group is idle (which means CPU was the last active CPU in the
- * hierarchy) and
- * - there is a pending event in the hierarchy
- */
- WARN_ON_ONCE(data->firstexp != KTIME_MAX && group->parent);
-
- trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
-
return walk_done;
}
@@ -1341,7 +1309,7 @@ static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
struct tmigr_walk data = { .nextexp = nextexp,
.firstexp = KTIME_MAX,
.evt = &tmc->cpuevt,
- .childmask = tmc->childmask };
+ .childmask = tmc->groupmask };
/*
* If nextexp is KTIME_MAX, the CPU event will be ignored because the
@@ -1400,7 +1368,7 @@ u64 tmigr_cpu_deactivate(u64 nextexp)
* the only one in the level 0 group; and if it is the
* only one in level 0 group, but there are more than a
* single group active on the way to top level)
- * * nextevt - when CPU is offline and has to handle timer on his own
+ * * nextevt - when CPU is offline and has to handle timer on its own
* or when on the way to top in every group only a single
* child is active but @nextevt is before the lowest
* next_expiry encountered while walking up to top level.
@@ -1419,7 +1387,7 @@ u64 tmigr_quick_check(u64 nextevt)
if (WARN_ON_ONCE(tmc->idle))
return nextevt;
- if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
+ if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
return KTIME_MAX;
do {
@@ -1442,6 +1410,66 @@ u64 tmigr_quick_check(u64 nextevt)
return KTIME_MAX;
}
+/*
+ * tmigr_trigger_active() - trigger a CPU to become active again
+ *
+ * This function is executed on a CPU which is part of cpu_online_mask when the
+ * last active CPU in the hierarchy is going offline. This ensures that another
+ * CPU is active and takes over the migrator duty.
+ */
+static long tmigr_trigger_active(void *unused)
+{
+ struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+
+ WARN_ON_ONCE(!tmc->online || tmc->idle);
+
+ return 0;
+}
+
+static int tmigr_cpu_offline(unsigned int cpu)
+{
+ struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ int migrator;
+ u64 firstexp;
+
+ raw_spin_lock_irq(&tmc->lock);
+ tmc->online = false;
+ WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+
+ /*
+ * The CPU has to handle the local events on its own when on the way to
+ * offline; therefore the nextevt value is set to KTIME_MAX.
+ */
+ firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
+ trace_tmigr_cpu_offline(tmc);
+ raw_spin_unlock_irq(&tmc->lock);
+
+ if (firstexp != KTIME_MAX) {
+ migrator = cpumask_any_but(cpu_online_mask, cpu);
+ work_on_cpu(migrator, tmigr_trigger_active, NULL);
+ }
+
+ return 0;
+}
+
+static int tmigr_cpu_online(unsigned int cpu)
+{
+ struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+
+ /* Check whether CPU data was successfully initialized */
+ if (WARN_ON_ONCE(!tmc->tmgroup))
+ return -EINVAL;
+
+ raw_spin_lock_irq(&tmc->lock);
+ trace_tmigr_cpu_online(tmc);
+ tmc->idle = timer_base_is_idle();
+ if (!tmc->idle)
+ __tmigr_cpu_activate(tmc);
+ tmc->online = true;
+ raw_spin_unlock_irq(&tmc->lock);
+ return 0;
+}
+
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
int node)
{
@@ -1514,21 +1542,25 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
}
static void tmigr_connect_child_parent(struct tmigr_group *child,
- struct tmigr_group *parent)
+ struct tmigr_group *parent,
+ bool activate)
{
- union tmigr_state childstate;
+ struct tmigr_walk data;
raw_spin_lock_irq(&child->lock);
raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
child->parent = parent;
- child->childmask = BIT(parent->num_children++);
+ child->groupmask = BIT(parent->num_children++);
raw_spin_unlock(&parent->lock);
raw_spin_unlock_irq(&child->lock);
trace_tmigr_connect_child_parent(child);
+ if (!activate)
+ return;
+
/*
* To prevent inconsistent states, active children need to be active in
* the new parent as well. Inactive children are already marked inactive
@@ -1544,21 +1576,24 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
* child to the new parent. So tmigr_connect_child_parent() is
* executed with the formerly top level group (child) and the newly
* created group (parent).
+ *
+ * * It is ensured that the child is active, as this setup path is
+ * executed in the hotplug prepare callback, which runs on an already
+ * connected and !idle CPU. Even if all other CPUs go idle, the CPU
+ * executing the setup remains responsible up to the current top
+ * level group. The next time it goes inactive, it will release the
+ * new childmask and parent to subsequent walkers through this
+ * @child. Therefore propagate the active state unconditionally.
*/
- childstate.state = atomic_read(&child->migr_state);
- if (childstate.migrator != TMIGR_NONE) {
- struct tmigr_walk data;
-
- data.childmask = child->childmask;
+ data.childmask = child->groupmask;
- /*
- * There is only one new level per time. When connecting the
- * child and the parent and set the child active when the parent
- * is inactive, the parent needs to be the uppermost
- * level. Otherwise there went something wrong!
- */
- WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
- }
+ /*
+ * Only one new level is added at a time (protected by tmigr_mutex).
+ * When connecting the child to the parent and setting the child
+ * active while the parent is inactive, the parent has to be the
+ * uppermost level. Otherwise something went wrong!
+ */
+ WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
}
static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
@@ -1611,12 +1646,12 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
* Update tmc -> group / child -> group connection
*/
if (i == 0) {
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
raw_spin_lock_irq(&group->lock);
tmc->tmgroup = group;
- tmc->childmask = BIT(group->num_children++);
+ tmc->groupmask = BIT(group->num_children++);
raw_spin_unlock_irq(&group->lock);
@@ -1626,7 +1661,8 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
continue;
} else {
child = stack[i - 1];
- tmigr_connect_child_parent(child, group);
+ /* Will be activated at online time */
+ tmigr_connect_child_parent(child, group, false);
}
/* check if uppermost level was newly created */
@@ -1637,12 +1673,21 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
lvllist = &tmigr_level_list[top];
if (group->num_children == 1 && list_is_singular(lvllist)) {
+ /*
+ * The target CPU must never do the prepare work, except
+ * on early boot when the boot CPU is the target. Otherwise
+ * it may spuriously activate the old top level group inside
+ * the new one (regardless of whether the old top level group is
+ * active or not) and/or release an uninitialized childmask.
+ */
+ WARN_ON_ONCE(cpu == raw_smp_processor_id());
+
lvllist = &tmigr_level_list[top - 1];
list_for_each_entry(child, lvllist, list) {
if (child->parent)
continue;
- tmigr_connect_child_parent(child, group);
+ tmigr_connect_child_parent(child, group, true);
}
}
}
@@ -1664,80 +1709,31 @@ static int tmigr_add_cpu(unsigned int cpu)
return ret;
}
-static int tmigr_cpu_online(unsigned int cpu)
-{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- int ret;
-
- /* First online attempt? Initialize CPU data */
- if (!tmc->tmgroup) {
- raw_spin_lock_init(&tmc->lock);
-
- ret = tmigr_add_cpu(cpu);
- if (ret < 0)
- return ret;
-
- if (tmc->childmask == 0)
- return -EINVAL;
-
- timerqueue_init(&tmc->cpuevt.nextevt);
- tmc->cpuevt.nextevt.expires = KTIME_MAX;
- tmc->cpuevt.ignore = true;
- tmc->cpuevt.cpu = cpu;
-
- tmc->remote = false;
- WRITE_ONCE(tmc->wakeup, KTIME_MAX);
- }
- raw_spin_lock_irq(&tmc->lock);
- trace_tmigr_cpu_online(tmc);
- tmc->idle = timer_base_is_idle();
- if (!tmc->idle)
- __tmigr_cpu_activate(tmc);
- tmc->online = true;
- raw_spin_unlock_irq(&tmc->lock);
- return 0;
-}
-
-/*
- * tmigr_trigger_active() - trigger a CPU to become active again
- *
- * This function is executed on a CPU which is part of cpu_online_mask, when the
- * last active CPU in the hierarchy is offlining. With this, it is ensured that
- * the other CPU is active and takes over the migrator duty.
- */
-static long tmigr_trigger_active(void *unused)
+static int tmigr_cpu_prepare(unsigned int cpu)
{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
+ int ret = 0;
- WARN_ON_ONCE(!tmc->online || tmc->idle);
-
- return 0;
-}
-
-static int tmigr_cpu_offline(unsigned int cpu)
-{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- int migrator;
- u64 firstexp;
+ /* Not first online attempt? */
+ if (tmc->tmgroup)
+ return ret;
- raw_spin_lock_irq(&tmc->lock);
- tmc->online = false;
+ raw_spin_lock_init(&tmc->lock);
+ timerqueue_init(&tmc->cpuevt.nextevt);
+ tmc->cpuevt.nextevt.expires = KTIME_MAX;
+ tmc->cpuevt.ignore = true;
+ tmc->cpuevt.cpu = cpu;
+ tmc->remote = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
- /*
- * CPU has to handle the local events on his own, when on the way to
- * offline; Therefore nextevt value is set to KTIME_MAX
- */
- firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
- trace_tmigr_cpu_offline(tmc);
- raw_spin_unlock_irq(&tmc->lock);
+ ret = tmigr_add_cpu(cpu);
+ if (ret < 0)
+ return ret;
- if (firstexp != KTIME_MAX) {
- migrator = cpumask_any_but(cpu_online_mask, cpu);
- work_on_cpu(migrator, tmigr_trigger_active, NULL);
- }
+ if (tmc->groupmask == 0)
+ return -EINVAL;
- return 0;
+ return ret;
}
static int __init tmigr_init(void)
@@ -1796,6 +1792,11 @@ static int __init tmigr_init(void)
tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP,
tmigr_crossnode_level);
+ ret = cpuhp_setup_state(CPUHP_TMIGR_PREPARE, "tmigr:prepare",
+ tmigr_cpu_prepare, NULL);
+ if (ret)
+ goto err;
+
ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
tmigr_cpu_online, tmigr_cpu_offline);
if (ret)
@@ -1807,4 +1808,4 @@ err:
pr_err("Timer migration setup failed\n");
return ret;
}
-late_initcall(tmigr_init);
+early_initcall(tmigr_init);
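
For orientation, the split above follows the usual CPU hotplug pattern: a PREPARE-stage callback that sets up per-CPU data before the CPU ever runs, and an AP-stage online/offline pair that runs on the CPU itself. Below is a minimal sketch of that pattern with hypothetical names and dynamic hotplug states, not the tmigr states used in this patch:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* PREPARE stage: runs on a control CPU before @cpu starts executing. */
static int example_cpu_prepare(unsigned int cpu)
{
	/* allocate and initialize per-CPU data for @cpu here */
	return 0;
}

/* AP stage: runs on @cpu itself when it comes online or goes offline. */
static int example_cpu_online(unsigned int cpu)
{
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	return 0;
}

static int __init example_hotplug_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "example:prepare",
				example_cpu_prepare, NULL);
	if (ret < 0)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_offline);
	return ret < 0 ? ret : 0;
}
early_initcall(example_hotplug_init);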
diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h
index 6c37d94a37d9..154accc7a543 100644
--- a/kernel/time/timer_migration.h
+++ b/kernel/time/timer_migration.h
@@ -22,7 +22,17 @@ struct tmigr_event {
* struct tmigr_group - timer migration hierarchy group
* @lock: Lock protecting the event information and group hierarchy
* information during setup
- * @parent: Pointer to the parent group
+ * @parent: Pointer to the parent group. The pointer is updated when
+ * a new hierarchy level is added because a CPU comes
+ * online for the first time. Once it is set, the pointer
+ * will not be removed or updated again. Reading the parent
+ * pointer locklessly to decide whether to abort a
+ * propagation is fine; the worst outcome is an
+ * unnecessary/early CPU wake up. But do not read the
+ * parent pointer several times in the same 'action' (like
+ * activation, deactivation, check for remote expiry, ...)
+ * without holding the lock, as it is not ensured that the
+ * value will not change.
* @groupevt: Next event of the group which is only used when the
* group is !active. The group event is then queued into
* the parent timer queue.
@@ -41,9 +51,8 @@ struct tmigr_event {
* @num_children: Counter of group children to make sure the group is only
* filled with TMIGR_CHILDREN_PER_GROUP; Required for setup
* only
- * @childmask: childmask of the group in the parent group; is set
- * during setup and will never change; can be read
- * lockless
+ * @groupmask: mask of the group in the parent group; is set during
+ * setup and will never change; can be read locklessly
* @list: List head that is added to the per level
* tmigr_level_list; is required during setup when a
* new group needs to be connected to the existing
@@ -59,7 +68,7 @@ struct tmigr_group {
unsigned int level;
int numa_node;
unsigned int num_children;
- u8 childmask;
+ u8 groupmask;
struct list_head list;
};
@@ -79,7 +88,7 @@ struct tmigr_group {
* hierarchy
* @remote: Is set when timers of the CPU are expired remotely
* @tmgroup: Pointer to the parent group
- * @childmask: childmask of tmigr_cpu in the parent group
+ * @groupmask: mask of tmigr_cpu in the parent group
* @wakeup: Stores the first timer when the timer migration
* hierarchy is completely idle and remote expiry was done;
* is returned to timer code in the idle path and is only
@@ -92,7 +101,7 @@ struct tmigr_cpu {
bool idle;
bool remote;
struct tmigr_group *tmgroup;
- u8 childmask;
+ u8 groupmask;
u64 wakeup;
struct tmigr_event cpuevt;
};
@@ -108,8 +117,8 @@ union tmigr_state {
u32 state;
/**
* struct - split state of tmigr_group
- * @active: Contains each childmask bit of the active children
- * @migrator: Contains childmask of the child which is migrator
+ * @active: Contains the mask bits of the active children
+ * @migrator: Contains the mask of the child which is the migrator
* @seq: Sequence counter needs to be increased when an update
* to the tmigr_state is done. It prevents a race when
* updates in the child groups are propagated in changed
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0f579430f02a..4c28dd177ca6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -8735,7 +8735,7 @@ static bool is_permanent_ops_registered(void)
}
static int
-ftrace_enable_sysctl(struct ctl_table *table, int write,
+ftrace_enable_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = -ENODEV;
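
This hunk and the following ones are part of a tree-wide constification: sysctl proc handlers now take a const struct ctl_table *. A minimal sketch of a handler with the new signature follows, using a hypothetical knob that is not part of this patch; the table itself is read-only while the value behind table->data stays writable:

#include <linux/sysctl.h>
#include <linux/printk.h>

/* Assumed to be wired up via an (unshown) ctl_table entry with .data = &example_knob. */
static int example_knob;

static int example_knob_handler(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	/* The generic helpers accept the const table after this series. */
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!ret && write)
		pr_info("example_knob set to %d\n", example_knob);
	return ret;
}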
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 578a49ff5c32..10cd38bce2f1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2767,7 +2767,7 @@ static void output_printk(struct trace_event_buffer *fbuffer)
raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
-int tracepoint_printk_sysctl(struct ctl_table *table, int write,
+int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 3a2b46847c8b..42b0d998d103 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -2885,7 +2885,7 @@ err:
return -ENODEV;
}
-static int set_max_user_events_sysctl(struct ctl_table *table, int write,
+static int set_max_user_events_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 5a48dba912ea..7f9572a37333 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -514,7 +514,7 @@ static const struct file_operations stack_trace_filter_fops = {
#endif /* CONFIG_DYNAMIC_FTRACE */
int
-stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int was_enabled;
diff --git a/kernel/umh.c b/kernel/umh.c
index 598b3ffe1522..ff1f13a27d29 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -495,7 +495,7 @@ int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
EXPORT_SYMBOL(call_usermodehelper);
#if defined(CONFIG_SYSCTL)
-static int proc_cap_handler(struct ctl_table *table, int write,
+static int proc_cap_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 04e4513f2985..7282f61a8650 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -30,7 +30,7 @@ static void *get_uts(const struct ctl_table *table)
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????
*/
-static int proc_do_uts_string(struct ctl_table *table, int write,
+static int proc_do_uts_string(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table uts_table;
diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c
index 1d5eadd9dd61..8b4f8cc2e0ec 100644
--- a/kernel/vmcore_info.c
+++ b/kernel/vmcore_info.c
@@ -216,12 +216,8 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_SYMBOL(kallsyms_num_syms);
VMCOREINFO_SYMBOL(kallsyms_token_table);
VMCOREINFO_SYMBOL(kallsyms_token_index);
-#ifdef CONFIG_KALLSYMS_BASE_RELATIVE
VMCOREINFO_SYMBOL(kallsyms_offsets);
VMCOREINFO_SYMBOL(kallsyms_relative_base);
-#else
- VMCOREINFO_SYMBOL(kallsyms_addresses);
-#endif /* CONFIG_KALLSYMS_BASE_RELATIVE */
#endif /* CONFIG_KALLSYMS */
arch_crash_save_vmcoreinfo();
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51915b44ac73..830a83895493 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -983,7 +983,7 @@ static void proc_watchdog_update(void)
* -------------------|----------------------------------|-------------------------------
* proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
*/
-static int proc_watchdog_common(int which, struct ctl_table *table, int write,
+static int proc_watchdog_common(int which, const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err, old, *param = table->data;
@@ -1010,7 +1010,7 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
/*
* /proc/sys/kernel/watchdog
*/
-static int proc_watchdog(struct ctl_table *table, int write,
+static int proc_watchdog(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
@@ -1021,7 +1021,7 @@ static int proc_watchdog(struct ctl_table *table, int write,
/*
* /proc/sys/kernel/nmi_watchdog
*/
-static int proc_nmi_watchdog(struct ctl_table *table, int write,
+static int proc_nmi_watchdog(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!watchdog_hardlockup_available && write)
@@ -1034,7 +1034,7 @@ static int proc_nmi_watchdog(struct ctl_table *table, int write,
/*
* /proc/sys/kernel/soft_watchdog
*/
-static int proc_soft_watchdog(struct ctl_table *table, int write,
+static int proc_soft_watchdog(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
@@ -1045,7 +1045,7 @@ static int proc_soft_watchdog(struct ctl_table *table, int write,
/*
* /proc/sys/kernel/watchdog_thresh
*/
-static int proc_watchdog_thresh(struct ctl_table *table, int write,
+static int proc_watchdog_thresh(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err, old;
@@ -1068,7 +1068,7 @@ static int proc_watchdog_thresh(struct ctl_table *table, int write,
* user to specify a mask that will include cpus that have not yet
* been brought online, if desired.
*/
-static int proc_watchdog_cpumask(struct ctl_table *table, int write,
+static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err;
diff --git a/lib/cpumask_kunit.c b/lib/cpumask_kunit.c
index a105e6369efc..6b62a6bdd50e 100644
--- a/lib/cpumask_kunit.c
+++ b/lib/cpumask_kunit.c
@@ -152,4 +152,5 @@ static struct kunit_suite test_cpumask_suite = {
};
kunit_test_suite(test_cpumask_suite);
+MODULE_DESCRIPTION("KUnit tests for cpumask");
MODULE_LICENSE("GPL");
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 3518e7394eca..ca736166f100 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
RUNB) */
symCount = symTotal+2;
for (j = 0; j < groupCount; j++) {
- unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+ unsigned char length[MAX_SYMBOLS];
+ unsigned short temp[MAX_HUFCODE_BITS+1];
int minLen, maxLen, pp;
/* Read Huffman code lengths for each symbol. They're
stored in a way similar to mtf; record a starting
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index d3fb09e6eff1..402e160e7186 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -194,4 +194,5 @@ static int __init find_bit_test(void)
}
module_init(find_bit_test);
+MODULE_DESCRIPTION("Test for find_*_bit functions");
MODULE_LICENSE("GPL");
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 03b427e2707e..b7f2fa08d9c8 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -433,8 +433,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
len = strlen(env->envp[i]) + 1;
if (i != env->envp_idx - 1) {
+ /* @env->envp[] contains pointers to @env->buf[]
+ * with @env->buflen chars, and we are removing
+ * the variable MODALIAS pointed to by @env->envp[i]
+ * with length @len as shown below:
+ *
+ * 0 @env->buf[] @env->buflen
+ * ---------------------------------------------
+ * ^ ^ ^ ^
+ * | |-> @len <-| target block |
+ * @env->envp[0] @env->envp[i] @env->envp[i + 1]
+ *
+ * so the "target block" indicated above is moved
+ * backward by @len, and its correct size is
+ * @env->buflen - (@env->envp[i + 1] - @env->envp[0]).
+ */
memmove(env->envp[i], env->envp[i + 1],
- env->buflen - len);
+ env->buflen - (env->envp[i + 1] - env->envp[0]));
for (j = i; j < env->envp_idx - 1; j++)
env->envp[j] = env->envp[j + 1] - len;
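
The corrected size above is the number of bytes between the start of the next variable and the end of the used buffer. The same operation on a packed buffer of NUL-terminated strings, as a standalone sketch with hypothetical names (not the kobject code):

#include <stddef.h>
#include <string.h>

/* Remove entry @i from a packed buffer of @count NUL-terminated strings.
 * @buf is the base of the buffer, @used the number of bytes in use,
 * @envp[] points at the individual strings inside @buf. Returns the new
 * number of used bytes. */
static size_t remove_packed_string(char *buf, char **envp, int count, int i,
				   size_t used)
{
	size_t len = strlen(envp[i]) + 1;	/* entry plus its NUL */

	if (i != count - 1) {
		/* Bytes to keep start at the next entry and run to the end of
		 * the used area, i.e. used - (envp[i + 1] - buf) bytes. */
		memmove(envp[i], envp[i + 1], used - (envp[i + 1] - buf));
		for (int j = i; j < count - 1; j++)
			envp[j] = envp[j + 1] - len;
	}
	return used - len;
}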
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 6dfb8d46a4ff..65a75d58ed9e 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -1486,4 +1486,5 @@ static void __init selftest(void)
KSTM_MODULE_LOADERS(test_bitmap);
MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>");
+MODULE_DESCRIPTION("Test cases for bitmap API");
MODULE_LICENSE("GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 69b6a5e177f2..965cb6f28527 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -824,4 +824,5 @@ static void __init selftest(void)
KSTM_MODULE_LOADERS(test_printf);
MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
+MODULE_DESCRIPTION("Test cases for printf facility");
MODULE_LICENSE("GPL");
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index a2707af2951a..7257b1768545 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -810,4 +810,5 @@ static void __init selftest(void)
KSTM_MODULE_LOADERS(test_scanf);
MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Test cases for sscanf facility");
MODULE_LICENSE("GPL v2");
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 499a7a7d54db..7b17b83c8042 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -12,40 +12,18 @@
/* out-of-line parts */
-#ifndef INLINE_COPY_FROM_USER
+#if !defined(INLINE_COPY_FROM_USER) || defined(CONFIG_RUST)
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long res = n;
- might_fault();
- if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- /*
- * Ensure that bad access_ok() speculation will not
- * lead to nasty side effects *after* the copy is
- * finished:
- */
- barrier_nospec();
- instrument_copy_from_user_before(to, from, n);
- res = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, res);
- }
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
+ return _inline_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(_copy_from_user);
#endif
-#ifndef INLINE_COPY_TO_USER
+#if !defined(INLINE_COPY_TO_USER) || defined(CONFIG_RUST)
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_fault();
- if (should_fail_usercopy())
- return n;
- if (likely(access_ok(to, n))) {
- instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
- }
- return n;
+ return _inline_copy_to_user(to, from, n);
}
EXPORT_SYMBOL(_copy_to_user);
#endif
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index c46c2300517c..82fe827af542 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -38,3 +38,8 @@ config GENERIC_VDSO_OVERFLOW_PROTECT
in the hotpath.
endif
+
+config VDSO_GETRANDOM
+ bool
+ help
+ Selected by architectures that support vDSO getrandom().
diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
new file mode 100644
index 000000000000..b230f0b10832
--- /dev/null
+++ b/lib/vdso/getrandom.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/time64.h>
+#include <vdso/datapage.h>
+#include <vdso/getrandom.h>
+#include <asm/vdso/getrandom.h>
+#include <asm/vdso/vsyscall.h>
+#include <asm/unaligned.h>
+#include <uapi/linux/mman.h>
+
+#define MEMCPY_AND_ZERO_SRC(type, dst, src, len) do { \
+ while (len >= sizeof(type)) { \
+ __put_unaligned_t(type, __get_unaligned_t(type, src), dst); \
+ __put_unaligned_t(type, 0, src); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ } \
+} while (0)
+
+static void memcpy_and_zero_src(void *dst, void *src, size_t len)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ if (IS_ENABLED(CONFIG_64BIT))
+ MEMCPY_AND_ZERO_SRC(u64, dst, src, len);
+ MEMCPY_AND_ZERO_SRC(u32, dst, src, len);
+ MEMCPY_AND_ZERO_SRC(u16, dst, src, len);
+ }
+ MEMCPY_AND_ZERO_SRC(u8, dst, src, len);
+}
+
+/**
+ * __cvdso_getrandom_data - Generic vDSO implementation of getrandom() syscall.
+ * @rng_info: Describes state of kernel RNG, memory shared with kernel.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * @opaque_state: Pointer to an opaque state area.
+ * @opaque_len: Length of opaque state area.
+ *
+ * This implements a "fast key erasure" RNG using ChaCha20, in the same way that the kernel's
+ * getrandom() syscall does. It periodically reseeds its key from the kernel's RNG, at the same
+ * schedule that the kernel's RNG is reseeded. If the kernel's RNG is not ready, then this always
+ * calls into the syscall.
+ *
+ * If @buffer, @len, and @flags are 0, and @opaque_len is ~0UL, then @opaque_state is populated
+ * with a struct vgetrandom_opaque_params and the function returns 0; if it does not return 0,
+ * this function should not be used.
+ *
+ * @opaque_state *must* be allocated by calling mmap(2) using the mmap_prot and mmap_flags fields
+ * from the struct vgetrandom_opaque_params, and states must not straddle pages. Unless external
+ * locking is used, one state must be allocated per thread, as it is not safe to call this function
+ * concurrently with the same @opaque_state. However, it is safe to call this using the same
+ * @opaque_state that is shared between main code and signal handling code, within the same thread.
+ *
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t
+__cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_t len,
+ unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ ssize_t ret = min_t(size_t, INT_MAX & PAGE_MASK /* = MAX_RW_COUNT */, len);
+ struct vgetrandom_state *state = opaque_state;
+ size_t batch_len, nblocks, orig_len = len;
+ bool in_use, have_retried = false;
+ unsigned long current_generation;
+ void *orig_buffer = buffer;
+ u32 counter[2] = { 0 };
+
+ if (unlikely(opaque_len == ~0UL && !buffer && !len && !flags)) {
+ *(struct vgetrandom_opaque_params *)opaque_state = (struct vgetrandom_opaque_params) {
+ .size_of_opaque_state = sizeof(*state),
+ .mmap_prot = PROT_READ | PROT_WRITE,
+ .mmap_flags = MAP_DROPPABLE | MAP_ANONYMOUS
+ };
+ return 0;
+ }
+
+ /* The state must not straddle a page, since pages can be zeroed at any time. */
+ if (unlikely(((unsigned long)opaque_state & ~PAGE_MASK) + sizeof(*state) > PAGE_SIZE))
+ return -EFAULT;
+
+ /* If the caller passes the wrong size, which might happen due to CRIU, fallback. */
+ if (unlikely(opaque_len != sizeof(*state)))
+ goto fallback_syscall;
+
+ /*
+ * If the kernel's RNG is not yet ready, then it's not possible to provide random bytes from
+ * userspace, because A) the various @flags require this to block, or not, depending on
+ * various factors unavailable to userspace, and B) the kernel's behavior before the RNG is
+ * ready is to reseed from the entropy pool at every invocation.
+ */
+ if (unlikely(!READ_ONCE(rng_info->is_ready)))
+ goto fallback_syscall;
+
+ /*
+ * This condition is checked after @rng_info->is_ready, because before the kernel's RNG is
+ * initialized, the @flags parameter may require this to block or return an error, even when
+ * len is zero.
+ */
+ if (unlikely(!len))
+ return 0;
+
+ /*
+ * @state->in_use is basic reentrancy protection against this running in a signal handler
+ * with the same @opaque_state, but obviously not atomic wrt multiple CPUs or more than one
+ * level of reentrancy. If a signal interrupts this after reading @state->in_use, but before
+ * writing @state->in_use, there is still no race, because the signal handler will run to
+ * its completion before returning execution.
+ */
+ in_use = READ_ONCE(state->in_use);
+ if (unlikely(in_use))
+ /* The syscall simply fills the buffer and does not touch @state, so fallback. */
+ goto fallback_syscall;
+ WRITE_ONCE(state->in_use, true);
+
+retry_generation:
+ /*
+ * @rng_info->generation must always be read here, as it serializes @state->key with the
+ * kernel's RNG reseeding schedule.
+ */
+ current_generation = READ_ONCE(rng_info->generation);
+
+ /*
+ * If @state->generation doesn't match the kernel RNG's generation, then it means the
+ * kernel's RNG has reseeded, and so @state->key is reseeded as well.
+ */
+ if (unlikely(state->generation != current_generation)) {
+ /*
+ * Write the generation before filling the key, in case of fork. If there is a fork
+ * just after this line, the parent and child will get different random bytes from
+ * the syscall, which is good. However, were this line to occur after the getrandom
+ * syscall, then both child and parent could have the same bytes and the same
+ * generation counter, so the fork would not be detected. Therefore, write
+ * @state->generation before the call to the getrandom syscall.
+ */
+ WRITE_ONCE(state->generation, current_generation);
+
+ /*
+ * Prevent the syscall from being reordered wrt current_generation. Pairs with the
+ * smp_store_release(&_vdso_rng_data.generation) in random.c.
+ */
+ smp_rmb();
+
+ /* Reseed @state->key using fresh bytes from the kernel. */
+ if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key)) {
+ /*
+ * If the syscall failed to refresh the key, then @state->key is now
+ * invalid, so invalidate the generation so that it is not used again, and
+ * fallback to using the syscall entirely.
+ */
+ WRITE_ONCE(state->generation, 0);
+
+ /*
+ * Set @state->in_use to false only after the last write to @state in the
+ * line above.
+ */
+ WRITE_ONCE(state->in_use, false);
+
+ goto fallback_syscall;
+ }
+
+ /*
+ * Set @state->pos to beyond the end of the batch, so that the batch is refilled
+ * using the new key.
+ */
+ state->pos = sizeof(state->batch);
+ }
+
+ /* Set len to ret, the total number of bytes that this function is allowed to read. */
+ len = ret;
+more_batch:
+ /*
+ * First use bytes out of @state->batch, which may have been filled by the last call to this
+ * function.
+ */
+ batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
+ if (batch_len) {
+ /* Zeroing at the same time as memcpying helps preserve forward secrecy. */
+ memcpy_and_zero_src(buffer, state->batch + state->pos, batch_len);
+ state->pos += batch_len;
+ buffer += batch_len;
+ len -= batch_len;
+ }
+
+ if (!len) {
+ /* Prevent the loop from being reordered wrt ->generation. */
+ barrier();
+
+ /*
+ * Since @rng_info->generation will never be 0, re-read @state->generation, rather
+ * than using the local current_generation variable, to learn whether a fork
+ * occurred or if @state was zeroed due to memory pressure. Primarily, though, this
+ * indicates whether the kernel's RNG has reseeded, in which case generate a new key
+ * and start over.
+ */
+ if (unlikely(READ_ONCE(state->generation) != READ_ONCE(rng_info->generation))) {
+ /*
+ * Prevent this from looping forever in case of low memory or racing with a
+ * user force-reseeding the kernel's RNG using the ioctl.
+ */
+ if (have_retried) {
+ WRITE_ONCE(state->in_use, false);
+ goto fallback_syscall;
+ }
+
+ have_retried = true;
+ buffer = orig_buffer;
+ goto retry_generation;
+ }
+
+ /*
+ * Set @state->in_use to false only when there will be no more reads or writes of
+ * @state.
+ */
+ WRITE_ONCE(state->in_use, false);
+ return ret;
+ }
+
+ /* Generate blocks of RNG output directly into @buffer while there's enough room left. */
+ nblocks = len / CHACHA_BLOCK_SIZE;
+ if (nblocks) {
+ __arch_chacha20_blocks_nostack(buffer, state->key, counter, nblocks);
+ buffer += nblocks * CHACHA_BLOCK_SIZE;
+ len -= nblocks * CHACHA_BLOCK_SIZE;
+ }
+
+ BUILD_BUG_ON(sizeof(state->batch_key) % CHACHA_BLOCK_SIZE != 0);
+
+ /* Refill the batch and overwrite the key, in order to preserve forward secrecy. */
+ __arch_chacha20_blocks_nostack(state->batch_key, state->key, counter,
+ sizeof(state->batch_key) / CHACHA_BLOCK_SIZE);
+
+ /* Since the batch was just refilled, set the position back to 0 to indicate a full batch. */
+ state->pos = 0;
+ goto more_batch;
+
+fallback_syscall:
+ return getrandom_syscall(orig_buffer, orig_len, flags);
+}
+
+static __always_inline ssize_t
+__cvdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ return __cvdso_getrandom_data(__arch_get_vdso_rng_data(), buffer, len, flags, opaque_state, opaque_len);
+}
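
The opaque-state contract documented above translates into roughly the following userspace allocation flow. This is only a sketch: vgetrandom() stands for the vDSO symbol resolved by the caller, and the struct layout mirrors the uapi definition, with the size of the reserved field being an assumption here:

#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

struct vgetrandom_opaque_params {
	unsigned int size_of_opaque_state;
	unsigned int mmap_prot;
	unsigned int mmap_flags;
	unsigned int reserved[13];	/* size assumed; see the uapi headers */
};

/* Assumed to be resolved from the vDSO by the caller. */
extern ssize_t vgetrandom(void *buffer, size_t len, unsigned int flags,
			  void *opaque_state, size_t opaque_len);

static void *alloc_vgetrandom_state(void)
{
	struct vgetrandom_opaque_params params;
	void *state;

	/* buffer == NULL, len == 0, flags == 0, opaque_len == ~0UL: query params. */
	if (vgetrandom(NULL, 0, 0, &params, ~0UL) != 0)
		return NULL;

	/* One state per thread; the state must not straddle a page. */
	state = mmap(NULL, params.size_of_opaque_state, params.mmap_prot,
		     params.mmap_flags, -1, 0);
	return state == MAP_FAILED ? NULL : state;
}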
diff --git a/mm/compaction.c b/mm/compaction.c
index 6cb901b63482..eb95e9b435d0 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2962,7 +2962,7 @@ static int compact_nodes(void)
return 0;
}
-static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
+static int compaction_proactiveness_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc, nid;
@@ -2992,7 +2992,7 @@ static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int
* This is the entry point for compacting all nodes via
* /proc/sys/vm/compact_memory
*/
-static int sysctl_compaction_handler(struct ctl_table *table, int write,
+static int sysctl_compaction_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int ret;
@@ -3303,7 +3303,7 @@ static int kcompactd_cpu_online(unsigned int cpu)
return 0;
}
-static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
+static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table,
int write, void *buffer, size_t *lenp, loff_t *ppos)
{
int ret, old;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f9696c94e211..f4be468e06a4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -89,9 +89,17 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
bool smaps = tva_flags & TVA_SMAPS;
bool in_pf = tva_flags & TVA_IN_PF;
bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
+ unsigned long supported_orders;
+
/* Check the intersection of requested and supported orders. */
- orders &= vma_is_anonymous(vma) ?
- THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
+ if (vma_is_anonymous(vma))
+ supported_orders = THP_ORDERS_ALL_ANON;
+ else if (vma_is_dax(vma))
+ supported_orders = THP_ORDERS_ALL_FILE_DAX;
+ else
+ supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
+
+ orders &= supported_orders;
if (!orders)
return 0;
@@ -877,7 +885,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
loff_t off_align = round_up(off, size);
unsigned long len_pad, ret, off_sub;
- if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+ if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
return 0;
if (off_end <= off_align || (off_end - off_align) < size)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0858a1827207..aaf508be0a2b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4925,7 +4925,7 @@ out:
return ret;
}
-static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+static int hugetlb_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
@@ -4934,7 +4934,7 @@ static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
}
#ifdef CONFIG_NUMA
-static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
+static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
return hugetlb_sysctl_handler_common(true, table, write,
@@ -4942,7 +4942,7 @@ static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
}
#endif /* CONFIG_NUMA */
-static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+static int hugetlb_overcommit_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
diff --git a/mm/ksm.c b/mm/ksm.c
index df6bae3a5a2c..14d9e53b1ec2 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -713,7 +713,7 @@ static bool vma_ksm_compatible(struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
VM_IO | VM_DONTEXPAND | VM_HUGETLB |
- VM_MIXEDMAP))
+ VM_MIXEDMAP | VM_DROPPABLE))
return false; /* just ignore the advice */
if (vma_is_dax(vma))
diff --git a/mm/madvise.c b/mm/madvise.c
index 96c026fe0c99..89089d84f8df 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1068,13 +1068,16 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
new_flags |= VM_WIPEONFORK;
break;
case MADV_KEEPONFORK:
+ if (vma->vm_flags & VM_DROPPABLE)
+ return -EINVAL;
new_flags &= ~VM_WIPEONFORK;
break;
case MADV_DONTDUMP:
new_flags |= VM_DONTDUMP;
break;
case MADV_DODUMP:
- if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
+ if ((!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) ||
+ (vma->vm_flags & VM_DROPPABLE))
return -EINVAL;
new_flags &= ~VM_DONTDUMP;
break;
diff --git a/mm/memory.c b/mm/memory.c
index 4bcd79619574..34f8402d2046 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4780,7 +4780,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
{
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
- bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
+ bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
pte_t entry;
flush_icache_pages(vma, page, nr);
@@ -5801,6 +5801,7 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
/* If the fault handler drops the mmap_lock, vma may be freed */
struct mm_struct *mm = vma->vm_mm;
vm_fault_t ret;
+ bool is_droppable;
__set_current_state(TASK_RUNNING);
@@ -5815,6 +5816,8 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
goto out;
}
+ is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
+
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.
@@ -5829,8 +5832,18 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
else
ret = __handle_mm_fault(vma, address, flags);
+ /*
+ * Warning: It is no longer safe to dereference vma-> after this point,
+ * because mmap_lock might have been dropped by __handle_mm_fault(), so
+ * vma might be destroyed from underneath us.
+ */
+
lru_gen_exit_fault();
+ /* If the mapping is droppable, then errors due to OOM aren't fatal. */
+ if (is_droppable)
+ ret &= ~VM_FAULT_OOM;
+
if (flags & FAULT_FLAG_USER) {
mem_cgroup_exit_user_fault();
/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 327a19b0883d..b858e22b259d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2305,6 +2305,9 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct
pgoff_t ilx;
struct folio *folio;
+ if (vma->vm_flags & VM_DROPPABLE)
+ gfp |= __GFP_NOWARN;
+
pol = get_vma_policy(vma, addr, order, &ilx);
folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
mpol_cond_put(pol);
diff --git a/mm/mlock.c b/mm/mlock.c
index 52d6e401ad67..e3e3dc2b2956 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -474,7 +474,7 @@ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
- vma_is_dax(vma) || vma_is_secretmem(vma))
+ vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index e42d89f98071..d0dfc85b209b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1410,6 +1410,36 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
pgoff = 0;
vm_flags |= VM_SHARED | VM_MAYSHARE;
break;
+ case MAP_DROPPABLE:
+ if (VM_DROPPABLE == VM_NONE)
+ return -ENOTSUPP;
+ /*
+ * A locked or stack area makes no sense to be droppable.
+ *
+ * Also, since droppable pages can just go away at any time
+ * it makes no sense to copy them on fork or dump them.
+ *
+ * And don't attempt to combine with hugetlb for now.
+ */
+ if (flags & (MAP_LOCKED | MAP_HUGETLB))
+ return -EINVAL;
+ if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
+ return -EINVAL;
+
+ vm_flags |= VM_DROPPABLE;
+
+ /*
+ * If the pages can be dropped, then it doesn't make
+ * sense to reserve them.
+ */
+ vm_flags |= VM_NORESERVE;
+
+ /*
+ * Likewise, they're volatile enough that they
+ * shouldn't survive forks or coredumps.
+ */
+ vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
+ fallthrough;
case MAP_PRIVATE:
/*
* Set pgoff according to addr for anon_vma.
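
Taken together, MAP_DROPPABLE is meant for caches whose pages the kernel is free to reclaim (and zero) at any time, which is exactly what the vDSO getrandom state above relies on. A minimal userspace sketch; the MAP_DROPPABLE value is assumed to come from the uapi mman headers added elsewhere in this series:

#include <stddef.h>
#include <sys/mman.h>

#ifndef MAP_DROPPABLE
#define MAP_DROPPABLE 0x08	/* assumed value, see uapi mman.h in this series */
#endif

/* Pages in this mapping may be dropped and later read back as zeros under
 * memory pressure, so callers must be able to regenerate the cached contents. */
static void *alloc_droppable_cache(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_DROPPABLE | MAP_ANONYMOUS, -1, 0);

	return p == MAP_FAILED ? NULL : p;
}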
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index acff24e9fae4..4430ac68e4c4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -506,7 +506,7 @@ bool node_dirty_ok(struct pglist_data *pgdat)
}
#ifdef CONFIG_SYSCTL
-static int dirty_background_ratio_handler(struct ctl_table *table, int write,
+static int dirty_background_ratio_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -517,7 +517,7 @@ static int dirty_background_ratio_handler(struct ctl_table *table, int write,
return ret;
}
-static int dirty_background_bytes_handler(struct ctl_table *table, int write,
+static int dirty_background_bytes_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -535,7 +535,7 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
return ret;
}
-static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
+static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int old_ratio = vm_dirty_ratio;
@@ -549,7 +549,7 @@ static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
return ret;
}
-static int dirty_bytes_handler(struct ctl_table *table, int write,
+static int dirty_bytes_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned long old_bytes = vm_dirty_bytes;
@@ -2203,7 +2203,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
/*
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
-static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
+static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
unsigned int old_interval = dirty_writeback_interval;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3398d914ed83..28f80daf5c04 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2343,16 +2343,20 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
- int count = READ_ONCE(pcp->count);
-
- while (count) {
- int to_drain = min(count, pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
- count -= to_drain;
+ int count;
+ do {
spin_lock(&pcp->lock);
- free_pcppages_bulk(zone, to_drain, pcp, 0);
+ count = pcp->count;
+ if (count) {
+ int to_drain = min(count,
+ pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
+
+ free_pcppages_bulk(zone, to_drain, pcp, 0);
+ count -= to_drain;
+ }
spin_unlock(&pcp->lock);
- }
+ } while (count);
}
/*
@@ -5127,7 +5131,7 @@ static char numa_zonelist_order[] = "Node";
/*
* sysctl handler for numa_zonelist_order
*/
-static int numa_zonelist_order_handler(struct ctl_table *table, int write,
+static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
if (write)
@@ -5815,6 +5819,23 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
return pages;
}
+void free_reserved_page(struct page *page)
+{
+ if (mem_alloc_profiling_enabled()) {
+ union codetag_ref *ref = get_page_tag_ref(page);
+
+ if (ref) {
+ set_codetag_empty(ref);
+ put_page_tag_ref(ref);
+ }
+ }
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ adjust_managed_page_count(page, 1);
+}
+EXPORT_SYMBOL(free_reserved_page);
+
static int page_alloc_cpu_dead(unsigned int cpu)
{
struct zone *zone;
@@ -6091,7 +6112,7 @@ postcore_initcall(init_per_zone_wmark_min)
* that we can call two helper functions whenever min_free_kbytes
* changes.
*/
-static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
+static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
@@ -6107,7 +6128,7 @@ static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
return 0;
}
-static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
+static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
@@ -6137,7 +6158,7 @@ static void setup_min_unmapped_ratio(void)
}
-static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
+static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
@@ -6164,7 +6185,7 @@ static void setup_min_slab_ratio(void)
sysctl_min_slab_ratio) / 100;
}
-static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
+static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
@@ -6188,7 +6209,7 @@ static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int wri
* minimum watermarks. The lowmem reserve ratio can only make sense
* if in function of the boot time zone sizes.
*/
-static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
+static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
int write, void *buffer, size_t *length, loff_t *ppos)
{
int i;
@@ -6209,7 +6230,7 @@ static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
* cpu. It is the fraction of total pages in each zone that a hot per cpu
* pagelist can have before it gets flushed back to buddy allocator.
*/
-static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
+static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
int write, void *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
diff --git a/mm/rmap.c b/mm/rmap.c
index 8616308610b9..2490e727e2dc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1412,7 +1412,11 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
VM_BUG_ON_VMA(address < vma->vm_start ||
address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
- if (!folio_test_swapbacked(folio))
+ /*
+ * VM_DROPPABLE mappings don't swap; instead they're just dropped when
+ * under memory pressure.
+ */
+ if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
__folio_set_swapbacked(folio);
__folio_set_anon(folio, vma, address, exclusive);
@@ -1848,7 +1852,13 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* plus the rmap(s) (dropped by discard:).
*/
if (ref_count == 1 + map_count &&
- !folio_test_dirty(folio)) {
+ (!folio_test_dirty(folio) ||
+ /*
+ * Unlike MADV_FREE mappings, VM_DROPPABLE
+ * ones can be dropped even if they've
+ * been dirtied.
+ */
+ (vma->vm_flags & VM_DROPPABLE))) {
dec_mm_counter(mm, MM_ANONPAGES);
goto discard;
}
@@ -1858,7 +1868,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* discarded. Remap the page to page table.
*/
set_pte_at(mm, address, pvmw.pte, pteval);
- folio_set_swapbacked(folio);
+ /*
+ * Unlike MADV_FREE mappings, VM_DROPPABLE ones
+ * never get swap backed on failure to drop.
+ */
+ if (!(vma->vm_flags & VM_DROPPABLE))
+ folio_set_swapbacked(folio);
goto walk_abort;
}
diff --git a/mm/util.c b/mm/util.c
index bc488f0121a7..bd283e2132e0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -868,7 +868,7 @@ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
-int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
@@ -884,7 +884,7 @@ static void sync_overcommit_as(struct work_struct *dummy)
percpu_counter_sync(&vm_committed_as);
}
-int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
@@ -920,7 +920,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
return ret;
}
-int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 525d3ffa8451..cfa839284b92 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4301,15 +4301,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
return true;
}
- /* dirty lazyfree */
- if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) {
- success = lru_gen_del_folio(lruvec, folio, true);
- VM_WARN_ON_ONCE_FOLIO(!success, folio);
- folio_set_swapbacked(folio);
- lruvec_add_folio_tail(lruvec, folio);
- return true;
- }
-
/* promoted */
if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 73d791d1caad..04a1cb6cc636 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -74,7 +74,7 @@ static void invalid_numa_statistics(void)
static DEFINE_MUTEX(vm_numa_stat_lock);
-int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
+int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int ret, oldval;
@@ -1888,7 +1888,7 @@ static void refresh_vm_stats(struct work_struct *work)
refresh_cpu_vm_stats(true);
}
-int vmstat_refresh(struct ctl_table *table, int write,
+int vmstat_refresh(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
long val;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 3c9f6538990e..09f6a773a708 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -1189,7 +1189,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
#ifdef CONFIG_SYSCTL
static
-int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
+int brnf_sysctl_call_tables(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/net/core/filter.c b/net/core/filter.c
index 4cf1d34f7617..f3c72cf86099 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3548,13 +3548,20 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* Due to header grow, MSS needs to be downgraded. */
- if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
- skb_decrease_gso_size(shinfo, len_diff);
-
/* Header must be checked, and gso_segs recomputed. */
shinfo->gso_type |= gso_type;
shinfo->gso_segs = 0;
+
+ /* Due to header growth, MSS needs to be downgraded.
+ * There is a BUG_ON() when segmenting the frag_list with
+ * head_frag true, so linearize the skb after downgrading
+ * the MSS.
+ */
+ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
+ skb_decrease_gso_size(shinfo, len_diff);
+ if (shinfo->frag_list)
+ return skb_linearize(skb);
+ }
}
return 0;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 277751375b0a..a6fe88eca939 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3543,7 +3543,7 @@ EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
-static int proc_unres_qlen(struct ctl_table *ctl, int write,
+static int proc_unres_qlen(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int size, ret;
@@ -3595,7 +3595,7 @@ static void neigh_proc_update(const struct ctl_table *ctl, int write)
neigh_copy_dflt_parms(net, p, index);
}
-static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_zero_intmax(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -3610,7 +3610,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
return ret;
}
-static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_ms_jiffies_positive(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table tmp = *ctl;
@@ -3626,7 +3626,7 @@ static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int wr
return ret;
}
-int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
+int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
@@ -3636,7 +3636,7 @@ int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
}
EXPORT_SYMBOL(neigh_proc_dointvec);
-int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
+int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
@@ -3646,7 +3646,7 @@ int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
-static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_userhz_jiffies(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -3656,7 +3656,7 @@ static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
return ret;
}
-int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
@@ -3666,7 +3666,7 @@ int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
-static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_unres_qlen(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -3676,7 +3676,7 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
-static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 2079000691e2..86a2476678c4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -95,7 +95,7 @@ static struct cpumask *rps_default_mask_cow_alloc(struct net *net)
return rps_default_mask;
}
-static int rps_default_mask_sysctl(struct ctl_table *table, int write,
+static int rps_default_mask_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)table->data;
@@ -126,7 +126,7 @@ done:
return err;
}
-static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int orig_size, size;
@@ -198,7 +198,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);
-static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
+static int flow_limit_cpu_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct sd_flow_limit *cur;
@@ -255,7 +255,7 @@ done:
return ret;
}
-static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
+static int flow_limit_table_len_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int old, *ptr;
@@ -277,7 +277,7 @@ static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_SCHED
-static int set_default_qdisc(struct ctl_table *table, int write,
+static int set_default_qdisc(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char id[IFNAMSIZ];
@@ -296,7 +296,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
}
#endif
-static int proc_do_dev_weight(struct ctl_table *table, int write,
+static int proc_do_dev_weight(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
static DEFINE_MUTEX(dev_weight_mutex);
@@ -314,7 +314,7 @@ static int proc_do_dev_weight(struct ctl_table *table, int write,
return ret;
}
-static int proc_do_rss_key(struct ctl_table *table, int write,
+static int proc_do_rss_key(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
@@ -327,7 +327,7 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
}
#ifdef CONFIG_BPF_JIT
-static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_bpf_enable(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -360,7 +360,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
# ifdef CONFIG_HAVE_EBPF_JIT
static int
-proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+proc_dointvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!capable(CAP_SYS_ADMIN))
@@ -371,7 +371,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
# endif /* CONFIG_HAVE_EBPF_JIT */
static int
-proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+proc_dolongvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!capable(CAP_SYS_ADMIN))
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index d09f557eaa77..d96f3e452fef 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2390,7 +2390,7 @@ static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
}
}
-static int devinet_conf_proc(struct ctl_table *ctl, int write,
+static int devinet_conf_proc(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int old_value = *(int *)ctl->data;
@@ -2442,7 +2442,7 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
return ret;
}
-static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
+static int devinet_sysctl_forward(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -2489,7 +2489,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
return ret;
}
-static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
+static int ipv4_doint_and_flush(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 535856b0f0ed..6b9787ee8601 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -888,9 +888,10 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
p = nla_data(nla);
for (i = 0; i < nhg->num_nh; ++i) {
- p->id = nhg->nh_entries[i].nh->id;
- p->weight = nhg->nh_entries[i].weight - 1;
- p += 1;
+ *p++ = (struct nexthop_grp) {
+ .id = nhg->nh_entries[i].nh->id,
+ .weight = nhg->nh_entries[i].weight - 1,
+ };
}
if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
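The nexthop change above switches to assigning a compound literal per dumped entry, so every member that is not named explicitly (including the reserved fields of `struct nexthop_grp`) is written as zero instead of whatever the freshly reserved netlink buffer happened to contain. A one-line illustration of that C rule, with an arbitrary id:

struct nexthop_grp ent = { .id = 42 }; /* every member not named, including the reserved ones, reads back as 0 */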
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5090912533d6..13c0f1d455f3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1263,7 +1263,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowi4_tos = RT_TOS(iph->tos),
+ .flowi4_tos = iph->tos & IPTOS_RT_MASK,
.flowi4_oif = rt->dst.dev->ifindex,
.flowi4_iif = skb->dev->ifindex,
.flowi4_mark = skb->mark,
@@ -3388,7 +3388,7 @@ static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
-static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
+static int ipv4_sysctl_rtcache_flush(const struct ctl_table *__ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)__ctl->extra1;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 9140d20eb2d4..4af0c234d8d7 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -62,7 +62,7 @@ static void set_local_port_range(struct net *net, unsigned int low, unsigned int
}
/* Validate changes from /proc interface. */
-static int ipv4_local_port_range(struct ctl_table *table, int write,
+static int ipv4_local_port_range(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = table->data;
@@ -96,7 +96,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
}
/* Validate changes from /proc interface. */
-static int ipv4_privileged_ports(struct ctl_table *table, int write,
+static int ipv4_privileged_ports(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -159,7 +159,7 @@ static void set_ping_group_range(const struct ctl_table *table,
}
/* Validate changes from /proc interface. */
-static int ipv4_ping_group_range(struct ctl_table *table, int write,
+static int ipv4_ping_group_range(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct user_namespace *user_ns = current_user_ns();
@@ -194,7 +194,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
return ret;
}
-static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
+static int ipv4_fwd_update_priority(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
@@ -210,7 +210,7 @@ static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
return ret;
}
-static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
+static int proc_tcp_congestion_control(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(ctl->data, struct net,
@@ -230,7 +230,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
+static int proc_tcp_available_congestion_control(const struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -246,7 +246,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
return ret;
}
-static int proc_allowed_congestion_control(struct ctl_table *ctl,
+static int proc_allowed_congestion_control(const struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -283,7 +283,7 @@ static int sscanf_key(char *buf, __le32 *key)
return ret;
}
-static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
+static int proc_tcp_fastopen_key(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -354,7 +354,7 @@ bad_key:
return ret;
}
-static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
+static int proc_tfo_blackhole_detect_timeout(const struct ctl_table *table,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -369,7 +369,7 @@ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
return ret;
}
-static int proc_tcp_available_ulp(struct ctl_table *ctl,
+static int proc_tcp_available_ulp(const struct ctl_table *ctl,
int write, void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -386,7 +386,7 @@ static int proc_tcp_available_ulp(struct ctl_table *ctl,
return ret;
}
-static int proc_tcp_ehash_entries(struct ctl_table *table, int write,
+static int proc_tcp_ehash_entries(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -410,7 +410,7 @@ static int proc_tcp_ehash_entries(struct ctl_table *table, int write,
return proc_dointvec(&tbl, write, buffer, lenp, ppos);
}
-static int proc_udp_hash_entries(struct ctl_table *table, int write,
+static int proc_udp_hash_entries(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -434,7 +434,7 @@ static int proc_udp_hash_entries(struct ctl_table *table, int write,
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
-static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
+static int proc_fib_multipath_hash_policy(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -449,7 +449,7 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
return ret;
}
-static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write,
+static int proc_fib_multipath_hash_fields(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -484,7 +484,7 @@ static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new);
}
-static int proc_fib_multipath_hash_seed(struct ctl_table *table, int write,
+static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ff9ab3d01ced..454362e359da 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6819,9 +6819,6 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fast_path_on(tp);
if (sk->sk_shutdown & SEND_SHUTDOWN)
tcp_shutdown(sk, SEND_SHUTDOWN);
-
- if (sk->sk_socket)
- goto consume;
break;
case TCP_FIN_WAIT1: {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 55a0fd589fc8..f70d8757af1a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -6309,7 +6309,7 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
#ifdef CONFIG_SYSCTL
-static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -6334,7 +6334,7 @@ static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
return ret;
}
-static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct inet6_dev *idev = ctl->extra1;
@@ -6405,7 +6405,7 @@ static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf
return 0;
}
-static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -6430,7 +6430,7 @@ static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
return ret;
}
-static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -6471,7 +6471,7 @@ static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
return ret;
}
-static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -6534,7 +6534,7 @@ out:
return ret;
}
-static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -6602,7 +6602,7 @@ out:
}
static
-int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+int addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp,
loff_t *ppos)
@@ -6702,7 +6702,7 @@ int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val)
return 0;
}
-static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 254b192c5705..70a0b2ad6bd7 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1951,7 +1951,7 @@ static void ndisc_warn_deprecated_sysctl(const struct ctl_table *ctl,
}
}
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer,
+int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct net_device *dev = ctl->extra1;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c752e9ed20e6..219701caba1e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -6334,7 +6334,7 @@ static int rt6_stats_seq_show(struct seq_file *seq, void *v)
#ifdef CONFIG_SYSCTL
-static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
+static int ipv6_sysctl_rtcache_flush(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index c060285ff47f..d2cd33e2698d 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -30,7 +30,7 @@ static u32 rt6_multipath_hash_fields_all_mask =
static u32 ioam6_id_max = IOAM6_DEFAULT_ID;
static u64 ioam6_id_wide_max = IOAM6_DEFAULT_ID_WIDE;
-static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
+static int proc_rt6_multipath_hash_policy(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
@@ -46,7 +46,7 @@ static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
}
static int
-proc_rt6_multipath_hash_fields(struct ctl_table *table, int write, void *buffer,
+proc_rt6_multipath_hash_fields(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct net *net;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index b7bf34a5eb37..1e42e13ad24e 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -62,7 +62,7 @@
#define IUCV_IPNORPY 0x10
#define IUCV_IPALL 0x80
-static int iucv_bus_match(struct device *dev, struct device_driver *drv)
+static int iucv_bus_match(struct device *dev, const struct device_driver *drv)
{
return 0;
}
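Bus `.match` callbacks now receive the driver as `const struct device_driver *`. iucv simply returns 0, but a sketch of what a typical constified match looks like (the name comparison is illustrative, not iucv's logic):

static int example_bus_match(struct device *dev, const struct device_driver *drv)
{
        /* match purely by name; the driver object is never written to */
        return strcmp(dev_name(dev), drv->name) == 0;
}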
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1c1decce7f06..c80ab3f26084 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -441,14 +441,15 @@ int l2tp_session_register(struct l2tp_session *session,
int err;
spin_lock_bh(&tunnel->list_lock);
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
+
if (!tunnel->acpt_newsess) {
err = -ENODEV;
- goto err_tlock;
+ goto out;
}
if (tunnel->version == L2TP_HDR_VER_3) {
session_key = session->session_id;
- spin_lock_bh(&pn->l2tp_session_idr_lock);
err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
&session_key, session_key, GFP_ATOMIC);
/* IP encap expects session IDs to be globally unique, while
@@ -462,43 +463,36 @@ int l2tp_session_register(struct l2tp_session *session,
err = l2tp_session_collision_add(pn, session,
other_session);
}
- spin_unlock_bh(&pn->l2tp_session_idr_lock);
} else {
session_key = l2tp_v2_session_key(tunnel->tunnel_id,
session->session_id);
- spin_lock_bh(&pn->l2tp_session_idr_lock);
err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
&session_key, session_key, GFP_ATOMIC);
- spin_unlock_bh(&pn->l2tp_session_idr_lock);
}
if (err) {
if (err == -ENOSPC)
err = -EEXIST;
- goto err_tlock;
+ goto out;
}
l2tp_tunnel_inc_refcount(tunnel);
-
list_add(&session->list, &tunnel->session_list);
- spin_unlock_bh(&tunnel->list_lock);
- spin_lock_bh(&pn->l2tp_session_idr_lock);
if (tunnel->version == L2TP_HDR_VER_3) {
if (!other_session)
idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
} else {
idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
}
- spin_unlock_bh(&pn->l2tp_session_idr_lock);
-
- trace_register_session(session);
- return 0;
-
-err_tlock:
+out:
+ spin_unlock_bh(&pn->l2tp_session_idr_lock);
spin_unlock_bh(&tunnel->list_lock);
+ if (!err)
+ trace_register_session(session);
+
return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
@@ -1260,13 +1254,13 @@ static void l2tp_session_unhash(struct l2tp_session *session)
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
struct l2tp_session *removed = session;
- /* Remove from the per-tunnel list */
spin_lock_bh(&tunnel->list_lock);
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
+
+ /* Remove from the per-tunnel list */
list_del_init(&session->list);
- spin_unlock_bh(&tunnel->list_lock);
/* Remove from per-net IDR */
- spin_lock_bh(&pn->l2tp_session_idr_lock);
if (tunnel->version == L2TP_HDR_VER_3) {
if (hash_hashed(&session->hlist))
l2tp_session_collision_del(pn, session);
@@ -1280,7 +1274,9 @@ static void l2tp_session_unhash(struct l2tp_session *session)
session_key);
}
WARN_ON_ONCE(removed && removed != session);
+
spin_unlock_bh(&pn->l2tp_session_idr_lock);
+ spin_unlock_bh(&tunnel->list_lock);
synchronize_rcu();
}
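The l2tp change above stops dropping one lock before taking the other: both the registration and unhash paths now hold `tunnel->list_lock` and `pn->l2tp_session_idr_lock` together, always in that order, so the per-tunnel list and the per-net IDR can no longer disagree in between. The resulting pattern, reduced to a sketch:

spin_lock_bh(&tunnel->list_lock);
spin_lock_bh(&pn->l2tp_session_idr_lock);

/* ... add to / remove from tunnel->session_list and the v2/v3 session IDR ... */

spin_unlock_bh(&pn->l2tp_session_idr_lock);
spin_unlock_bh(&tunnel->list_lock);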
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 2dc7a908a6bb..0e6c94a8c2bc 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1347,7 +1347,7 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
(&((struct mpls_dev *)0)->field)
-static int mpls_conf_proc(struct ctl_table *ctl, int write,
+static int mpls_conf_proc(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int oval = *(int *)ctl->data;
@@ -2600,7 +2600,7 @@ nolabels:
return -ENOMEM;
}
-static int mpls_platform_labels(struct ctl_table *table, int write,
+static int mpls_platform_labels(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = table->data;
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 98b1dd498ff6..99382c317ebb 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -113,7 +113,7 @@ static int mptcp_set_scheduler(const struct net *net, const char *name)
return ret;
}
-static int proc_scheduler(struct ctl_table *ctl, int write,
+static int proc_scheduler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
const struct net *net = current->nsproxy->net_ns;
@@ -133,7 +133,7 @@ static int proc_scheduler(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_available_schedulers(struct ctl_table *ctl,
+static int proc_available_schedulers(const struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 706c2b52a1ac..dc6ddc4abbe2 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1846,7 +1846,7 @@ static int ip_vs_zero_all(struct netns_ipvs *ipvs)
#ifdef CONFIG_SYSCTL
static int
-proc_do_defense_mode(struct ctl_table *table, int write,
+proc_do_defense_mode(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
@@ -1873,7 +1873,7 @@ proc_do_defense_mode(struct ctl_table *table, int write,
}
static int
-proc_do_sync_threshold(struct ctl_table *table, int write,
+proc_do_sync_threshold(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
@@ -1901,7 +1901,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
}
static int
-proc_do_sync_ports(struct ctl_table *table, int write,
+proc_do_sync_ports(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
@@ -1984,7 +1984,7 @@ static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
return ret;
}
-static int ipvs_proc_est_cpulist(struct ctl_table *table, int write,
+static int ipvs_proc_est_cpulist(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -2011,7 +2011,7 @@ static int ipvs_proc_est_cpulist(struct ctl_table *table, int write,
return ret;
}
-static int ipvs_proc_est_nice(struct ctl_table *table, int write,
+static int ipvs_proc_est_nice(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
@@ -2041,7 +2041,7 @@ static int ipvs_proc_est_nice(struct ctl_table *table, int write,
return ret;
}
-static int ipvs_proc_run_estimation(struct ctl_table *table, int write,
+static int ipvs_proc_run_estimation(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 6c40bdf8b05a..7d4f0fa8b609 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -524,7 +524,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_count);
static unsigned int nf_conntrack_htable_size_user __read_mostly;
static int
-nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
+nf_conntrack_hash_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
index d8ebebc9775d..2d890dd04ff8 100644
--- a/net/netfilter/nf_hooks_lwtunnel.c
+++ b/net/netfilter/nf_hooks_lwtunnel.c
@@ -28,7 +28,7 @@ static inline int nf_hooks_lwtunnel_set(int enable)
}
#ifdef CONFIG_SYSCTL
-int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int proc_nf_hooks_lwtunnel_enabled = 0;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 769fd7680fac..6dd0de33eebd 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -408,7 +408,7 @@ static struct ctl_table nf_log_sysctl_ftable[] = {
},
};
-static int nf_log_proc_dostring(struct ctl_table *table, int write,
+static int nf_log_proc_dostring(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
const struct nf_logger *logger;
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index 8910a5ac7ed1..b8d3c3213efe 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -1139,8 +1139,14 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
bool map_index;
int i, ret = 0;
- if (unlikely(!irq_fpu_usable()))
- return nft_pipapo_lookup(net, set, key, ext);
+ local_bh_disable();
+
+ if (unlikely(!irq_fpu_usable())) {
+ bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
+
+ local_bh_enable();
+ return fallback_res;
+ }
m = rcu_dereference(priv->match);
@@ -1155,6 +1161,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
scratch = *raw_cpu_ptr(m->scratch);
if (unlikely(!scratch)) {
kernel_fpu_end();
+ local_bh_enable();
return false;
}
@@ -1235,6 +1242,7 @@ out:
if (i % 2)
scratch->map_index = !map_index;
kernel_fpu_end();
+ local_bh_enable();
return ret >= 0;
}
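The pipapo change wraps the whole AVX2 lookup, including the non-FPU fallback, in a bottom-half-disabled section, presumably so the per-CPU scratch map cannot be reused from softirq context while a lookup is in flight; note that every early return now has to undo both `local_bh_disable()` and the FPU section. The shape of that pattern, as a sketch (`fallback_lookup()` is a hypothetical scalar path):

local_bh_disable();
if (unlikely(!irq_fpu_usable())) {
        bool ret = fallback_lookup();   /* hypothetical scalar path */

        local_bh_enable();
        return ret;
}
kernel_fpu_begin();
/* ... vectorised lookup using the per-CPU scratch area ... */
kernel_fpu_end();
local_bh_enable();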
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index 82fc22467a09..463a74a27d3e 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -48,7 +48,7 @@ void phonet_get_local_port_range(int *min, int *max)
} while (read_seqretry(&local_port_range_lock, seq));
}
-static int proc_local_port_range(struct ctl_table *table, int write,
+static int proc_local_port_range(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 3dc6956f66f8..351ac1747224 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -61,7 +61,7 @@ static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
static struct kmem_cache *rds_tcp_conn_slab;
-static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
+static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *fpos);
static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
@@ -682,7 +682,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
spin_unlock_irq(&rds_tcp_conn_lock);
}
-static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
+static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *fpos)
{
struct net *net = current->nsproxy->net_ns;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 61c6f3027e7f..e5a5af343c4c 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -43,19 +43,19 @@ static unsigned long max_autoclose_max =
(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void *buffer,
+static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, void *buffer,
+static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static struct ctl_table sctp_table[] = {
@@ -384,7 +384,7 @@ static struct ctl_table sctp_net_table[] = {
},
};
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
@@ -429,7 +429,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
@@ -457,7 +457,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
@@ -485,7 +485,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (write)
@@ -495,7 +495,7 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
}
-static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
@@ -524,7 +524,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
+static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
@@ -565,7 +565,7 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 5f3170a1c9bb..bdb587a72422 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nlm_debug);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-static int proc_do_xprt(struct ctl_table *table, int write,
+static int proc_do_xprt(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char tmpbuf[256];
@@ -62,7 +62,7 @@ static int proc_do_xprt(struct ctl_table *table, int write,
}
static int
-proc_dodebug(struct ctl_table *table, int write, void *buffer, size_t *lenp,
+proc_dodebug(const struct ctl_table *table, int write, void *buffer, size_t *lenp,
loff_t *ppos)
{
char tmpbuf[20], *s = NULL;
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 474f7a98fe9e..58ae6ec4f25b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -74,7 +74,7 @@ enum {
SVCRDMA_COUNTER_BUFSIZ = sizeof(unsigned long long),
};
-static int svcrdma_counter_handler(struct ctl_table *table, int write,
+static int svcrdma_counter_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct percpu_counter *stat = (struct percpu_counter *)table->data;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index b849a3d133a0..439f75539977 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -135,8 +135,11 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
else if (ntohs(ua->proto) == ETH_P_IPV6)
snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
- else
+ else {
pr_err("Invalid UDP media address\n");
+ return 1;
+ }
+
return 0;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b0a4c6d08e0a..0be0dcb07f7b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2721,10 +2721,49 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
+ struct unix_sock *u = unix_sk(sk);
+ struct sk_buff *skb;
+ int err;
+
if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
return -ENOTCONN;
- return unix_read_skb(sk, recv_actor);
+ mutex_lock(&u->iolock);
+ skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
+ mutex_unlock(&u->iolock);
+ if (!skb)
+ return err;
+
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (unlikely(skb == READ_ONCE(u->oob_skb))) {
+ bool drop = false;
+
+ unix_state_lock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD)) {
+ unix_state_unlock(sk);
+ kfree_skb(skb);
+ return -ECONNRESET;
+ }
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ if (likely(skb == u->oob_skb)) {
+ WRITE_ONCE(u->oob_skb, NULL);
+ drop = true;
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ unix_state_unlock(sk);
+
+ if (drop) {
+ WARN_ON_ONCE(skb_unref(skb));
+ kfree_skb(skb);
+ return -EAGAIN;
+ }
+ }
+#endif
+
+ return recv_actor(sk, skb);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
index bd84785bf8d6..bca2d86ba97d 100644
--- a/net/unix/unix_bpf.c
+++ b/net/unix/unix_bpf.c
@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
struct sk_psock *psock;
int copied;
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
if (!len)
return 0;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index caa340134b0e..9f76ca591d54 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -151,6 +151,7 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
#define XDP_UMEM_FLAGS_VALID ( \
XDP_UMEM_UNALIGNED_CHUNK_FLAG | \
XDP_UMEM_TX_SW_CSUM | \
+ XDP_UMEM_TX_METADATA_LEN | \
0)
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
@@ -204,8 +205,11 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
return -EINVAL;
- if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
- return -EINVAL;
+ if (mr->flags & XDP_UMEM_TX_METADATA_LEN) {
+ if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
+ return -EINVAL;
+ umem->tx_metadata_len = mr->tx_metadata_len;
+ }
umem->size = size;
umem->headroom = headroom;
@@ -215,7 +219,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
- umem->tx_metadata_len = mr->tx_metadata_len;
INIT_LIST_HEAD(&umem->xsk_dma_list);
refcount_set(&umem->users, 1);
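With the xdp_umem change, `tx_metadata_len` is only validated and copied when userspace explicitly sets the new `XDP_UMEM_TX_METADATA_LEN` flag, so the field is ignored unless the caller opts in. A hedged userspace sketch of opting in; `register_umem()`, the chunk size and the metadata length are illustrative values, not taken from this diff:

#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int register_umem(int xsk_fd, void *area, uint64_t size)
{
        struct xdp_umem_reg mr = {
                .addr            = (uintptr_t)area,
                .len             = size,
                .chunk_size      = 2048,
                .headroom        = 0,
                .flags           = XDP_UMEM_TX_METADATA_LEN, /* required for the field below */
                .tx_metadata_len = 16,
        };

        return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
}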
diff --git a/rust/Makefile b/rust/Makefile
index f70d5e244fee..1f10f92737f2 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -44,17 +44,10 @@ rustc_sysroot := $(shell MAKEFLAGS= $(RUSTC) $(rust_flags) --print sysroot)
rustc_host_target := $(shell $(RUSTC) --version --verbose | grep -F 'host: ' | cut -d' ' -f2)
RUST_LIB_SRC ?= $(rustc_sysroot)/lib/rustlib/src/rust/library
-ifeq ($(quiet),silent_)
-cargo_quiet=-q
+ifneq ($(quiet),)
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
rustdoc_test_kernel_quiet=>/dev/null
-else ifeq ($(quiet),quiet_)
-rust_test_quiet=-q
-rustdoc_test_quiet=--test-args -q
-rustdoc_test_kernel_quiet=>/dev/null
-else
-cargo_quiet=--verbose
endif
core-cfgs = \
@@ -135,22 +128,21 @@ quiet_cmd_rustc_test_library = RUSTC TL $<
@$(objtree)/include/generated/rustc_cfg $(rustc_target_flags) \
--crate-type $(if $(rustc_test_library_proc),proc-macro,rlib) \
--out-dir $(objtree)/$(obj)/test --cfg testlib \
- --sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$(subst rusttestlib-,,$@)) $<
-rusttestlib-build_error: $(src)/build_error.rs rusttest-prepare FORCE
+rusttestlib-build_error: $(src)/build_error.rs FORCE
+$(call if_changed,rustc_test_library)
rusttestlib-macros: private rustc_target_flags = --extern proc_macro
rusttestlib-macros: private rustc_test_library_proc = yes
-rusttestlib-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+rusttestlib-macros: $(src)/macros/lib.rs FORCE
+$(call if_changed,rustc_test_library)
-rusttestlib-bindings: $(src)/bindings/lib.rs rusttest-prepare FORCE
+rusttestlib-bindings: $(src)/bindings/lib.rs FORCE
+$(call if_changed,rustc_test_library)
-rusttestlib-uapi: $(src)/uapi/lib.rs rusttest-prepare FORCE
+rusttestlib-uapi: $(src)/uapi/lib.rs FORCE
+$(call if_changed,rustc_test_library)
quiet_cmd_rustdoc_test = RUSTDOC T $<
@@ -159,7 +151,7 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
$(RUSTDOC) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) $(rustdoc_test_target_flags) \
- --sysroot $(objtree)/$(obj)/test/sysroot $(rustdoc_test_quiet) \
+ $(rustdoc_test_quiet) \
-L$(objtree)/$(obj)/test --output $(rustdoc_output) \
--crate-name $(subst rusttest-,,$@) $<
@@ -192,7 +184,6 @@ quiet_cmd_rustc_test = RUSTC T $<
$(RUSTC) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) --out-dir $(objtree)/$(obj)/test \
- --sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$@) $<; \
$(objtree)/$(obj)/test/$(subst rusttest-,,$@) $(rust_test_quiet) \
@@ -200,60 +191,15 @@ quiet_cmd_rustc_test = RUSTC T $<
rusttest: rusttest-macros rusttest-kernel
-# This prepares a custom sysroot with our custom `alloc` instead of
-# the standard one.
-#
-# This requires several hacks:
-# - Unlike `core` and `alloc`, `std` depends on more than a dozen crates,
-# including third-party crates that need to be downloaded, plus custom
-# `build.rs` steps. Thus hardcoding things here is not maintainable.
-# - `cargo` knows how to build the standard library, but it is an unstable
-# feature so far (`-Zbuild-std`).
-# - `cargo` only considers the use case of building the standard library
-# to use it in a given package. Thus we need to create a dummy package
-# and pick the generated libraries from there.
-# - The usual ways of modifying the dependency graph in `cargo` do not seem
-# to apply for the `-Zbuild-std` steps, thus we have to mislead it
-# by modifying the sources in the sysroot.
-# - To avoid messing with the user's Rust installation, we create a clone
-# of the sysroot. However, `cargo` ignores `RUSTFLAGS` in the `-Zbuild-std`
-# steps, thus we use a wrapper binary passed via `RUSTC` to pass the flag.
-#
-# In the future, we hope to avoid the whole ordeal by either:
-# - Making the `test` crate not depend on `std` (either improving upstream
-# or having our own custom crate).
-# - Making the tests run in kernel space (requires the previous point).
-# - Making `std` and friends be more like a "normal" crate, so that
-# `-Zbuild-std` and related hacks are not needed.
-quiet_cmd_rustsysroot = RUSTSYSROOT
- cmd_rustsysroot = \
- rm -rf $(objtree)/$(obj)/test; \
- mkdir -p $(objtree)/$(obj)/test; \
- cp -a $(rustc_sysroot) $(objtree)/$(obj)/test/sysroot; \
- echo '\#!/bin/sh' > $(objtree)/$(obj)/test/rustc_sysroot; \
- echo "$(RUSTC) --sysroot=$(abspath $(objtree)/$(obj)/test/sysroot) \"\$$@\"" \
- >> $(objtree)/$(obj)/test/rustc_sysroot; \
- chmod u+x $(objtree)/$(obj)/test/rustc_sysroot; \
- $(CARGO) -q new $(objtree)/$(obj)/test/dummy; \
- RUSTC=$(objtree)/$(obj)/test/rustc_sysroot $(CARGO) $(cargo_quiet) \
- test -Zbuild-std --target $(rustc_host_target) \
- --manifest-path $(objtree)/$(obj)/test/dummy/Cargo.toml; \
- rm $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib/*; \
- cp $(objtree)/$(obj)/test/dummy/target/$(rustc_host_target)/debug/deps/* \
- $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib
-
-rusttest-prepare: FORCE
- +$(call if_changed,rustsysroot)
-
rusttest-macros: private rustc_target_flags = --extern proc_macro
rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
-rusttest-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+rusttest-macros: $(src)/macros/lib.rs FORCE
+$(call if_changed,rustc_test)
+$(call if_changed,rustdoc_test)
rusttest-kernel: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros --extern bindings --extern uapi
-rusttest-kernel: $(src)/kernel/lib.rs rusttest-prepare \
+rusttest-kernel: $(src)/kernel/lib.rs \
rusttestlib-build_error rusttestlib-macros rusttestlib-bindings \
rusttestlib-uapi FORCE
+$(call if_changed,rustc_test)
@@ -421,12 +367,12 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
endif
$(obj)/core.o: private skip_clippy = 1
-$(obj)/core.o: private skip_flags = -Dunreachable_pub
+$(obj)/core.o: private skip_flags = -Wunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
+$(call if_changed_dep,rustc_library)
-ifdef CONFIG_X86_64
+ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),)
$(obj)/core.o: scripts/target.json
endif
@@ -435,7 +381,7 @@ $(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
+$(call if_changed_dep,rustc_library)
$(obj)/alloc.o: private skip_clippy = 1
-$(obj)/alloc.o: private skip_flags = -Dunreachable_pub
+$(obj)/alloc.o: private skip_flags = -Wunreachable_pub
$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
$(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE
+$(call if_changed_dep,rustc_library)
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 6deee85a29c8..b940a5777330 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -12,6 +12,7 @@
#include <linux/blkdev.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
+#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/mdio.h>
#include <linux/phy.h>
@@ -29,4 +30,5 @@ const gfp_t RUST_CONST_HELPER_GFP_KERNEL = GFP_KERNEL;
const gfp_t RUST_CONST_HELPER_GFP_KERNEL_ACCOUNT = GFP_KERNEL_ACCOUNT;
const gfp_t RUST_CONST_HELPER_GFP_NOWAIT = GFP_NOWAIT;
const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
+const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index 40ddaee50d8b..93a1a3fc97bc 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -24,6 +24,7 @@
unsafe_op_in_unsafe_fn
)]
+#[allow(dead_code)]
mod bindings_raw {
// Use glob import here to expose all helpers.
// Symbols defined within the module will take precedence to the glob import.
diff --git a/rust/helpers.c b/rust/helpers.c
index 3df5217fb2ff..92d3c03ae1bd 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -23,8 +23,11 @@
#include <kunit/test-bug.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/errname.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/sched/signal.h>
@@ -39,6 +42,20 @@ __noreturn void rust_helper_BUG(void)
}
EXPORT_SYMBOL_GPL(rust_helper_BUG);
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);
+
void rust_helper_mutex_lock(struct mutex *lock)
{
mutex_lock(lock);
@@ -80,6 +97,24 @@ int rust_helper_signal_pending(struct task_struct *t)
}
EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
+
+void *rust_helper_kmap_local_page(struct page *page)
+{
+ return kmap_local_page(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kmap_local_page);
+
+void rust_helper_kunmap_local(const void *addr)
+{
+ kunmap_local(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
+
refcount_t rust_helper_REFCOUNT_INIT(int n)
{
return (refcount_t)REFCOUNT_INIT(n);
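The new helpers above follow the existing convention in rust/helpers.c: routines that are macros or `static inline` on the C side are invisible to bindgen, so a small exported wrapper gives Rust something callable. Adding one looks roughly like this (`foo()` is a hypothetical inline function, not part of this diff):

unsigned long rust_helper_foo(unsigned long x)
{
        return foo(x);  /* foo() stands in for a static inline that bindgen cannot bind */
}
EXPORT_SYMBOL_GPL(rust_helper_foo);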
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
index 531b5e471cb1..1966bd407017 100644
--- a/rust/kernel/alloc.rs
+++ b/rust/kernel/alloc.rs
@@ -20,6 +20,13 @@ pub struct AllocError;
#[derive(Clone, Copy)]
pub struct Flags(u32);
+impl Flags {
+ /// Get the raw representation of this flag.
+ pub(crate) fn as_raw(self) -> u32 {
+ self.0
+ }
+}
+
impl core::ops::BitOr for Flags {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
@@ -52,6 +59,14 @@ pub mod flags {
/// This is normally or'd with other flags.
pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO);
+ /// Allow the allocation to be in high memory.
+ ///
+ /// Allocations in high memory may not be mapped into the kernel's address space, so this can't
+ /// be used with `kmalloc` and other similar methods.
+ ///
+ /// This is normally or'd with other flags.
+ pub const __GFP_HIGHMEM: Flags = Flags(bindings::__GFP_HIGHMEM);
+
/// Users can not sleep and need the allocation to succeed.
///
/// A lower watermark is applied to allow access to "atomic reserves". The current
@@ -66,7 +81,7 @@ pub mod flags {
/// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg.
pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT);
- /// Ror kernel allocations that should not stall for direct reclaim, start physical IO or
+ /// For kernel allocations that should not stall for direct reclaim, start physical IO or
/// use any filesystem callback. It is very likely to fail to allocate memory, even for very
/// small allocations.
pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);
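`__GFP_HIGHMEM` fits the new page abstraction further down: a highmem page may have no permanent kernel mapping, which is why the doc comment warns against combining it with `kmalloc`. The equivalent C-side usage, as a sketch:

struct page *page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
void *addr;

if (!page)
        return -ENOMEM;

addr = kmap_local_page(page);   /* temporary mapping; the page may live in highmem */
memset(addr, 0, PAGE_SIZE);
kunmap_local(addr);
__free_pages(page, 0);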
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
new file mode 100644
index 000000000000..851018eef885
--- /dev/null
+++ b/rust/kernel/device.rs
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic devices that are part of the kernel's driver model.
+//!
+//! C header: [`include/linux/device.h`](srctree/include/linux/device.h)
+
+use crate::{
+ bindings,
+ types::{ARef, Opaque},
+};
+use core::ptr;
+
+/// A reference-counted device.
+///
+/// This structure represents the Rust abstraction for a C `struct device`. This implementation
+/// abstracts the usage of an already existing C `struct device` within Rust code that we get
+/// passed from the C side.
+///
+/// An instance of this abstraction can be obtained temporarily or permanently.
+///
+/// A temporary one is bound to the lifetime of the C `struct device` pointer used for creation.
+/// A permanent instance is always reference-counted and hence not restricted by any lifetime
+/// boundaries.
+///
+/// For subsystems it is recommended to create a permanent instance to wrap into a subsystem
+/// specific device structure (e.g. `pci::Device`). This is useful for passing it to drivers in
+/// `T::probe()`, such that a driver can store the `ARef<Device>` (equivalent to storing a
+/// `struct device` pointer in a C driver) for arbitrary purposes, e.g. allocating DMA coherent
+/// memory.
+///
+/// # Invariants
+///
+/// A `Device` instance represents a valid `struct device` created by the C portion of the kernel.
+///
+/// Instances of this type are always reference-counted, that is, a call to `get_device` ensures
+/// that the allocation remains valid at least until the matching call to `put_device`.
+///
+/// `bindings::device::release` is valid to be called from any thread, hence `ARef<Device>` can be
+/// dropped from any thread.
+#[repr(transparent)]
+pub struct Device(Opaque<bindings::device>);
+
+impl Device {
+ /// Creates a new reference-counted abstraction instance of an existing `struct device` pointer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count,
+ /// i.e. it must be ensured that the reference count of the C `struct device` `ptr` points to
+ /// can't drop to zero, for the duration of this function call.
+ ///
+ /// It must also be ensured that `bindings::device::release` can be called from any thread.
+ /// While not officially documented, this should be the case for any `struct device`.
+ pub unsafe fn from_raw(ptr: *mut bindings::device) -> ARef<Self> {
+ // SAFETY: By the safety requirements, ptr is valid.
+ // Initially increase the reference count by one to compensate for the final decrement once
+ // this newly created `ARef<Device>` instance is dropped.
+ unsafe { bindings::get_device(ptr) };
+
+ // CAST: `Self` is a `repr(transparent)` wrapper around `bindings::device`.
+ let ptr = ptr.cast::<Self>();
+
+ // SAFETY: `ptr` is valid by the safety requirements of this function. By the above call to
+ // `bindings::get_device` we also own a reference to the underlying `struct device`.
+ unsafe { ARef::from_raw(ptr::NonNull::new_unchecked(ptr)) }
+ }
+
+ /// Obtain the raw `struct device *`.
+ pub(crate) fn as_raw(&self) -> *mut bindings::device {
+ self.0.get()
+ }
+
+ /// Convert a raw C `struct device` pointer to a `&'a Device`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count,
+ /// i.e. it must be ensured that the reference count of the C `struct device` `ptr` points to
+ /// can't drop to zero, for the duration of this function call and the entire duration when the
+ /// returned reference exists.
+ pub unsafe fn as_ref<'a>(ptr: *mut bindings::device) -> &'a Self {
+ // SAFETY: Guaranteed by the safety requirements of the function.
+ unsafe { &*ptr.cast() }
+ }
+}
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::get_device(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::put_device(obj.cast().as_ptr()) }
+ }
+}
+
+// SAFETY: As by the type invariant `Device` can be sent to any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` can be shared among threads because all immutable methods are protected by the
+// synchronization in `struct device`.
+unsafe impl Sync for Device {}
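`ARef<Device>` models the plain C refcounting pattern: creating or cloning the `ARef` maps to `get_device()`, and dropping it maps to `put_device()`. The C shape it mirrors, as a sketch with illustrative helper names:

/* keep a long-lived reference to a device handed to us by the core */
struct device *keep_device(struct device *dev)
{
        return get_device(dev);         /* what creating/cloning an ARef<Device> does */
}

void drop_device(struct device *dev)
{
        put_device(dev);                /* what dropping the ARef<Device> does */
}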
diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
new file mode 100644
index 000000000000..2ba03af9f036
--- /dev/null
+++ b/rust/kernel/firmware.rs
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Firmware abstraction
+//!
+//! C header: [`include/linux/firmware.h`](srctree/include/linux/firmware.h)
+
+use crate::{bindings, device::Device, error::Error, error::Result, str::CStr};
+use core::ptr::NonNull;
+
+/// # Invariants
+///
+/// One of the following: `bindings::request_firmware`, `bindings::firmware_request_nowarn`,
+/// `bindings::firmware_request_platform`, `bindings::request_firmware_direct`.
+struct FwFunc(
+ unsafe extern "C" fn(*mut *const bindings::firmware, *const i8, *mut bindings::device) -> i32,
+);
+
+impl FwFunc {
+ fn request() -> Self {
+ Self(bindings::request_firmware)
+ }
+
+ fn request_nowarn() -> Self {
+ Self(bindings::firmware_request_nowarn)
+ }
+}
+
+/// Abstraction around a C `struct firmware`.
+///
+/// This is a simple abstraction around the C firmware API. Just like with the C API, firmware can
+/// be requested. Once requested the abstraction provides direct access to the firmware buffer as
+/// `&[u8]`. The firmware is released once [`Firmware`] is dropped.
+///
+/// # Invariants
+///
+/// The pointer is valid, and has ownership over the instance of `struct firmware`.
+///
+/// The `Firmware`'s backing buffer is not modified.
+///
+/// # Examples
+///
+/// ```no_run
+/// # use kernel::{c_str, device::Device, firmware::Firmware};
+///
+/// # fn no_run() -> Result<(), Error> {
+/// # // SAFETY: *NOT* safe, just for the example to get an `ARef<Device>` instance
+/// # let dev = unsafe { Device::from_raw(core::ptr::null_mut()) };
+///
+/// let fw = Firmware::request(c_str!("path/to/firmware.bin"), &dev)?;
+/// let blob = fw.data();
+///
+/// # Ok(())
+/// # }
+/// ```
+pub struct Firmware(NonNull<bindings::firmware>);
+
+impl Firmware {
+ fn request_internal(name: &CStr, dev: &Device, func: FwFunc) -> Result<Self> {
+ let mut fw: *mut bindings::firmware = core::ptr::null_mut();
+ let pfw: *mut *mut bindings::firmware = &mut fw;
+
+ // SAFETY: `pfw` is a valid pointer to a NULL initialized `bindings::firmware` pointer.
+ // `name` and `dev` are valid as by their type invariants.
+ let ret = unsafe { func.0(pfw as _, name.as_char_ptr(), dev.as_raw()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: `func` not bailing out with a non-zero error code, guarantees that `fw` is a
+ // valid pointer to `bindings::firmware`.
+ Ok(Firmware(unsafe { NonNull::new_unchecked(fw) }))
+ }
+
+ /// Send a firmware request and wait for it. See also `bindings::request_firmware`.
+ pub fn request(name: &CStr, dev: &Device) -> Result<Self> {
+ Self::request_internal(name, dev, FwFunc::request())
+ }
+
+ /// Send a request for an optional firmware module. See also
+ /// `bindings::firmware_request_nowarn`.
+ pub fn request_nowarn(name: &CStr, dev: &Device) -> Result<Self> {
+ Self::request_internal(name, dev, FwFunc::request_nowarn())
+ }
+
+ fn as_raw(&self) -> *mut bindings::firmware {
+ self.0.as_ptr()
+ }
+
+ /// Returns the size of the requested firmware in bytes.
+ pub fn size(&self) -> usize {
+ // SAFETY: `self.as_raw()` is valid by the type invariant.
+ unsafe { (*self.as_raw()).size }
+ }
+
+ /// Returns the requested firmware as `&[u8]`.
+ pub fn data(&self) -> &[u8] {
+ // SAFETY: `self.as_raw()` is valid by the type invariant. Additionally,
+ // `bindings::firmware` guarantees, if successfully requested, that
+ // `bindings::firmware::data` has a size of `bindings::firmware::size` bytes.
+ unsafe { core::slice::from_raw_parts((*self.as_raw()).data, self.size()) }
+ }
+}
+
+impl Drop for Firmware {
+ fn drop(&mut self) {
+ // SAFETY: `self.as_raw()` is valid by the type invariant.
+ unsafe { bindings::release_firmware(self.as_raw()) };
+ }
+}
+
+// SAFETY: `Firmware` only holds a pointer to a C `struct firmware`, which is safe to be used from
+// any thread.
+unsafe impl Send for Firmware {}
+
+// SAFETY: `Firmware` only holds a pointer to a C `struct firmware`, references to which are safe to
+// be used from any thread.
+unsafe impl Sync for Firmware {}
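`Firmware::request()` and the `Drop` impl wrap the familiar C sequence one to one; for comparison, a C sketch of the same flow (`use_blob()` is a stand-in for whatever consumes the data):

const struct firmware *fw;
int err;

err = request_firmware(&fw, "path/to/firmware.bin", dev);
if (err)
        return err;

use_blob(fw->data, fw->size);   /* data stays valid until released */
release_firmware(fw);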
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 68605b633e73..495c09ebe3a3 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -843,11 +843,8 @@ where
let val = unsafe { &mut *slot };
// SAFETY: `slot` is considered pinned.
let val = unsafe { Pin::new_unchecked(val) };
- (self.1)(val).map_err(|e| {
- // SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
+ // SAFETY: `slot` was initialized above.
+ (self.1)(val).inspect_err(|_| unsafe { core::ptr::drop_in_place(slot) })
}
}
@@ -941,11 +938,9 @@ where
// SAFETY: All requirements fulfilled since this function is `__init`.
unsafe { self.0.__pinned_init(slot)? };
// SAFETY: The above call initialized `slot` and we still have unique access.
- (self.1)(unsafe { &mut *slot }).map_err(|e| {
+ (self.1)(unsafe { &mut *slot }).inspect_err(|_|
// SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
+ unsafe { core::ptr::drop_in_place(slot) })
}
}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 2cf7c6b6f66b..274bdc1b0a82 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -30,13 +30,17 @@ pub mod alloc;
#[cfg(CONFIG_BLOCK)]
pub mod block;
mod build_assert;
+pub mod device;
pub mod error;
+#[cfg(CONFIG_RUST_FW_LOADER_ABSTRACTIONS)]
+pub mod firmware;
pub mod init;
pub mod ioctl;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
#[cfg(CONFIG_NET)]
pub mod net;
+pub mod page;
pub mod prelude;
pub mod print;
mod static_assert;
@@ -47,6 +51,7 @@ pub mod sync;
pub mod task;
pub mod time;
pub mod types;
+pub mod uaccess;
pub mod workqueue;
#[doc(hidden)]
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
new file mode 100644
index 000000000000..208a006d587c
--- /dev/null
+++ b/rust/kernel/page.rs
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+
+use crate::{
+ alloc::{AllocError, Flags},
+ bindings,
+ error::code::*,
+ error::Result,
+ uaccess::UserSliceReader,
+};
+use core::ptr::{self, NonNull};
+
+/// A bitwise shift for the page size.
+pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
+
+/// The number of bytes in a page.
+pub const PAGE_SIZE: usize = bindings::PAGE_SIZE;
+
+/// A bitmask that gives the page containing a given address.
+pub const PAGE_MASK: usize = !(PAGE_SIZE - 1);
+
+/// A pointer to a page that owns the page allocation.
+///
+/// # Invariants
+///
+/// The pointer is valid, and has ownership over the page.
+pub struct Page {
+ page: NonNull<bindings::page>,
+}
+
+// SAFETY: Pages have no logic that relies on them staying on a given thread, so moving them across
+// threads is safe.
+unsafe impl Send for Page {}
+
+// SAFETY: Pages have no logic that relies on them not being accessed concurrently, so accessing
+// them concurrently is safe.
+unsafe impl Sync for Page {}
+
+impl Page {
+ /// Allocates a new page.
+ ///
+ /// # Examples
+ ///
+ /// Allocate memory for a page.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// # fn dox() -> Result<(), kernel::alloc::AllocError> {
+ /// let page = Page::alloc_page(GFP_KERNEL)?;
+ /// # Ok(()) }
+ /// ```
+ ///
+ /// Allocate memory for a page and zero its contents.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// # fn dox() -> Result<(), kernel::alloc::AllocError> {
+ /// let page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
+ /// # Ok(()) }
+ /// ```
+ pub fn alloc_page(flags: Flags) -> Result<Self, AllocError> {
+ // SAFETY: Depending on the value of `gfp_flags`, this call may sleep. Other than that, it
+ // is always safe to call this method.
+ let page = unsafe { bindings::alloc_pages(flags.as_raw(), 0) };
+ let page = NonNull::new(page).ok_or(AllocError)?;
+ // INVARIANT: We just successfully allocated a page, so we now have ownership of the newly
+ // allocated page. We transfer that ownership to the new `Page` object.
+ Ok(Self { page })
+ }
+
+ /// Returns a raw pointer to the page.
+ pub fn as_ptr(&self) -> *mut bindings::page {
+ self.page.as_ptr()
+ }
+
+ /// Runs a piece of code with this page mapped to an address.
+ ///
+ /// The page is unmapped when this call returns.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `PAGE_SIZE` bytes and for the duration in which the closure is called. The pointer might
+ /// only be mapped on the current thread, and when that is the case, dereferencing it on other
+ /// threads is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't
+ /// cause data races, the memory may be uninitialized, and so on.
+ ///
+ /// If multiple threads map the same page at the same time, then they may reference with
+ /// different addresses. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_page_mapped<T>(&self, f: impl FnOnce(*mut u8) -> T) -> T {
+ // SAFETY: `page` is valid due to the type invariants on `Page`.
+ let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) };
+
+ let res = f(mapped_addr.cast());
+
+ // This unmaps the page mapped above.
+ //
+ // SAFETY: Since this API takes the user code as a closure, it can only be used in a manner
+ // where the pages are unmapped in reverse order. This is as required by `kunmap_local`.
+ //
+ // In other words, if this call to `kunmap_local` happens when a different page should be
+ // unmapped first, then there must necessarily be a call to `kmap_local_page` other than the
+ // call just above in `with_page_mapped` that made that possible. In this case, it is the
+ // unsafe block that wraps that other call that is incorrect.
+ unsafe { bindings::kunmap_local(mapped_addr) };
+
+ res
+ }
+
+ /// Runs a piece of code with a raw pointer to a slice of this page, with bounds checking.
+ ///
+ /// If `f` is called, then it will be called with a pointer that points at `off` bytes into the
+ /// page, and the pointer will be valid for at least `len` bytes. The pointer is only valid on
+ /// this task, as this method uses a local mapping.
+ ///
+ /// If `off` and `len` refer to a region outside of this page, then this method returns
+ /// [`EINVAL`] and does not call `f`.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `len` bytes and for the duration in which the closure is called. The pointer might only be
+ /// mapped on the current thread, and when that is the case, dereferencing it on other threads
+ /// is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't cause
+ /// data races, the memory may be uninitialized, and so on.
+ ///
+ /// If multiple threads map the same page at the same time, then each mapping may be at a
+ /// different address. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_pointer_into_page<T>(
+ &self,
+ off: usize,
+ len: usize,
+ f: impl FnOnce(*mut u8) -> Result<T>,
+ ) -> Result<T> {
+ let bounds_ok = off <= PAGE_SIZE && len <= PAGE_SIZE && (off + len) <= PAGE_SIZE;
+
+ if bounds_ok {
+ self.with_page_mapped(move |page_addr| {
+ // SAFETY: The `off` integer is at most `PAGE_SIZE`, so this pointer offset will
+ // result in a pointer that is in bounds or one past the end of the page.
+ f(unsafe { page_addr.add(off) })
+ })
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Maps the page and reads from it into the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `dst` is valid for writing `len` bytes.
+ /// * Callers must ensure that this call does not race with a write to the same page that
+ /// overlaps with this read.
+ pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |src| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then
+ // it has performed a bounds check and guarantees that `src` is
+ // valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and writes into it from the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `src` is valid for reading `len` bytes.
+ /// * Callers must ensure that this call does not race with a read or write to the same page
+ /// that overlaps with this write.
+ pub unsafe fn write_raw(&self, src: *const u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and zeroes the given slice.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn fill_zero_raw(&self, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::write_bytes(dst, 0u8, len) };
+ Ok(())
+ })
+ }
+
+ /// Copies data from userspace into this page.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// Like the other `UserSliceReader` methods, data races are allowed on the userspace address.
+ /// However, they are not allowed on the page you are copying into.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn copy_from_user_slice_raw(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ len: usize,
+ ) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes. Furthermore, we have
+ // exclusive access to the slice since the caller guarantees that there are no races.
+ reader.read_raw(unsafe { core::slice::from_raw_parts_mut(dst.cast(), len) })
+ })
+ }
+}
+
+impl Drop for Page {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we have ownership of the page and can free it.
+ unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
+ }
+}
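
The mapping helpers above only hand out raw pointers, so a short usage sketch may help. It is not part of the patch: the function name and buffers are illustrative, and it assumes the usual `kernel::prelude` re-exports (`GFP_KERNEL`, `Result`) and the existing `AllocError` to `Error` conversion.

```rust
// Illustrative only: copy a small buffer into a freshly allocated page and
// read it back. `write_raw`/`read_raw` are unsafe because the caller must
// rule out data races on the page; here the page is local to the function,
// so nothing else can touch it concurrently.
use kernel::page::Page;
use kernel::prelude::*;

fn page_roundtrip() -> Result {
    let page = Page::alloc_page(GFP_KERNEL)?;

    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];

    // SAFETY: `src` is valid for reading 4 bytes, `dst` is valid for writing
    // 4 bytes, and no other context accesses `page` concurrently.
    unsafe {
        page.write_raw(src.as_ptr(), 0, src.len())?;
        page.read_raw(dst.as_mut_ptr(), 0, dst.len())?;
    }

    assert_eq!(src, dst);
    Ok(())
}
```
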
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 2e7c9008621f..bd189d646adb 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -409,3 +409,67 @@ pub enum Either<L, R> {
/// Constructs an instance of [`Either`] containing a value of type `R`.
Right(R),
}
+
+/// Types for which any bit pattern is valid.
+///
+/// Not all bit patterns are valid for every type. For example, a `bool` must be either zero or one, so
+/// reading arbitrary bytes into something that contains a `bool` is not okay.
+///
+/// It's okay for the type to have padding, as initializing those bytes has no effect.
+///
+/// # Safety
+///
+/// All bit-patterns must be valid for this type. This type must not have interior mutability.
+pub unsafe trait FromBytes {}
+
+// SAFETY: All bit patterns are acceptable values of the types below.
+unsafe impl FromBytes for u8 {}
+unsafe impl FromBytes for u16 {}
+unsafe impl FromBytes for u32 {}
+unsafe impl FromBytes for u64 {}
+unsafe impl FromBytes for usize {}
+unsafe impl FromBytes for i8 {}
+unsafe impl FromBytes for i16 {}
+unsafe impl FromBytes for i32 {}
+unsafe impl FromBytes for i64 {}
+unsafe impl FromBytes for isize {}
+// SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit
+// patterns are also acceptable for arrays of that type.
+unsafe impl<T: FromBytes> FromBytes for [T] {}
+unsafe impl<T: FromBytes, const N: usize> FromBytes for [T; N] {}
+
+/// Types that can be viewed as an immutable slice of initialized bytes.
+///
+/// If a struct implements this trait, then it is okay to copy it byte-for-byte to userspace. This
+/// means that it should not have any padding, as padding bytes are uninitialized. Reading
+/// uninitialized memory is not just undefined behavior; it may even leak sensitive
+/// information from the kernel stack to userspace.
+///
+/// The struct should also not hold kernel pointers, as kernel pointer addresses are also considered
+/// sensitive. However, leaking kernel pointers is not considered undefined behavior by Rust, so
+/// this is a correctness requirement, but not a safety requirement.
+///
+/// # Safety
+///
+/// Values of this type may not contain any uninitialized bytes. This type must not have interior
+/// mutability.
+pub unsafe trait AsBytes {}
+
+// SAFETY: Instances of the following types have no uninitialized portions.
+unsafe impl AsBytes for u8 {}
+unsafe impl AsBytes for u16 {}
+unsafe impl AsBytes for u32 {}
+unsafe impl AsBytes for u64 {}
+unsafe impl AsBytes for usize {}
+unsafe impl AsBytes for i8 {}
+unsafe impl AsBytes for i16 {}
+unsafe impl AsBytes for i32 {}
+unsafe impl AsBytes for i64 {}
+unsafe impl AsBytes for isize {}
+unsafe impl AsBytes for bool {}
+unsafe impl AsBytes for char {}
+unsafe impl AsBytes for str {}
+// SAFETY: If individual values in an array have no uninitialized portions, then the array itself
+// does not have any uninitialized portions either.
+unsafe impl<T: AsBytes> AsBytes for [T] {}
+unsafe impl<T: AsBytes, const N: usize> AsBytes for [T; N] {}
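
As a sketch of how a driver could opt a fixed-layout type into these traits (not part of the patch; `SampleRequest` and its fields are hypothetical), the `unsafe impl`s are only sound when the documented conditions hold: every bit pattern valid for `FromBytes`, no padding or uninitialized bytes for `AsBytes`, and no interior mutability for either.

```rust
use kernel::types::{AsBytes, FromBytes};

/// Hypothetical request header shared with userspace.
#[repr(C)]
struct SampleRequest {
    id: u32,
    len: u32,
    flags: u64,
}

// SAFETY: `SampleRequest` only contains integers, so every bit pattern is a
// valid value, and it has no interior mutability.
unsafe impl FromBytes for SampleRequest {}

// SAFETY: With `#[repr(C)]` and field sizes 4 + 4 + 8, the struct has no
// padding and therefore no uninitialized bytes; it holds no kernel pointers
// and has no interior mutability.
unsafe impl AsBytes for SampleRequest {}
```
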
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
new file mode 100644
index 000000000000..e9347cff99ab
--- /dev/null
+++ b/rust/kernel/uaccess.rs
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Slices to user space memory regions.
+//!
+//! C header: [`include/linux/uaccess.h`](srctree/include/linux/uaccess.h)
+
+use crate::{
+ alloc::Flags,
+ bindings,
+ error::Result,
+ prelude::*,
+ types::{AsBytes, FromBytes},
+};
+use alloc::vec::Vec;
+use core::ffi::{c_ulong, c_void};
+use core::mem::{size_of, MaybeUninit};
+
+/// The type used for userspace addresses.
+pub type UserPtr = usize;
+
+/// A pointer to an area in userspace memory, which can be either read-only or read-write.
+///
+/// All methods on this struct are safe: attempting to read or write on bad addresses (either out of
+/// the bounds of the slice or unmapped addresses) will return [`EFAULT`]. Concurrent access,
+/// *including data races to/from userspace memory*, is permitted, because fundamentally another
+/// userspace thread/process could always be modifying memory at the same time (in the same way that
+/// userspace Rust's [`std::io`] permits data races with the contents of files on disk). In the
+/// presence of a race, the exact byte values read/written are unspecified but the operation is
+/// well-defined. Kernelspace code should validate its copy of data after completing a read, and not
+/// expect that multiple reads of the same address will return the same value.
+///
+/// These APIs are designed to make it difficult to accidentally write TOCTOU (time-of-check to
+/// time-of-use) bugs. Every time a memory location is read, the reader's position is advanced by
+/// the read length and the next read will start from there. This helps prevent accidentally reading
+/// the same location twice and causing a TOCTOU bug.
+///
+/// Creating a [`UserSliceReader`] and/or [`UserSliceWriter`] consumes the `UserSlice`, helping
+/// ensure that there aren't multiple readers or writers to the same location.
+///
+/// If double-fetching a memory location is necessary for some reason, then that is done by creating
+/// multiple readers to the same memory location, e.g. using [`clone_reader`].
+///
+/// # Examples
+///
+/// Takes a region of userspace memory from the current process, and modifies it by adding one to
+/// every byte in the region.
+///
+/// ```no_run
+/// use alloc::vec::Vec;
+/// use core::ffi::c_void;
+/// use kernel::error::Result;
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result<()> {
+/// let (read, mut write) = UserSlice::new(uptr, len).reader_writer();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// for b in &mut buf {
+/// *b = b.wrapping_add(1);
+/// }
+///
+/// write.write_slice(&buf)?;
+/// Ok(())
+/// }
+/// ```
+///
+/// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.
+///
+/// ```no_run
+/// use alloc::vec::Vec;
+/// use core::ffi::c_void;
+/// use kernel::error::{code::EINVAL, Result};
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// /// Returns whether the data in this region is valid.
+/// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// todo!()
+/// }
+///
+/// /// Returns the bytes behind this user pointer if they are valid.
+/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<Vec<u8>> {
+/// if !is_valid(uptr, len)? {
+/// return Err(EINVAL);
+/// }
+///
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// // THIS IS A BUG! The bytes could have changed since we checked them.
+/// //
+/// // To avoid this kind of bug, don't call `UserSlice::new` multiple
+/// // times with the same address.
+/// Ok(buf)
+/// }
+/// ```
+///
+/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
+/// [`clone_reader`]: UserSliceReader::clone_reader
+pub struct UserSlice {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSlice {
+ /// Constructs a user slice from a raw pointer and a length in bytes.
+ ///
+ /// Constructing a [`UserSlice`] performs no checks on the provided address and length; it can
+ /// safely be constructed inside a kernel thread with no current userspace process. Reads and
+ /// writes wrap the kernel APIs `copy_from_user` and `copy_to_user`, which check the memory map
+ /// of the current process and enforce that the address range is within the user range (no
+ /// additional calls to `access_ok` are needed). Validity of the pointer is checked when you
+ /// attempt to read or write, not in the call to `UserSlice::new`.
+ ///
+ /// Callers must be careful to avoid time-of-check to time-of-use (TOCTOU) issues. The simplest
+ /// way is to create a single instance of [`UserSlice`] per user memory block, as its reader
+ /// reads each byte at most once.
+ pub fn new(ptr: UserPtr, length: usize) -> Self {
+ UserSlice { ptr, length }
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all(self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+ self.reader().read_all(buf, flags)
+ }
+
+ /// Constructs a [`UserSliceReader`].
+ pub fn reader(self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs a [`UserSliceWriter`].
+ pub fn writer(self) -> UserSliceWriter {
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs both a [`UserSliceReader`] and a [`UserSliceWriter`].
+ ///
+ /// Usually when this is used, you will first read the data and then overwrite it.
+ pub fn reader_writer(self) -> (UserSliceReader, UserSliceWriter) {
+ (
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ )
+ }
+}
+
+/// A reader for [`UserSlice`].
+///
+/// Used to incrementally read from the user slice.
+pub struct UserSliceReader {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceReader {
+ /// Skips the provided number of bytes.
+ ///
+ /// Returns [`EFAULT`] if asked to skip more bytes than are left in the reader.
+ pub fn skip(&mut self, num_skip: usize) -> Result {
+ // Update `self.length` first since that's the fallible part of this operation.
+ self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
+ self.ptr = self.ptr.wrapping_add(num_skip);
+ Ok(())
+ }
+
+ /// Creates a reader that can access the same range of data.
+ ///
+ /// Reading from the clone does not advance the current reader.
+ ///
+ /// The caller should take care to not introduce TOCTOU issues, as described in the
+ /// documentation for [`UserSlice`].
+ pub fn clone_reader(&self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Returns the number of bytes left to be read from this reader.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if no data is available in the user slice.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// For a version that uses `&mut [u8]`, please see [`UserSliceReader::read_slice`].
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ ///
+ /// # Guarantees
+ ///
+ /// After a successful call to this method, all bytes in `out` are initialized.
+ pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
+ let len = out.len();
+ let out_ptr = out.as_mut_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: `out_ptr` points into a mutable slice of length `len_ulong`, so we may write
+ // that many bytes to it.
+ let res =
+ unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len_ulong) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
+ // SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
+ // `out`.
+ let out = unsafe { &mut *(out as *mut [u8] as *mut [MaybeUninit<u8>]) };
+ self.read_raw(out)
+ }
+
+ /// Reads a value of the specified type.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`].
+ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ let mut out: MaybeUninit<T> = MaybeUninit::uninit();
+ // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
+ //
+ // By using the _copy_from_user variant, we skip the check_object_size check that verifies
+ // the kernel pointer. This mirrors the logic on the C side that skips the check when the
+ // length is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_from_user(
+ out.as_mut_ptr().cast::<c_void>(),
+ self.ptr as *const c_void,
+ len_ulong,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ // SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
+ // `FromBytes`, any bit-pattern is a valid value for this type.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all(mut self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+ let len = self.length;
+ VecExt::<u8>::reserve(buf, len, flags)?;
+
+ // The call to `reserve` was successful, so the spare capacity is at least `len` bytes
+ // long.
+ self.read_raw(&mut buf.spare_capacity_mut()[..len])?;
+
+ // SAFETY: Since the call to `read_raw` was successful, the next `len` bytes of the
+ // vector have been initialized.
+ unsafe { buf.set_len(buf.len() + len) };
+ Ok(())
+ }
+}
+
+/// A writer for [`UserSlice`].
+///
+/// Used to incrementally write into the user slice.
+pub struct UserSliceWriter {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceWriter {
+ /// Returns the amount of space remaining in this buffer.
+ ///
+ /// Note that even writing less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if no more data can be written to this buffer.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Writes raw data to this user pointer from a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write_slice(&mut self, data: &[u8]) -> Result {
+ let len = data.len();
+ let data_ptr = data.as_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: `data_ptr` points into an immutable slice of length `len_ulong`, so we may read
+ // that many bytes from it.
+ let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len_ulong) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Writes the provided Rust value to this userspace pointer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: The reference points to a value of type `T`, so it is valid for reading
+ // `size_of::<T>()` bytes.
+ //
+ // By using the _copy_to_user variant, we skip the check_object_size check that verifies the
+ // kernel pointer. This mirrors the logic on the C side that skips the check when the length
+ // is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_to_user(
+ self.ptr as *mut c_void,
+ (value as *const T).cast::<c_void>(),
+ len_ulong,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+}
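
A minimal sketch tying the typed accessors to the traits above (again not part of the patch; the handler is hypothetical and `SampleRequest` is the illustrative `FromBytes`/`AsBytes` struct from the earlier sketch): read a fixed-layout value from userspace, modify it, and write it back through the paired writer.

```rust
use kernel::prelude::*;
use kernel::uaccess::{UserPtr, UserSlice};

fn handle_request(uptr: UserPtr, len: usize) -> Result {
    // Consume the slice into a reader/writer pair; both start at `uptr`.
    let (mut reader, mut writer) = UserSlice::new(uptr, len).reader_writer();

    // `read` advances the reader, so the same bytes are never fetched twice;
    // a bad address or a too-short slice fails with EFAULT.
    // `SampleRequest` is the illustrative struct sketched above.
    let mut req: SampleRequest = reader.read()?;

    req.flags |= 1;

    // Write the updated value back over the same user buffer.
    writer.write(&req)?;
    Ok(())
}
```
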
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 1cec63a2aea8..553a5cba2adc 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -482,24 +482,26 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// use kernel::sync::Arc;
/// use kernel::workqueue::{self, impl_has_work, Work};
///
-/// struct MyStruct {
-/// work_field: Work<MyStruct, 17>,
+/// struct MyStruct<'a, T, const N: usize> {
+/// work_field: Work<MyStruct<'a, T, N>, 17>,
+/// f: fn(&'a [T; N]),
/// }
///
/// impl_has_work! {
-/// impl HasWork<MyStruct, 17> for MyStruct { self.work_field }
+/// impl{'a, T, const N: usize} HasWork<MyStruct<'a, T, N>, 17>
+/// for MyStruct<'a, T, N> { self.work_field }
/// }
/// ```
#[macro_export]
macro_rules! impl_has_work {
- ($(impl$(<$($implarg:ident),*>)?
+ ($(impl$({$($generics:tt)*})?
HasWork<$work_type:ty $(, $id:tt)?>
- for $self:ident $(<$($selfarg:ident),*>)?
+ for $self:ty
{ self.$field:ident }
)*) => {$(
// SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
// type.
- unsafe impl$(<$($implarg),*>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self $(<$($selfarg),*>)? {
+ unsafe impl$(<$($generics)+>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self {
const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
#[inline]
@@ -515,7 +517,7 @@ macro_rules! impl_has_work {
pub use impl_has_work;
impl_has_work! {
- impl<T> HasWork<Self> for ClosureWork<T> { self.work }
+ impl{T} HasWork<Self> for ClosureWork<T> { self.work }
}
unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 520eae5fd792..159e75292970 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -35,6 +35,7 @@ use proc_macro::TokenStream;
/// author: "Rust for Linux Contributors",
/// description: "My very own kernel module!",
/// license: "GPL",
+/// alias: ["alternate_module_name"],
/// }
///
/// struct MyModule;
@@ -55,13 +56,45 @@ use proc_macro::TokenStream;
/// }
/// ```
///
+/// ## Firmware
+///
+/// The following example shows how to declare a kernel module that needs
+/// to load binary firmware files. You need to specify the file names of
+/// the firmware in the `firmware` field. The information is embedded
+/// in the `modinfo` section of the kernel module. For example, a tool to
+/// build an initramfs uses this information to put the firmware files into
+/// the initramfs image.
+///
+/// ```ignore
+/// use kernel::prelude::*;
+///
+/// module!{
+/// type: MyDeviceDriverModule,
+/// name: "my_device_driver_module",
+/// author: "Rust for Linux Contributors",
+/// description: "My device driver requires firmware",
+/// license: "GPL",
+/// firmware: ["my_device_firmware1.bin", "my_device_firmware2.bin"],
+/// }
+///
+/// struct MyDeviceDriverModule;
+///
+/// impl kernel::Module for MyDeviceDriverModule {
+/// fn init() -> Result<Self> {
+/// Ok(Self)
+/// }
+/// }
+/// ```
+///
/// # Supported argument types
/// - `type`: type which implements the [`Module`] trait (required).
-/// - `name`: byte array of the name of the kernel module (required).
-/// - `author`: byte array of the author of the kernel module.
-/// - `description`: byte array of the description of the kernel module.
-/// - `license`: byte array of the license of the kernel module (required).
-/// - `alias`: byte array of alias name of the kernel module.
+/// - `name`: ASCII string literal of the name of the kernel module (required).
+/// - `author`: string literal of the author of the kernel module.
+/// - `description`: string literal of the description of the kernel module.
+/// - `license`: ASCII string literal of the license of the kernel module (required).
+/// - `alias`: array of ASCII string literals of the alias names of the kernel module.
+/// - `firmware`: array of ASCII string literals of the firmware files of
+/// the kernel module.
#[proc_macro]
pub fn module(ts: TokenStream) -> TokenStream {
module::module(ts)
@@ -312,7 +345,7 @@ pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
///
/// Currently supported modifiers are:
/// * `span`: change the span of concatenated identifier to the span of the specified token. By
-/// default the span of the `[< >]` group is used.
+/// default the span of the `[< >]` group is used.
/// * `lower`: change the identifier to lower case.
/// * `upper`: change the identifier to upper case.
///
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index acd0393b5095..411dc103d82e 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -97,14 +97,22 @@ struct ModuleInfo {
author: Option<String>,
description: Option<String>,
alias: Option<Vec<String>>,
+ firmware: Option<Vec<String>>,
}
impl ModuleInfo {
fn parse(it: &mut token_stream::IntoIter) -> Self {
let mut info = ModuleInfo::default();
- const EXPECTED_KEYS: &[&str] =
- &["type", "name", "author", "description", "license", "alias"];
+ const EXPECTED_KEYS: &[&str] = &[
+ "type",
+ "name",
+ "author",
+ "description",
+ "license",
+ "alias",
+ "firmware",
+ ];
const REQUIRED_KEYS: &[&str] = &["type", "name", "license"];
let mut seen_keys = Vec::new();
@@ -131,6 +139,7 @@ impl ModuleInfo {
"description" => info.description = Some(expect_string(it)),
"license" => info.license = expect_string_ascii(it),
"alias" => info.alias = Some(expect_string_array(it)),
+ "firmware" => info.firmware = Some(expect_string_array(it)),
_ => panic!(
"Unknown key \"{}\". Valid keys are: {:?}.",
key, EXPECTED_KEYS
@@ -186,6 +195,11 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
modinfo.emit("alias", &alias);
}
}
+ if let Some(firmware) = info.firmware {
+ for fw in firmware {
+ modinfo.emit("firmware", &fw);
+ }
+ }
// Built-in modules also export the `file` modinfo string.
let file =
diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
index 0caad902ba40..80a00260e3e7 100644
--- a/rust/uapi/lib.rs
+++ b/rust/uapi/lib.rs
@@ -14,6 +14,7 @@
#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
#![allow(
clippy::all,
+ dead_code,
missing_docs,
non_camel_case_types,
non_upper_case_globals,
diff --git a/samples/kobject/kobject-example.c b/samples/kobject/kobject-example.c
index 96678ed73216..c9c3db19799a 100644
--- a/samples/kobject/kobject-example.c
+++ b/samples/kobject/kobject-example.c
@@ -140,5 +140,6 @@ static void __exit example_exit(void)
module_init(example_init);
module_exit(example_exit);
+MODULE_DESCRIPTION("Sample kobject implementation");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <greg@kroah.com>");
diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
index 342452282719..552d7e363539 100644
--- a/samples/kobject/kset-example.c
+++ b/samples/kobject/kset-example.c
@@ -284,5 +284,6 @@ static void __exit example_exit(void)
module_init(example_init);
module_exit(example_exit);
+MODULE_DESCRIPTION("Sample kset and ktype implementation");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <greg@kroah.com>");
diff --git a/samples/qmi/qmi_sample_client.c b/samples/qmi/qmi_sample_client.c
index c045e3d24326..a42892523d3b 100644
--- a/samples/qmi/qmi_sample_client.c
+++ b/samples/qmi/qmi_sample_client.c
@@ -511,7 +511,7 @@ err_release_qmi_handle:
return ret;
}
-static int qmi_sample_remove(struct platform_device *pdev)
+static void qmi_sample_remove(struct platform_device *pdev)
{
struct qmi_sample *sample = platform_get_drvdata(pdev);
@@ -520,13 +520,11 @@ static int qmi_sample_remove(struct platform_device *pdev)
debugfs_remove(sample->de_dir);
qmi_handle_release(&sample->qmi);
-
- return 0;
}
static struct platform_driver qmi_sample_driver = {
.probe = qmi_sample_probe,
- .remove = qmi_sample_remove,
+ .remove_new = qmi_sample_remove,
.driver = {
.name = "qmi_sample_client",
},
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index faf37bafa3f8..ed8a7493524b 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -68,7 +68,7 @@ kbuild-file = $(or $(wildcard $(src)/Kbuild),$(src)/Makefile)
# Read a file, replacing newlines with spaces
#
# Make 4.2 or later can read a file by using its builtin function.
-ifneq ($(filter-out 3.% 4.0 4.1, $(MAKE_VERSION)),)
+ifneq ($(filter-out 4.0 4.1, $(MAKE_VERSION)),)
read-file = $(subst $(newline),$(space),$(file < $1))
else
read-file = $(shell cat $1 2>/dev/null)
diff --git a/scripts/Makefile b/scripts/Makefile
index fe56eeef09dd..dccef663ca82 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -12,7 +12,7 @@ hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
hostprogs-always-$(CONFIG_RUST_KERNEL_DOCTESTS) += rustdoc_test_builder
hostprogs-always-$(CONFIG_RUST_KERNEL_DOCTESTS) += rustdoc_test_gen
-ifdef CONFIG_X86_64
+ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),)
always-$(CONFIG_RUST) += target.json
filechk_rust_target = $< < include/config/auto.conf
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 29da0dc9776d..fe3668dc4954 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -409,12 +409,16 @@ cmd_dtc = $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ;
DT_CHECK_CMD = $(DT_CHECKER) $(DT_CHECKER_FLAGS) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA)
+# NOTE:
+# Do not replace $(filter %.dtb %.dtbo, $^) with $(real-prereqs). When a single
+# DTB is turned into a multi-blob DTB, $^ will contain header file dependencies
+# recorded in the .*.cmd file.
ifneq ($(CHECK_DTBS),)
quiet_cmd_fdtoverlay = DTOVLCH $@
- cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(real-prereqs) ; $(DT_CHECK_CMD) $@ || true
+ cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(filter %.dtb %.dtbo, $^) ; $(DT_CHECK_CMD) $@ || true
else
quiet_cmd_fdtoverlay = DTOVL $@
- cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(real-prereqs)
+ cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(filter %.dtb %.dtbo, $^)
endif
$(multi-dtb-y): FORCE
@@ -529,6 +533,7 @@ quiet_cmd_fit = FIT $@
cmd_fit = $(MAKE_FIT) -o $@ --arch $(UIMAGE_ARCH) --os linux \
--name '$(UIMAGE_NAME)' \
$(if $(findstring 1,$(KBUILD_VERBOSE)),-v) \
+ $(if $(FIT_DECOMPOSE_DTBS),--decompose-dtbs) \
--compress $(FIT_COMPRESSION) -k $< @$(word 2,$^)
# XZ
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index bf016af8bf8a..4a80584ec771 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -141,6 +141,19 @@ snap-pkg:
cd $(objtree)/snap && \
snapcraft --target-arch=$(UTS_MACHINE)
+# pacman-pkg
+# ---------------------------------------------------------------------------
+
+PHONY += pacman-pkg
+pacman-pkg:
+ @ln -srf $(srctree)/scripts/package/PKGBUILD $(objtree)/PKGBUILD
+ +objtree="$(realpath $(objtree))" \
+ BUILDDIR="$(realpath $(objtree))/pacman" \
+ CARCH="$(UTS_MACHINE)" \
+ KBUILD_MAKEFLAGS="$(MAKEFLAGS)" \
+ KBUILD_REVISION="$(shell $(srctree)/scripts/build-version)" \
+ makepkg $(MAKEPKGOPTS)
+
# dir-pkg tar*-pkg - tarball targets
# ---------------------------------------------------------------------------
@@ -221,6 +234,7 @@ help:
@echo ' bindeb-pkg - Build only the binary kernel deb package'
@echo ' snap-pkg - Build only the binary kernel snap package'
@echo ' (will connect to external hosts)'
+ @echo ' pacman-pkg - Build only the binary kernel pacman package'
@echo ' dir-pkg - Build the kernel as a plain directory structure'
@echo ' tar-pkg - Build the kernel as an uncompressed tarball'
@echo ' targz-pkg - Build the kernel as a gzip compressed tarball'
diff --git a/init/build-version b/scripts/build-version
index 537d45815083..537d45815083 100755
--- a/init/build-version
+++ b/scripts/build-version
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 641b713a033a..87f34925eb7b 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -169,6 +169,23 @@ fn main() {
ts.push("features", features);
ts.push("llvm-target", "x86_64-linux-gnu");
ts.push("target-pointer-width", "64");
+ } else if cfg.has("X86_32") {
+ // This only works on UML, as i386 otherwise needs regparm support in rustc
+ if !cfg.has("UML") {
+ panic!("32-bit x86 only works under UML");
+ }
+ ts.push("arch", "x86");
+ ts.push(
+ "data-layout",
+ "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
+ );
+ let mut features = "-3dnow,-3dnowa,-mmx,+soft-float".to_string();
+ if cfg.has("MITIGATION_RETPOLINE") {
+ features += ",+retpoline-external-thunk";
+ }
+ ts.push("features", features);
+ ts.push("llvm-target", "i386-unknown-linux-gnu");
+ ts.push("target-pointer-width", "32");
} else if cfg.has("LOONGARCH") {
panic!("loongarch uses the builtin rustc loongarch64-unknown-none-softfloat target");
} else {
diff --git a/scripts/kconfig/array_size.h b/scripts/include/array_size.h
index 26ba78d867d1..26ba78d867d1 100644
--- a/scripts/kconfig/array_size.h
+++ b/scripts/include/array_size.h
diff --git a/scripts/kconfig/hashtable.h b/scripts/include/hashtable.h
index a0a2c8f5f639..a0a2c8f5f639 100644
--- a/scripts/kconfig/hashtable.h
+++ b/scripts/include/hashtable.h
diff --git a/scripts/kconfig/list.h b/scripts/include/list.h
index 882859ddf9f4..409201cd495b 100644
--- a/scripts/kconfig/list.h
+++ b/scripts/include/list.h
@@ -128,6 +128,29 @@ static inline void list_del(struct list_head *entry)
}
/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add_tail(list, head);
+}
+
+/**
* list_is_head - tests whether @list is the list @head
* @list: the entry to test
* @head: the head of the list
@@ -167,6 +190,17 @@ static inline int list_empty(const struct list_head *head)
list_entry((ptr)->next, type, member)
/**
+ * list_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -175,6 +209,14 @@ static inline int list_empty(const struct list_head *head)
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
+ * list_prev_entry - get the prev element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_prev_entry(pos, member) \
+ list_entry((pos)->member.prev, typeof(*(pos)), member)
+
+/**
* list_entry_is_head - test if the entry points to the head of the list
* @pos: the type * to cursor
* @head: the head for your list.
@@ -195,6 +237,17 @@ static inline int list_empty(const struct list_head *head)
pos = list_next_entry(pos, member))
/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_last_entry(head, typeof(*pos), member); \
+ !list_entry_is_head(pos, head, member); \
+ pos = list_prev_entry(pos, member))
+
+/**
* list_for_each_entry_safe - iterate over list of given type. Safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
diff --git a/scripts/kconfig/list_types.h b/scripts/include/list_types.h
index d935b7c5aa81..d935b7c5aa81 100644
--- a/scripts/kconfig/list_types.h
+++ b/scripts/include/list_types.h
diff --git a/scripts/install.sh b/scripts/install.sh
index 9bb0fb44f04a..05d62ac513ee 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -20,6 +20,10 @@ do
fi
done
+if [ -n "${INSTALL_PATH}" ] && ! [ -e "${INSTALL_PATH}" ]; then
+ mkdir -p "${INSTALL_PATH}"
+fi
+
# User/arch may have a custom install script
for file in "${HOME}/bin/${INSTALLKERNEL}" \
"/sbin/${INSTALLKERNEL}" \
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 47978efe4797..0ed873491bf5 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -6,7 +6,7 @@
* of the GNU General Public License, incorporated herein by reference.
*
* Usage: kallsyms [--all-symbols] [--absolute-percpu]
- * [--base-relative] [--lto-clang] in.map > out.S
+ * [--lto-clang] in.map > out.S
*
* Table compression uses all the unused char codes on the symbols and
* maps these to the most used substrings (tokens). For instance, it might
@@ -36,8 +36,7 @@ struct sym_entry {
unsigned long long addr;
unsigned int len;
unsigned int seq;
- unsigned int start_pos;
- unsigned int percpu_absolute;
+ bool percpu_absolute;
unsigned char sym[];
};
@@ -63,7 +62,6 @@ static struct sym_entry **table;
static unsigned int table_size, table_cnt;
static int all_symbols;
static int absolute_percpu;
-static int base_relative;
static int lto_clang;
static int token_profit[0x10000];
@@ -76,7 +74,7 @@ static unsigned char best_table_len[256];
static void usage(void)
{
fprintf(stderr, "Usage: kallsyms [--all-symbols] [--absolute-percpu] "
- "[--base-relative] [--lto-clang] in.map > out.S\n");
+ "[--lto-clang] in.map > out.S\n");
exit(1);
}
@@ -183,7 +181,7 @@ static struct sym_entry *read_symbol(FILE *in, char **buf, size_t *buf_len)
sym->len = len;
sym->sym[0] = type;
strcpy(sym_name(sym), name);
- sym->percpu_absolute = 0;
+ sym->percpu_absolute = false;
return sym;
}
@@ -259,12 +257,6 @@ static void shrink_table(void)
}
}
table_cnt = pos;
-
- /* When valid symbol is not registered, exit to error */
- if (!table_cnt) {
- fprintf(stderr, "No valid symbol.\n");
- exit(1);
- }
}
static void read_map(const char *in)
@@ -285,7 +277,7 @@ static void read_map(const char *in)
if (!sym)
continue;
- sym->start_pos = table_cnt;
+ sym->seq = table_cnt;
if (table_cnt >= table_size) {
table_size += 10000;
@@ -347,7 +339,7 @@ static int expand_symbol(const unsigned char *data, int len, char *result)
return total;
}
-static int symbol_absolute(const struct sym_entry *s)
+static bool symbol_absolute(const struct sym_entry *s)
{
return s->percpu_absolute;
}
@@ -400,7 +392,7 @@ static void write_src(void)
{
unsigned int i, k, off;
unsigned int best_idx[256];
- unsigned int *markers;
+ unsigned int *markers, markers_cnt;
char buf[KSYM_NAME_LEN];
printf("#include <asm/bitsperlong.h>\n");
@@ -420,7 +412,8 @@ static void write_src(void)
/* table of offset markers, that give the offset in the compressed stream
* every 256 symbols */
- markers = malloc(sizeof(unsigned int) * ((table_cnt + 255) / 256));
+ markers_cnt = (table_cnt + 255) / 256;
+ markers = malloc(sizeof(*markers) * markers_cnt);
if (!markers) {
fprintf(stderr, "kallsyms failure: "
"unable to allocate required memory\n");
@@ -462,21 +455,19 @@ static void write_src(void)
}
for (k = 0; k < table[i]->len; k++)
printf(", 0x%02x", table[i]->sym[k]);
- printf("\n");
- }
- printf("\n");
- /*
- * Now that we wrote out the compressed symbol names, restore the
- * original names, which are needed in some of the later steps.
- */
- for (i = 0; i < table_cnt; i++) {
+ /*
+ * Now that we wrote out the compressed symbol name, restore the
+ * original name and print it in the comment.
+ */
expand_symbol(table[i]->sym, table[i]->len, buf);
strcpy((char *)table[i]->sym, buf);
+ printf("\t/* %s */\n", table[i]->sym);
}
+ printf("\n");
output_label("kallsyms_markers");
- for (i = 0; i < ((table_cnt + 255) >> 8); i++)
+ for (i = 0; i < markers_cnt; i++)
printf("\t.long\t%u\n", markers[i]);
printf("\n");
@@ -497,54 +488,43 @@ static void write_src(void)
printf("\t.short\t%d\n", best_idx[i]);
printf("\n");
- if (!base_relative)
- output_label("kallsyms_addresses");
- else
- output_label("kallsyms_offsets");
+ output_label("kallsyms_offsets");
for (i = 0; i < table_cnt; i++) {
- if (base_relative) {
- /*
- * Use the offset relative to the lowest value
- * encountered of all relative symbols, and emit
- * non-relocatable fixed offsets that will be fixed
- * up at runtime.
- */
+ /*
+ * Use the offset relative to the lowest value
+ * encountered of all relative symbols, and emit
+ * non-relocatable fixed offsets that will be fixed
+ * up at runtime.
+ */
- long long offset;
- int overflow;
-
- if (!absolute_percpu) {
- offset = table[i]->addr - relative_base;
- overflow = (offset < 0 || offset > UINT_MAX);
- } else if (symbol_absolute(table[i])) {
- offset = table[i]->addr;
- overflow = (offset < 0 || offset > INT_MAX);
- } else {
- offset = relative_base - table[i]->addr - 1;
- overflow = (offset < INT_MIN || offset >= 0);
- }
- if (overflow) {
- fprintf(stderr, "kallsyms failure: "
- "%s symbol value %#llx out of range in relative mode\n",
- symbol_absolute(table[i]) ? "absolute" : "relative",
- table[i]->addr);
- exit(EXIT_FAILURE);
- }
- printf("\t.long\t%#x /* %s */\n", (int)offset, table[i]->sym);
- } else if (!symbol_absolute(table[i])) {
- output_address(table[i]->addr);
+ long long offset;
+ int overflow;
+
+ if (!absolute_percpu) {
+ offset = table[i]->addr - relative_base;
+ overflow = (offset < 0 || offset > UINT_MAX);
+ } else if (symbol_absolute(table[i])) {
+ offset = table[i]->addr;
+ overflow = (offset < 0 || offset > INT_MAX);
} else {
- printf("\tPTR\t%#llx\n", table[i]->addr);
+ offset = relative_base - table[i]->addr - 1;
+ overflow = (offset < INT_MIN || offset >= 0);
+ }
+ if (overflow) {
+ fprintf(stderr, "kallsyms failure: "
+ "%s symbol value %#llx out of range in relative mode\n",
+ symbol_absolute(table[i]) ? "absolute" : "relative",
+ table[i]->addr);
+ exit(EXIT_FAILURE);
}
+ printf("\t.long\t%#x\t/* %s */\n", (int)offset, table[i]->sym);
}
printf("\n");
- if (base_relative) {
- output_label("kallsyms_relative_base");
- output_address(relative_base);
- printf("\n");
- }
+ output_label("kallsyms_relative_base");
+ output_address(relative_base);
+ printf("\n");
if (lto_clang)
for (i = 0; i < table_cnt; i++)
@@ -553,10 +533,11 @@ static void write_src(void)
sort_symbols_by_name();
output_label("kallsyms_seqs_of_names");
for (i = 0; i < table_cnt; i++)
- printf("\t.byte 0x%02x, 0x%02x, 0x%02x\n",
+ printf("\t.byte 0x%02x, 0x%02x, 0x%02x\t/* %s */\n",
(unsigned char)(table[i]->seq >> 16),
(unsigned char)(table[i]->seq >> 8),
- (unsigned char)(table[i]->seq >> 0));
+ (unsigned char)(table[i]->seq >> 0),
+ table[i]->sym);
printf("\n");
}
@@ -780,7 +761,7 @@ static int compare_symbols(const void *a, const void *b)
return wa - wb;
/* sort by initial order, so that other symbols are left undisturbed */
- return sa->start_pos - sb->start_pos;
+ return sa->seq - sb->seq;
}
static void sort_symbols(void)
@@ -800,7 +781,7 @@ static void make_percpus_absolute(void)
* versions of this tool.
*/
table[i]->sym[0] = 'A';
- table[i]->percpu_absolute = 1;
+ table[i]->percpu_absolute = true;
}
}
@@ -826,7 +807,6 @@ int main(int argc, char **argv)
static const struct option long_options[] = {
{"all-symbols", no_argument, &all_symbols, 1},
{"absolute-percpu", no_argument, &absolute_percpu, 1},
- {"base-relative", no_argument, &base_relative, 1},
{"lto-clang", no_argument, &lto_clang, 1},
{},
};
@@ -847,8 +827,7 @@ int main(int argc, char **argv)
if (absolute_percpu)
make_percpus_absolute();
sort_symbols();
- if (base_relative)
- record_relative_base();
+ record_relative_base();
optimize_token_table();
write_src();
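
For readers skimming the kallsyms hunk above: with `--base-relative` removed, every entry of `kallsyms_offsets` is encoded by the same three rules that were previously conditional. Below is a small sketch of just that encoding, in Rust for illustration only; it mirrors the C logic shown in the diff and the names are mine, not part of the tool.

```rust
// Returns the 32-bit value emitted for one symbol, or None on overflow
// (the C tool prints an error and exits in that case).
fn encode_offset(addr: u64, relative_base: u64,
                 absolute_percpu: bool, symbol_absolute: bool) -> Option<i32> {
    let (offset, overflow): (i64, bool) = if !absolute_percpu {
        // Offset from the lowest address seen; must fit in an unsigned 32-bit value.
        let off = addr.wrapping_sub(relative_base) as i64;
        (off, off < 0 || off > u32::MAX as i64)
    } else if symbol_absolute {
        // Absolute per-CPU symbol: store the address itself, as a non-negative value.
        (addr as i64, (addr as i64) < 0 || (addr as i64) > i32::MAX as i64)
    } else {
        // Relative symbol in absolute-percpu mode: stored negated minus one,
        // so the runtime can tell the two cases apart by the sign.
        let off = relative_base.wrapping_sub(addr).wrapping_sub(1) as i64;
        (off, off < i32::MIN as i64 || off >= 0)
    };
    if overflow { None } else { Some(offset as i32) }
}
```
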
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 8ad2c52d9b1f..3d7d454c54da 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -114,51 +114,54 @@ static void set_randconfig_seed(void)
srand(seed);
}
-static bool randomize_choice_values(struct symbol *csym)
+/**
+ * randomize_choice_values - randomize choice block
+ *
+ * @choice: menu entry for the choice
+ */
+static void randomize_choice_values(struct menu *choice)
{
- struct property *prop;
- struct symbol *sym;
- struct expr *e;
- int cnt, def;
+ struct menu *menu;
+ int x;
+ int cnt = 0;
/*
- * If choice is mod then we may have more items selected
- * and if no then no-one.
- * In both cases stop.
+ * First, count the number of symbols to randomize. If sym_has_value()
+ * is true, it was specified by KCONFIG_ALLCONFIG. It needs to be
+ * respected.
*/
- if (csym->curr.tri != yes)
- return false;
+ menu_for_each_sub_entry(menu, choice) {
+ struct symbol *sym = menu->sym;
- prop = sym_get_choice_prop(csym);
+ if (sym && !sym_has_value(sym))
+ cnt++;
+ }
- /* count entries in choice block */
- cnt = 0;
- expr_list_for_each_sym(prop->expr, e, sym)
- cnt++;
+ while (cnt > 0) {
+ x = rand() % cnt;
- /*
- * find a random value and set it to yes,
- * set the rest to no so we have only one set
- */
- def = rand() % cnt;
-
- cnt = 0;
- expr_list_for_each_sym(prop->expr, e, sym) {
- if (def == cnt++) {
- sym->def[S_DEF_USER].tri = yes;
- csym->def[S_DEF_USER].val = sym;
- } else {
- sym->def[S_DEF_USER].tri = no;
+ menu_for_each_sub_entry(menu, choice) {
+ struct symbol *sym = menu->sym;
+
+ if (sym && !sym_has_value(sym))
+ x--;
+
+ if (x < 0) {
+ sym->def[S_DEF_USER].tri = yes;
+ sym->flags |= SYMBOL_DEF_USER;
+ /*
+ * Move the selected item to the _tail_ because
+ * this needs to have a lower priority than the
+ * user input from KCONFIG_ALLCONFIG.
+ */
+ list_move_tail(&sym->choice_link,
+ &choice->choice_members);
+
+ break;
+ }
}
- sym->flags |= SYMBOL_DEF_USER;
- /* clear VALID to get value calculated */
- sym->flags &= ~SYMBOL_VALID;
+ cnt--;
}
- csym->flags |= SYMBOL_DEF_USER;
- /* clear VALID to get value calculated */
- csym->flags &= ~SYMBOL_VALID;
-
- return true;
}
enum conf_def_mode {
@@ -169,9 +172,9 @@ enum conf_def_mode {
def_random
};
-static bool conf_set_all_new_symbols(enum conf_def_mode mode)
+static void conf_set_all_new_symbols(enum conf_def_mode mode)
{
- struct symbol *sym, *csym;
+ struct menu *menu;
int cnt;
/*
* can't go as the default in switch-case below, otherwise gcc whines
@@ -180,7 +183,6 @@ static bool conf_set_all_new_symbols(enum conf_def_mode mode)
int pby = 50; /* probability of bool = y */
int pty = 33; /* probability of tristate = y */
int ptm = 33; /* probability of tristate = m */
- bool has_changed = false;
if (mode == def_random) {
int n, p[3];
@@ -227,79 +229,51 @@ static bool conf_set_all_new_symbols(enum conf_def_mode mode)
}
}
- for_all_symbols(sym) {
- if (sym_has_value(sym) || sym->flags & SYMBOL_VALID)
- continue;
- switch (sym_get_type(sym)) {
- case S_BOOLEAN:
- case S_TRISTATE:
- has_changed = true;
- switch (mode) {
- case def_yes:
- sym->def[S_DEF_USER].tri = yes;
- break;
- case def_mod:
- sym->def[S_DEF_USER].tri = mod;
- break;
- case def_no:
- sym->def[S_DEF_USER].tri = no;
- break;
- case def_random:
- sym->def[S_DEF_USER].tri = no;
- cnt = rand() % 100;
- if (sym->type == S_TRISTATE) {
- if (cnt < pty)
- sym->def[S_DEF_USER].tri = yes;
- else if (cnt < pty + ptm)
- sym->def[S_DEF_USER].tri = mod;
- } else if (cnt < pby)
- sym->def[S_DEF_USER].tri = yes;
- break;
- default:
- continue;
- }
- if (!(sym_is_choice(sym) && mode == def_random))
- sym->flags |= SYMBOL_DEF_USER;
- break;
- default:
- break;
- }
-
- }
+ menu_for_each_entry(menu) {
+ struct symbol *sym = menu->sym;
+ tristate val;
- sym_clear_all_valid();
+ if (!sym || !menu->prompt || sym_has_value(sym) ||
+ (sym->type != S_BOOLEAN && sym->type != S_TRISTATE) ||
+ sym_is_choice_value(sym))
+ continue;
- /*
- * We have different type of choice blocks.
- * If curr.tri equals to mod then we can select several
- * choice symbols in one block.
- * In this case we do nothing.
- * If curr.tri equals yes then only one symbol can be
- * selected in a choice block and we set it to yes,
- * and the rest to no.
- */
- if (mode != def_random) {
- for_all_symbols(csym) {
- if ((sym_is_choice(csym) && !sym_has_value(csym)) ||
- sym_is_choice_value(csym))
- csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES;
+ if (sym_is_choice(sym)) {
+ if (mode == def_random)
+ randomize_choice_values(menu);
+ continue;
}
- }
- for_all_symbols(csym) {
- if (sym_has_value(csym) || !sym_is_choice(csym))
+ switch (mode) {
+ case def_yes:
+ val = yes;
+ break;
+ case def_mod:
+ val = mod;
+ break;
+ case def_no:
+ val = no;
+ break;
+ case def_random:
+ val = no;
+ cnt = rand() % 100;
+ if (sym->type == S_TRISTATE) {
+ if (cnt < pty)
+ val = yes;
+ else if (cnt < pty + ptm)
+ val = mod;
+ } else if (cnt < pby) {
+ val = yes;
+ }
+ break;
+ default:
continue;
-
- sym_calc_value(csym);
- if (mode == def_random)
- has_changed |= randomize_choice_values(csym);
- else {
- set_all_choice_values(csym);
- has_changed = true;
}
+ sym->def[S_DEF_USER].tri = val;
+ sym->flags |= SYMBOL_DEF_USER;
}
- return has_changed;
+ sym_clear_all_valid();
}
static void conf_rewrite_tristates(tristate old_val, tristate new_val)
@@ -448,39 +422,15 @@ help:
static void conf_choice(struct menu *menu)
{
- struct symbol *sym, *def_sym;
+ struct symbol *def_sym;
struct menu *child;
- bool is_new;
-
- sym = menu->sym;
- is_new = !sym_has_value(sym);
- if (sym_is_changeable(sym)) {
- conf_sym(menu);
- sym_calc_value(sym);
- switch (sym_get_tristate_value(sym)) {
- case no:
- case mod:
- return;
- case yes:
- break;
- }
- } else {
- switch (sym_get_tristate_value(sym)) {
- case no:
- return;
- case mod:
- printf("%*s%s\n", indent - 1, "", menu_get_prompt(menu));
- return;
- case yes:
- break;
- }
- }
+ bool is_new = false;
while (1) {
int cnt, def;
printf("%*s%s\n", indent - 1, "", menu_get_prompt(menu));
- def_sym = sym_get_choice_value(sym);
+ def_sym = sym_calc_choice(menu);
cnt = def = 0;
line[0] = 0;
for (child = menu->list; child; child = child->next) {
@@ -498,8 +448,10 @@ static void conf_choice(struct menu *menu)
printf("%*c", indent, ' ');
printf(" %d. %s (%s)", cnt, menu_get_prompt(child),
child->sym->name);
- if (!sym_has_value(child->sym))
+ if (!sym_has_value(child->sym)) {
+ is_new = true;
printf(" (NEW)");
+ }
printf("\n");
}
printf("%*schoice", indent - 1, "");
@@ -549,7 +501,7 @@ static void conf_choice(struct menu *menu)
print_help(child);
continue;
}
- sym_set_tristate_value(child->sym, yes);
+ choice_set_value(menu, child->sym);
return;
}
}
@@ -596,9 +548,7 @@ static void conf(struct menu *menu)
if (sym_is_choice(sym)) {
conf_choice(menu);
- if (sym->curr.tri != mod)
- return;
- goto conf_childs;
+ return;
}
switch (sym->type) {
@@ -630,10 +580,7 @@ static void check_conf(struct menu *menu)
return;
sym = menu->sym;
- if (sym && !sym_has_value(sym) &&
- (sym_is_changeable(sym) ||
- (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes))) {
-
+ if (sym && !sym_has_value(sym) && sym_is_changeable(sym)) {
switch (input_mode) {
case listnewconfig:
if (sym->name)
@@ -849,8 +796,7 @@ int main(int ac, char **av)
conf_set_all_new_symbols(def_default);
break;
case randconfig:
- /* Really nothing to do in this loop */
- while (conf_set_all_new_symbols(def_random)) ;
+ conf_set_all_new_symbols(def_random);
break;
case defconfig:
conf_set_all_new_symbols(def_default);
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 85b53069ba7a..76193ce5a792 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -382,10 +382,7 @@ load:
def_flags = SYMBOL_DEF << def;
for_all_symbols(sym) {
- sym->flags |= SYMBOL_CHANGED;
sym->flags &= ~(def_flags|SYMBOL_VALID);
- if (sym_is_choice(sym))
- sym->flags |= def_flags;
switch (sym->type) {
case S_INT:
case S_HEX:
@@ -399,6 +396,8 @@ load:
}
while (getline_stripped(&line, &line_asize, in) != -1) {
+ struct menu *choice;
+
conf_lineno++;
if (!line[0]) /* blank line */
@@ -460,25 +459,14 @@ load:
if (conf_set_sym_val(sym, def, def_flags, val))
continue;
- if (sym && sym_is_choice_value(sym)) {
- struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
- switch (sym->def[def].tri) {
- case no:
- break;
- case mod:
- if (cs->def[def].tri == yes) {
- conf_warning("%s creates inconsistent choice state", sym->name);
- cs->flags &= ~def_flags;
- }
- break;
- case yes:
- if (cs->def[def].tri != no)
- conf_warning("override: %s changes choice state", sym->name);
- cs->def[def].val = sym;
- break;
- }
- cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri);
- }
+ /*
+ * If this is a choice member, give it the highest priority.
+ * If conflicting CONFIG options are given from an input file,
+ * the last one wins.
+ */
+ choice = sym_get_choice_menu(sym);
+ if (choice)
+ list_move(&sym->choice_link, &choice->choice_members);
}
free(line);
fclose(in);
@@ -489,7 +477,6 @@ load:
int conf_read(const char *name)
{
struct symbol *sym;
- int conf_unsaved = 0;
conf_set_changed(false);
@@ -520,23 +507,11 @@ int conf_read(const char *name)
} else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE))
/* no previous value and not saved */
continue;
- conf_unsaved++;
+ conf_set_changed(true);
/* maybe print value in verbose mode... */
}
- for_all_symbols(sym) {
- if (sym_has_value(sym) && !sym_is_choice_value(sym)) {
- /* Reset values of generates values, so they'll appear
- * as new, if they should become visible, but that
- * doesn't quite work if the Kconfig and the saved
- * configuration disagree.
- */
- if (sym->visible == no && !conf_unsaved)
- sym->flags &= ~SYMBOL_DEF_USER;
- }
- }
-
- if (conf_warnings || conf_unsaved)
+ if (conf_warnings)
conf_set_changed(true);
return 0;
@@ -784,35 +759,31 @@ int conf_write_defconfig(const char *filename)
struct menu *choice;
sym = menu->sym;
- if (sym && !sym_is_choice(sym)) {
- sym_calc_value(sym);
- if (!(sym->flags & SYMBOL_WRITE))
- continue;
- sym->flags &= ~SYMBOL_WRITE;
- /* If we cannot change the symbol - skip */
- if (!sym_is_changeable(sym))
- continue;
- /* If symbol equals to default value - skip */
- if (strcmp(sym_get_string_value(sym), sym_get_string_default(sym)) == 0)
- continue;
- /*
- * If symbol is a choice value and equals to the
- * default for a choice - skip.
- */
- choice = sym_get_choice_menu(sym);
- if (choice) {
- struct symbol *ds;
-
- ds = sym_choice_default(choice->sym);
- if (sym == ds) {
- if ((sym->type == S_BOOLEAN) &&
- sym_get_tristate_value(sym) == yes)
- continue;
- }
- }
- print_symbol_for_dotconfig(out, sym);
+ if (!sym || sym_is_choice(sym))
+ continue;
+
+ sym_calc_value(sym);
+ if (!(sym->flags & SYMBOL_WRITE))
+ continue;
+ sym->flags &= ~SYMBOL_WRITE;
+ /* Skip unchangeable symbols */
+ if (!sym_is_changeable(sym))
+ continue;
+ /* Skip symbols that are equal to the default */
+ if (!strcmp(sym_get_string_value(sym), sym_get_string_default(sym)))
+ continue;
+
+ /* Skip choice values that are equal to the default */
+ choice = sym_get_choice_menu(sym);
+ if (choice) {
+ struct symbol *ds;
+
+ ds = sym_choice_default(choice);
+ if (sym == ds && sym_get_tristate_value(sym) == yes)
+ continue;
}
+ print_symbol_for_dotconfig(out, sym);
}
fclose(out);
return 0;
@@ -1141,16 +1112,14 @@ int conf_write_autoconf(int overwrite)
}
static bool conf_changed;
-static void (*conf_changed_callback)(void);
+static void (*conf_changed_callback)(bool);
void conf_set_changed(bool val)
{
- bool changed = conf_changed != val;
+ if (conf_changed_callback && conf_changed != val)
+ conf_changed_callback(val);
conf_changed = val;
-
- if (conf_changed_callback && changed)
- conf_changed_callback();
}
bool conf_get_changed(void)
@@ -1158,27 +1127,7 @@ bool conf_get_changed(void)
return conf_changed;
}
-void conf_set_changed_callback(void (*fn)(void))
+void conf_set_changed_callback(void (*fn)(bool))
{
conf_changed_callback = fn;
}
-
-void set_all_choice_values(struct symbol *csym)
-{
- struct property *prop;
- struct symbol *sym;
- struct expr *e;
-
- prop = sym_get_choice_prop(csym);
-
- /*
- * Set all non-assinged choice values to no
- */
- expr_list_for_each_sym(prop->expr, e, sym) {
- if (!sym_has_value(sym))
- sym->def[S_DEF_USER].tri = no;
- }
- csym->flags |= SYMBOL_DEF_USER;
- /* clear VALID to get value calculated */
- csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES);
-}
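
The confdata.c changes above replace the old choice-state bookkeeping with a priority list: each time a choice member is read from the input file, it is moved to the head of its choice's choice_members list, so a later assignment outranks an earlier one and "the last one wins". Below is a minimal standalone sketch of that move-to-front idea; it is a toy re-implementation for illustration only (the real code uses the list_move() helper included as <list.h>), and the struct and variable names here are made up.

/* Toy move-to-front priority list: the most recently seen member sits
 * at the head, so the last assignment read from the file wins.
 */
#include <stdio.h>

struct member {
        const char *name;
        struct member *prev, *next;     /* doubly linked; "members" is the head */
};

static struct member members = { NULL, &members, &members };   /* empty list */

static void move_to_front(struct member *m)
{
        /* unlink (a no-op for a freshly initialised, self-linked node) */
        m->prev->next = m->next;
        m->next->prev = m->prev;
        /* re-insert right after the head: highest priority */
        m->next = members.next;
        m->prev = &members;
        members.next->prev = m;
        members.next = m;
}

int main(void)
{
        struct member a = { "FOO", &a, &a }, b = { "BAR", &b, &b };

        /* input read in order: FOO=y, then BAR=y -- the later one wins */
        move_to_front(&a);
        move_to_front(&b);

        printf("chosen: %s\n", members.next->name);     /* prints "BAR" */
        return 0;
}
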
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index fcc190b67b6f..c349da7fe3f8 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -90,7 +90,6 @@ struct expr *expr_copy(const struct expr *org)
break;
case E_AND:
case E_OR:
- case E_LIST:
e->left.expr = expr_copy(org->left.expr);
e->right.expr = expr_copy(org->right.expr);
break;
@@ -136,9 +135,6 @@ void expr_free(struct expr *e)
static int trans_count;
-#define e1 (*ep1)
-#define e2 (*ep2)
-
/*
* expr_eliminate_eq() helper.
*
@@ -151,38 +147,38 @@ static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct e
{
/* Recurse down to leaves */
- if (e1->type == type) {
- __expr_eliminate_eq(type, &e1->left.expr, &e2);
- __expr_eliminate_eq(type, &e1->right.expr, &e2);
+ if ((*ep1)->type == type) {
+ __expr_eliminate_eq(type, &(*ep1)->left.expr, ep2);
+ __expr_eliminate_eq(type, &(*ep1)->right.expr, ep2);
return;
}
- if (e2->type == type) {
- __expr_eliminate_eq(type, &e1, &e2->left.expr);
- __expr_eliminate_eq(type, &e1, &e2->right.expr);
+ if ((*ep2)->type == type) {
+ __expr_eliminate_eq(type, ep1, &(*ep2)->left.expr);
+ __expr_eliminate_eq(type, ep1, &(*ep2)->right.expr);
return;
}
- /* e1 and e2 are leaves. Compare them. */
+ /* *ep1 and *ep2 are leaves. Compare them. */
- if (e1->type == E_SYMBOL && e2->type == E_SYMBOL &&
- e1->left.sym == e2->left.sym &&
- (e1->left.sym == &symbol_yes || e1->left.sym == &symbol_no))
+ if ((*ep1)->type == E_SYMBOL && (*ep2)->type == E_SYMBOL &&
+ (*ep1)->left.sym == (*ep2)->left.sym &&
+ ((*ep1)->left.sym == &symbol_yes || (*ep1)->left.sym == &symbol_no))
return;
- if (!expr_eq(e1, e2))
+ if (!expr_eq(*ep1, *ep2))
return;
- /* e1 and e2 are equal leaves. Prepare them for elimination. */
+ /* *ep1 and *ep2 are equal leaves. Prepare them for elimination. */
trans_count++;
- expr_free(e1); expr_free(e2);
+ expr_free(*ep1); expr_free(*ep2);
switch (type) {
case E_OR:
- e1 = expr_alloc_symbol(&symbol_no);
- e2 = expr_alloc_symbol(&symbol_no);
+ *ep1 = expr_alloc_symbol(&symbol_no);
+ *ep2 = expr_alloc_symbol(&symbol_no);
break;
case E_AND:
- e1 = expr_alloc_symbol(&symbol_yes);
- e2 = expr_alloc_symbol(&symbol_yes);
+ *ep1 = expr_alloc_symbol(&symbol_yes);
+ *ep2 = expr_alloc_symbol(&symbol_yes);
break;
default:
;
@@ -220,29 +216,26 @@ static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct e
*/
void expr_eliminate_eq(struct expr **ep1, struct expr **ep2)
{
- if (!e1 || !e2)
+ if (!*ep1 || !*ep2)
return;
- switch (e1->type) {
+ switch ((*ep1)->type) {
case E_OR:
case E_AND:
- __expr_eliminate_eq(e1->type, ep1, ep2);
+ __expr_eliminate_eq((*ep1)->type, ep1, ep2);
default:
;
}
- if (e1->type != e2->type) switch (e2->type) {
+ if ((*ep1)->type != (*ep2)->type) switch ((*ep2)->type) {
case E_OR:
case E_AND:
- __expr_eliminate_eq(e2->type, ep1, ep2);
+ __expr_eliminate_eq((*ep2)->type, ep1, ep2);
default:
;
}
- e1 = expr_eliminate_yn(e1);
- e2 = expr_eliminate_yn(e2);
+ *ep1 = expr_eliminate_yn(*ep1);
+ *ep2 = expr_eliminate_yn(*ep2);
}
-#undef e1
-#undef e2
-
/*
* Returns true if 'e1' and 'e2' are equal, after minor simplification. Two
* &&/|| expressions are considered equal if every operand in one expression
@@ -286,7 +279,6 @@ int expr_eq(struct expr *e1, struct expr *e2)
expr_free(e2);
trans_count = old_count;
return res;
- case E_LIST:
case E_RANGE:
case E_NONE:
/* panic */;
@@ -566,59 +558,55 @@ static struct expr *expr_join_and(struct expr *e1, struct expr *e2)
*/
static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, struct expr **ep2)
{
-#define e1 (*ep1)
-#define e2 (*ep2)
struct expr *tmp;
/* Recurse down to leaves */
- if (e1->type == type) {
- expr_eliminate_dups1(type, &e1->left.expr, &e2);
- expr_eliminate_dups1(type, &e1->right.expr, &e2);
+ if ((*ep1)->type == type) {
+ expr_eliminate_dups1(type, &(*ep1)->left.expr, ep2);
+ expr_eliminate_dups1(type, &(*ep1)->right.expr, ep2);
return;
}
- if (e2->type == type) {
- expr_eliminate_dups1(type, &e1, &e2->left.expr);
- expr_eliminate_dups1(type, &e1, &e2->right.expr);
+ if ((*ep2)->type == type) {
+ expr_eliminate_dups1(type, ep1, &(*ep2)->left.expr);
+ expr_eliminate_dups1(type, ep1, &(*ep2)->right.expr);
return;
}
- /* e1 and e2 are leaves. Compare and process them. */
+ /* *ep1 and *ep2 are leaves. Compare and process them. */
- if (e1 == e2)
+ if (*ep1 == *ep2)
return;
- switch (e1->type) {
+ switch ((*ep1)->type) {
case E_OR: case E_AND:
- expr_eliminate_dups1(e1->type, &e1, &e1);
+ expr_eliminate_dups1((*ep1)->type, ep1, ep1);
default:
;
}
switch (type) {
case E_OR:
- tmp = expr_join_or(e1, e2);
+ tmp = expr_join_or(*ep1, *ep2);
if (tmp) {
- expr_free(e1); expr_free(e2);
- e1 = expr_alloc_symbol(&symbol_no);
- e2 = tmp;
+ expr_free(*ep1); expr_free(*ep2);
+ *ep1 = expr_alloc_symbol(&symbol_no);
+ *ep2 = tmp;
trans_count++;
}
break;
case E_AND:
- tmp = expr_join_and(e1, e2);
+ tmp = expr_join_and(*ep1, *ep2);
if (tmp) {
- expr_free(e1); expr_free(e2);
- e1 = expr_alloc_symbol(&symbol_yes);
- e2 = tmp;
+ expr_free(*ep1); expr_free(*ep2);
+ *ep1 = expr_alloc_symbol(&symbol_yes);
+ *ep2 = tmp;
trans_count++;
}
break;
default:
;
}
-#undef e1
-#undef e2
}
/*
@@ -639,7 +627,7 @@ struct expr *expr_eliminate_dups(struct expr *e)
return e;
oldcount = trans_count;
- while (1) {
+ do {
trans_count = 0;
switch (e->type) {
case E_OR: case E_AND:
@@ -647,11 +635,8 @@ struct expr *expr_eliminate_dups(struct expr *e)
default:
;
}
- if (!trans_count)
- /* No simplifications done in this pass. We're done */
- break;
e = expr_eliminate_yn(e);
- }
+ } while (trans_count); /* repeat until we get no more simplifications */
trans_count = oldcount;
return e;
}
@@ -676,7 +661,6 @@ struct expr *expr_transform(struct expr *e)
case E_LTH:
case E_UNEQUAL:
case E_SYMBOL:
- case E_LIST:
break;
default:
e->left.expr = expr_transform(e->left.expr);
@@ -947,7 +931,6 @@ struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symb
break;
case E_SYMBOL:
return expr_alloc_comp(type, e->left.sym, sym);
- case E_LIST:
case E_RANGE:
case E_NONE:
/* panic */;
@@ -1083,29 +1066,27 @@ static int expr_compare_type(enum expr_type t1, enum expr_type t2)
case E_GTH:
if (t2 == E_EQUAL || t2 == E_UNEQUAL)
return 1;
+ /* fallthrough */
case E_EQUAL:
case E_UNEQUAL:
if (t2 == E_NOT)
return 1;
+ /* fallthrough */
case E_NOT:
if (t2 == E_AND)
return 1;
+ /* fallthrough */
case E_AND:
if (t2 == E_OR)
return 1;
- case E_OR:
- if (t2 == E_LIST)
- return 1;
- case E_LIST:
- if (t2 == 0)
- return 1;
+ /* fallthrough */
default:
- return -1;
+ break;
}
return 0;
}
-void expr_print(struct expr *e,
+void expr_print(const struct expr *e,
void (*fn)(void *, struct symbol *, const char *),
void *data, int prevtoken)
{
@@ -1171,13 +1152,6 @@ void expr_print(struct expr *e,
fn(data, NULL, " && ");
expr_print(e->right.expr, fn, data, E_AND);
break;
- case E_LIST:
- fn(data, e->right.sym, e->right.sym->name);
- if (e->left.expr) {
- fn(data, NULL, " ^ ");
- expr_print(e->left.expr, fn, data, E_LIST);
- }
- break;
case E_RANGE:
fn(data, NULL, "[");
fn(data, e->left.sym, e->left.sym->name);
@@ -1237,7 +1211,7 @@ static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *s
str_printf(gs, " [=%s]", sym_str);
}
-void expr_gstr_print(struct expr *e, struct gstr *gs)
+void expr_gstr_print(const struct expr *e, struct gstr *gs)
{
expr_print(e, expr_print_gstr_helper, gs, E_NONE);
}
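
The expr_eliminate_dups() hunk above reshapes the pass loop into a do/while keyed on trans_count: every pass resets the counter, the helpers bump it whenever they simplify something, and the loop ends with the first pass that makes no progress. The fragment below is only a toy illustration of that fixed-point loop shape; its "simplification" stands in for __expr_eliminate_dups()/expr_eliminate_yn() and is not the real transformation.

/* Fixed-point loop in the same shape as expr_eliminate_dups():
 * repeat passes until a pass performs no simplification.
 */
#include <stdio.h>

static int trans_count;         /* simplifications done in the current pass */

/* stand-in "simplification": strip one trailing zero, if any */
static int simplify_once(int v)
{
        if (v && v % 10 == 0) {
                trans_count++;
                return v / 10;
        }
        return v;
}

int main(void)
{
        int v = 1230000;

        do {
                trans_count = 0;
                v = simplify_once(v);
        } while (trans_count);  /* stop at the first pass with no progress */

        printf("%d\n", v);      /* prints 123 */
        return 0;
}
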
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 7c0c242318bc..2bc96cd28253 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -12,12 +12,11 @@ extern "C" {
#include <assert.h>
#include <stdio.h>
-#include "list_types.h"
#ifndef __cplusplus
#include <stdbool.h>
#endif
-#include "list_types.h"
+#include <list_types.h>
typedef enum tristate {
no, mod, yes
@@ -26,7 +25,7 @@ typedef enum tristate {
enum expr_type {
E_NONE, E_OR, E_AND, E_NOT,
E_EQUAL, E_UNEQUAL, E_LTH, E_LEQ, E_GTH, E_GEQ,
- E_LIST, E_SYMBOL, E_RANGE
+ E_SYMBOL, E_RANGE
};
union expr_data {
@@ -43,9 +42,6 @@ struct expr {
#define EXPR_AND(dep1, dep2) (((dep1)<(dep2))?(dep1):(dep2))
#define EXPR_NOT(dep) (2-(dep))
-#define expr_list_for_each_sym(l, e, s) \
- for (e = (l); e && (s = e->right.sym); e = e->left.expr)
-
struct expr_value {
struct expr *expr;
tristate tri;
@@ -73,6 +69,8 @@ enum {
* Represents a configuration symbol.
*
* Choices are represented as a special kind of symbol with null name.
+ *
+ * @choice_link: linked to menu::choice_members
*/
struct symbol {
/* link node for the hash table */
@@ -110,6 +108,8 @@ struct symbol {
/* config entries associated with this symbol */
struct list_head menus;
+ struct list_head choice_link;
+
/* SYMBOL_* flags */
int flags;
@@ -130,10 +130,8 @@ struct symbol {
#define SYMBOL_CONST 0x0001 /* symbol is const */
#define SYMBOL_CHECK 0x0008 /* used during dependency checking */
-#define SYMBOL_CHOICEVAL 0x0020 /* used as a value in a choice block */
#define SYMBOL_VALID 0x0080 /* set when symbol.curr is calculated */
#define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */
-#define SYMBOL_CHANGED 0x0400 /* ? */
#define SYMBOL_WRITTEN 0x0800 /* track info to avoid double-write to .config */
#define SYMBOL_CHECKED 0x2000 /* used during dependency checking */
#define SYMBOL_WARNED 0x8000 /* warning has been issued */
@@ -145,9 +143,6 @@ struct symbol {
#define SYMBOL_DEF3 0x40000 /* symbol.def[S_DEF_3] is valid */
#define SYMBOL_DEF4 0x80000 /* symbol.def[S_DEF_4] is valid */
-/* choice values need to be set before calculating this symbol value */
-#define SYMBOL_NEED_SET_CHOICE_VALUES 0x100000
-
#define SYMBOL_MAXLENGTH 256
/* A property represent the config options that can be associated
@@ -170,7 +165,6 @@ enum prop_type {
P_COMMENT, /* text associated with a comment */
P_MENU, /* prompt associated with a menu or menuconfig symbol */
P_DEFAULT, /* default y */
- P_CHOICE, /* choice value */
P_SELECT, /* select BAR */
P_IMPLY, /* imply BAR */
P_RANGE, /* range 7..100 (for a symbol) */
@@ -184,7 +178,7 @@ struct property {
struct expr_value visible;
struct expr *expr; /* the optional conditional part of the property */
struct menu *menu; /* the menu the property are associated with
- * valid for: P_SELECT, P_RANGE, P_CHOICE,
+ * valid for: P_SELECT, P_RANGE,
* P_PROMPT, P_DEFAULT, P_MENU, P_COMMENT */
const char *filename; /* what file was this property defined */
int lineno; /* what lineno was this property defined */
@@ -194,7 +188,6 @@ struct property {
for (st = sym->prop; st; st = st->next) \
if (st->type == (tok))
#define for_all_defaults(sym, st) for_all_properties(sym, st, P_DEFAULT)
-#define for_all_choices(sym, st) for_all_properties(sym, st, P_CHOICE)
#define for_all_prompts(sym, st) \
for (st = sym->prop; st; st = st->next) \
if (st->text)
@@ -204,6 +197,8 @@ struct property {
* for all front ends). Each symbol, menu, etc. defined in the Kconfig files
* gets a node. A symbol defined in multiple locations gets one node at each
* location.
+ *
+ * @choice_members: priority-ordered list of choice members.
*/
struct menu {
/* The next menu node at the same level */
@@ -223,6 +218,8 @@ struct menu {
struct list_head link; /* link to symbol::menus */
+ struct list_head choice_members;
+
/*
* The prompt associated with the node. This holds the prompt for a
* symbol as well as the text for a menu or comment, along with the
@@ -292,11 +289,11 @@ struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symb
void expr_fprint(struct expr *e, FILE *out);
struct gstr; /* forward */
-void expr_gstr_print(struct expr *e, struct gstr *gs);
+void expr_gstr_print(const struct expr *e, struct gstr *gs);
void expr_gstr_print_revdep(struct expr *e, struct gstr *gs,
tristate pr_type, const char *title);
-static inline int expr_is_yes(struct expr *e)
+static inline int expr_is_yes(const struct expr *e)
{
return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
}
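
The expr.h changes give every symbol a choice_link node and every menu a choice_members list head, and sym_is_choice_value() later becomes "is this symbol linked into some choice's member list". The sketch below shows just that ownership relation with simplified standalone structs; the node helpers and layouts are illustrative, not the kernel's list_types.h definitions.

/* Sketch of the two predicates: a choice is an unnamed symbol, and a
 * choice member is any symbol whose choice_link node is non-empty
 * because the parser added it to a menu's choice_members list.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

struct symbol {
        const char *name;               /* NULL for the choice itself */
        struct node choice_link;        /* empty unless linked into a choice */
};

static void node_init(struct node *n) { n->prev = n->next = n; }
static bool node_empty(const struct node *n) { return n->next == n; }

static void node_add_tail(struct node *n, struct node *head)
{
        n->next = head;
        n->prev = head->prev;
        head->prev->next = n;
        head->prev = n;
}

static bool sym_is_choice(const struct symbol *sym)
{
        return sym->name == NULL;
}

static bool sym_is_choice_value(const struct symbol *sym)
{
        return !node_empty(&sym->choice_link);
}

int main(void)
{
        struct symbol choice = { NULL }, foo = { "FOO" };
        struct node choice_members;

        node_init(&choice_members);
        node_init(&choice.choice_link);
        node_init(&foo.choice_link);

        /* what the parser now does for each member of a choice block */
        node_add_tail(&foo.choice_link, &choice_members);

        printf("%d %d\n", sym_is_choice(&choice), sym_is_choice_value(&foo)); /* 1 1 */
        return 0;
}
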
diff --git a/scripts/kconfig/gconf-cfg.sh b/scripts/kconfig/gconf-cfg.sh
index 040d8f338820..fc954c0538fa 100755
--- a/scripts/kconfig/gconf-cfg.sh
+++ b/scripts/kconfig/gconf-cfg.sh
@@ -1,6 +1,8 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+set -eu
+
cflags=$1
libs=$2
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index e04dbafd3add..c0f46f189060 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -65,9 +65,6 @@ static void display_list(void);
static void display_tree(struct menu *menu);
static void display_tree_part(void);
static void update_tree(struct menu *src, GtkTreeIter * dst);
-static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row);
-static gchar **fill_row(struct menu *menu);
-static void conf_changed(void);
static void replace_button_icon(GladeXML *xml, GdkDrawable *window,
GtkStyle *style, gchar *btn_name, gchar **xpm)
@@ -87,6 +84,12 @@ static void replace_button_icon(GladeXML *xml, GdkDrawable *window,
gtk_tool_button_set_icon_widget(button, image);
}
+static void conf_changed(bool dirty)
+{
+ gtk_widget_set_sensitive(save_btn, dirty);
+ gtk_widget_set_sensitive(save_menu_item, dirty);
+}
+
/* Main Window Initialization */
static void init_main_window(const gchar *glade_file)
{
@@ -1051,7 +1054,7 @@ static gchar **fill_row(struct menu *menu)
if (sym_is_choice(sym)) { // parse childs for getting final value
struct menu *child;
- struct symbol *def_sym = sym_get_choice_value(sym);
+ struct symbol *def_sym = sym_calc_choice(menu);
struct menu *def_menu = NULL;
for (child = menu->list; child; child = child->next) {
@@ -1064,12 +1067,10 @@ static gchar **fill_row(struct menu *menu)
row[COL_VALUE] =
g_strdup(menu_get_prompt(def_menu));
- if (sym_get_type(sym) == S_BOOLEAN) {
- row[COL_BTNVIS] = GINT_TO_POINTER(FALSE);
- return row;
- }
+ row[COL_BTNVIS] = GINT_TO_POINTER(FALSE);
+ return row;
}
- if (sym->flags & SYMBOL_CHOICEVAL)
+ if (sym_is_choice_value(sym))
row[COL_BTNRAD] = GINT_TO_POINTER(TRUE);
stype = sym_get_type(sym);
@@ -1447,10 +1448,3 @@ int main(int ac, char *av[])
return 0;
}
-
-static void conf_changed(void)
-{
- bool changed = conf_get_changed();
- gtk_widget_set_sensitive(save_btn, changed);
- gtk_widget_set_sensitive(save_menu_item, changed);
-}
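
This gconf.c rewrite relies on the new callback contract from the confdata.c hunk: conf_set_changed() now invokes the registered callback only on an actual transition and hands it the new dirty state, so the frontend no longer has to query conf_get_changed() itself. The snippet below is a self-contained toy model of that contract; it copies the conf_set_changed() logic shown above into a standalone program, and update_save_button() is a made-up stand-in for the GTK/Qt handlers.

/* Edge-triggered "dirty" callback: fires only when the flag changes,
 * and receives the new value directly.
 */
#include <stdbool.h>
#include <stdio.h>

static bool conf_changed;
static void (*conf_changed_callback)(bool);

static void conf_set_changed(bool val)
{
        if (conf_changed_callback && conf_changed != val)
                conf_changed_callback(val);
        conf_changed = val;
}

static void update_save_button(bool dirty)
{
        printf("save button %s\n", dirty ? "enabled" : "disabled");
}

int main(void)
{
        conf_changed_callback = update_save_button;     /* conf_set_changed_callback() in the real code */

        conf_set_changed(true);         /* -> "save button enabled" */
        conf_set_changed(true);         /* no transition, no output */
        conf_set_changed(false);        /* -> "save button disabled" */
        return 0;
}
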
diff --git a/scripts/kconfig/internal.h b/scripts/kconfig/internal.h
index 6c721c4cfd72..02106eb7815e 100644
--- a/scripts/kconfig/internal.h
+++ b/scripts/kconfig/internal.h
@@ -2,7 +2,7 @@
#ifndef INTERNAL_H
#define INTERNAL_H
-#include "hashtable.h"
+#include <hashtable.h>
#define SYMBOL_HASHSIZE (1U << 14)
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 64dfc354dd5c..401bdf36323a 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -40,7 +40,6 @@ void zconf_nextfile(const char *name);
/* confdata.c */
extern struct gstr autoconf_cmd;
const char *conf_get_configname(void);
-void set_all_choice_values(struct symbol *csym);
/* confdata.c and expr.c */
static inline void xfwrite(const void *str, size_t len, size_t count, FILE *out)
@@ -76,7 +75,7 @@ struct gstr str_new(void);
void str_free(struct gstr *gs);
void str_append(struct gstr *gs, const char *s);
void str_printf(struct gstr *gs, const char *fmt, ...);
-char *str_get(struct gstr *gs);
+char *str_get(const struct gstr *gs);
/* menu.c */
struct menu *menu_next(struct menu *menu, struct menu *root);
@@ -85,13 +84,14 @@ struct menu *menu_next(struct menu *menu, struct menu *root);
#define menu_for_each_entry(menu) \
menu_for_each_sub_entry(menu, &rootmenu)
void _menu_init(void);
-void menu_warn(struct menu *menu, const char *fmt, ...);
+void menu_warn(const struct menu *menu, const char *fmt, ...);
struct menu *menu_add_menu(void);
void menu_end_menu(void);
void menu_add_entry(struct symbol *sym);
void menu_add_dep(struct expr *dep);
void menu_add_visibility(struct expr *dep);
-struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
+struct property *menu_add_prompt(enum prop_type type, const char *prompt,
+ struct expr *dep);
void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
void menu_finalize(void);
@@ -101,8 +101,8 @@ extern struct menu rootmenu;
bool menu_is_empty(struct menu *menu);
bool menu_is_visible(struct menu *menu);
-bool menu_has_prompt(struct menu *menu);
-const char *menu_get_prompt(struct menu *menu);
+bool menu_has_prompt(const struct menu *menu);
+const char *menu_get_prompt(const struct menu *menu);
struct menu *menu_get_parent_menu(struct menu *menu);
int get_jump_key_char(void);
struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head);
@@ -110,35 +110,27 @@ void menu_get_ext_help(struct menu *menu, struct gstr *help);
/* symbol.c */
void sym_clear_all_valid(void);
-struct symbol *sym_choice_default(struct symbol *sym);
+struct symbol *sym_choice_default(struct menu *choice);
+struct symbol *sym_calc_choice(struct menu *choice);
struct property *sym_get_range_prop(struct symbol *sym);
const char *sym_get_string_default(struct symbol *sym);
struct symbol *sym_check_deps(struct symbol *sym);
-struct symbol *prop_get_symbol(struct property *prop);
+struct symbol *prop_get_symbol(const struct property *prop);
-static inline tristate sym_get_tristate_value(struct symbol *sym)
+static inline tristate sym_get_tristate_value(const struct symbol *sym)
{
return sym->curr.tri;
}
-
-static inline struct symbol *sym_get_choice_value(struct symbol *sym)
-{
- return (struct symbol *)sym->curr.val;
-}
-
-static inline bool sym_is_choice(struct symbol *sym)
+static inline bool sym_is_choice(const struct symbol *sym)
{
/* A choice is a symbol with no name */
return sym->name == NULL;
}
-static inline bool sym_is_choice_value(struct symbol *sym)
-{
- return sym->flags & SYMBOL_CHOICEVAL ? true : false;
-}
+bool sym_is_choice_value(const struct symbol *sym);
-static inline bool sym_has_value(struct symbol *sym)
+static inline bool sym_has_value(const struct symbol *sym)
{
return sym->flags & SYMBOL_DEF_USER ? true : false;
}
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index d76aaf4ea117..63519cd24bc7 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -13,7 +13,7 @@ int conf_write(const char *name);
int conf_write_autoconf(int overwrite);
void conf_set_changed(bool val);
bool conf_get_changed(void);
-void conf_set_changed_callback(void (*fn)(void));
+void conf_set_changed_callback(void (*fn)(bool));
void conf_set_message_callback(void (*fn)(const char *s));
bool conf_errors(void);
@@ -25,21 +25,23 @@ struct symbol ** sym_re_search(const char *pattern);
const char * sym_type_name(enum symbol_type type);
void sym_calc_value(struct symbol *sym);
bool sym_dep_errors(void);
-enum symbol_type sym_get_type(struct symbol *sym);
-bool sym_tristate_within_range(struct symbol *sym,tristate tri);
+enum symbol_type sym_get_type(const struct symbol *sym);
+bool sym_tristate_within_range(const struct symbol *sym, tristate tri);
bool sym_set_tristate_value(struct symbol *sym,tristate tri);
+void choice_set_value(struct menu *choice, struct symbol *sym);
tristate sym_toggle_tristate_value(struct symbol *sym);
bool sym_string_valid(struct symbol *sym, const char *newval);
bool sym_string_within_range(struct symbol *sym, const char *str);
bool sym_set_string_value(struct symbol *sym, const char *newval);
-bool sym_is_changeable(struct symbol *sym);
-struct property * sym_get_choice_prop(struct symbol *sym);
-struct menu *sym_get_choice_menu(struct symbol *sym);
+bool sym_is_changeable(const struct symbol *sym);
+struct menu *sym_get_choice_menu(const struct symbol *sym);
const char * sym_get_string_value(struct symbol *sym);
const char * prop_get_type_name(enum prop_type type);
/* expr.c */
-void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken);
+void expr_print(const struct expr *e,
+ void (*fn)(void *, struct symbol *, const char *),
+ void *data, int prevtoken);
#endif /* LKC_PROTO_H */
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh
index 1e61f50a5905..1bc304dc2f7d 100755
--- a/scripts/kconfig/mconf-cfg.sh
+++ b/scripts/kconfig/mconf-cfg.sh
@@ -1,6 +1,8 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+set -eu
+
cflags=$1
libs=$2
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index d6a61ca1a984..3887eac75289 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -19,7 +19,7 @@
#include <signal.h>
#include <unistd.h>
-#include "list.h"
+#include <list.h>
#include "lkc.h"
#include "lxdialog/dialog.h"
#include "mnconf-common.h"
@@ -514,7 +514,7 @@ static void build_conf(struct menu *menu)
type = sym_get_type(sym);
if (sym_is_choice(sym)) {
- struct symbol *def_sym = sym_get_choice_value(sym);
+ struct symbol *def_sym = sym_calc_choice(menu);
struct menu *def_menu = NULL;
child_count++;
@@ -523,28 +523,14 @@ static void build_conf(struct menu *menu)
def_menu = child;
}
- val = sym_get_tristate_value(sym);
- if (sym_is_changeable(sym)) {
- switch (val) {
- case yes: ch = '*'; break;
- case mod: ch = 'M'; break;
- default: ch = ' '; break;
- }
- item_make("<%c>", ch);
- item_set_tag('t');
- item_set_data(menu);
- } else {
- item_make(" ");
- item_set_tag(def_menu ? 't' : ':');
- item_set_data(menu);
- }
+ item_make(" ");
+ item_set_tag(def_menu ? 't' : ':');
+ item_set_data(menu);
item_add_str("%*c%s", indent + 1, ' ', menu_get_prompt(menu));
- if (val == yes) {
- if (def_menu)
- item_add_str(" (%s) --->", menu_get_prompt(def_menu));
- return;
- }
+ if (def_menu)
+ item_add_str(" (%s) --->", menu_get_prompt(def_menu));
+ return;
} else {
if (menu == current_menu) {
item_make("---%*c%s", indent + 1, ' ', menu_get_prompt(menu));
@@ -614,7 +600,7 @@ static void conf_choice(struct menu *menu)
struct menu *child;
struct symbol *active;
- active = sym_get_choice_value(menu->sym);
+ active = sym_calc_choice(menu);
while (1) {
int res;
int selected;
@@ -633,7 +619,7 @@ static void conf_choice(struct menu *menu)
item_set_data(child);
if (child->sym == active)
item_set_selected(1);
- if (child->sym == sym_get_choice_value(menu->sym))
+ if (child->sym == sym_calc_choice(menu))
item_set_tag('X');
}
dialog_clear();
@@ -650,7 +636,7 @@ static void conf_choice(struct menu *menu)
if (!child->sym)
break;
- sym_set_tristate_value(child->sym, yes);
+ choice_set_value(menu, child->sym);
}
return;
case 1:
@@ -814,7 +800,7 @@ static void conf(struct menu *menu, struct menu *active_menu)
conf(submenu, NULL);
break;
case 't':
- if (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)
+ if (sym_is_choice(sym))
conf_choice(submenu);
else if (submenu->prompt->type == P_MENU)
conf(submenu, NULL);
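
Both menuconfig hunks above drop the per-symbol sym_set_tristate_value(child->sym, yes) call in favour of a single choice_set_value(menu, child->sym), which records 'y' for the selected member and 'n' for every other visible member in one pass (its implementation appears in the symbol.c hunk further below). The toy below mirrors that one-shot update over a plain array instead of the menu tree; the struct and field names are invented for the example.

/* One-shot choice update: the picked member gets a user value of 'y',
 * every other visible member gets 'n', and we report whether anything
 * actually changed (the caller would then recalculate symbol values).
 */
#include <stdbool.h>
#include <stdio.h>

struct member {
        const char *name;
        bool visible;
        bool cur_yes;   /* currently calculated value */
        bool user_yes;  /* recorded user value */
        bool has_user;
};

static bool choice_set_value(struct member *m, int n, int picked)
{
        bool changed = false;
        int i;

        for (i = 0; i < n; i++) {
                bool val = (i == picked);

                if (!m[i].visible)
                        continue;
                if (m[i].cur_yes != val)
                        changed = true;
                m[i].user_yes = val;
                m[i].has_user = true;
        }
        return changed;
}

int main(void)
{
        struct member m[] = {
                { "A", true, true,  false, false },
                { "B", true, false, false, false },
        };

        printf("changed=%d\n", choice_set_value(m, 2, 1));      /* pick B: 1 */
        printf("A=%c B=%c\n", m[0].user_yes ? 'y' : 'n',
               m[1].user_yes ? 'y' : 'n');                      /* A=n B=y */
        return 0;
}
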
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index eef9b63cdf11..323cc0b62be6 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -8,9 +8,9 @@
#include <stdlib.h>
#include <string.h>
+#include <list.h>
#include "lkc.h"
#include "internal.h"
-#include "list.h"
static const char nohelp_text[] = "There is no help available for this option.";
@@ -38,7 +38,7 @@ struct menu *menu_next(struct menu *menu, struct menu *root)
return menu->next;
}
-void menu_warn(struct menu *menu, const char *fmt, ...)
+void menu_warn(const struct menu *menu, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
@@ -48,7 +48,7 @@ void menu_warn(struct menu *menu, const char *fmt, ...)
va_end(ap);
}
-static void prop_warn(struct property *prop, const char *fmt, ...)
+static void prop_warn(const struct property *prop, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
@@ -175,7 +175,7 @@ static struct property *menu_add_prop(enum prop_type type, struct expr *expr,
return prop;
}
-struct property *menu_add_prompt(enum prop_type type, char *prompt,
+struct property *menu_add_prompt(enum prop_type type, const char *prompt,
struct expr *dep)
{
struct property *prop = menu_add_prop(type, NULL, dep);
@@ -306,7 +306,7 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
struct menu *menu, *last_menu;
struct symbol *sym;
struct property *prop;
- struct expr *parentdep, *basedep, *dep, *dep2, **ep;
+ struct expr *basedep, *dep, *dep2;
sym = parent->sym;
if (parent->list) {
@@ -315,35 +315,6 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
* and propagate parent dependencies before moving on.
*/
- bool is_choice = false;
-
- if (sym && sym_is_choice(sym))
- is_choice = true;
-
- if (is_choice) {
- if (sym->type == S_UNKNOWN) {
- /* find the first choice value to find out choice type */
- current_entry = parent;
- for (menu = parent->list; menu; menu = menu->next) {
- if (menu->sym && menu->sym->type != S_UNKNOWN) {
- menu_set_type(menu->sym->type);
- break;
- }
- }
- }
-
- /*
- * Use the choice itself as the parent dependency of
- * the contained items. This turns the mode of the
- * choice into an upper bound on the visibility of the
- * choice value symbols.
- */
- parentdep = expr_alloc_symbol(sym);
- } else {
- /* Menu node for 'menu', 'if' */
- parentdep = parent->dep;
- }
-
/* For each child menu node... */
for (menu = parent->list; menu; menu = menu->next) {
/*
@@ -352,7 +323,7 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
*/
basedep = rewrite_m(menu->dep);
basedep = expr_transform(basedep);
- basedep = expr_alloc_and(expr_copy(parentdep), basedep);
+ basedep = expr_alloc_and(expr_copy(parent->dep), basedep);
basedep = expr_eliminate_dups(basedep);
menu->dep = basedep;
@@ -416,15 +387,12 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
}
}
- if (is_choice)
- expr_free(parentdep);
-
/*
* Recursively process children in the same fashion before
* moving on
*/
for (menu = parent->list; menu; menu = menu->next)
- _menu_finalize(menu, is_choice);
+ _menu_finalize(menu, sym && sym_is_choice(sym));
} else if (!inside_choice && sym) {
/*
* Automatic submenu creation. If sym is a symbol and A, B, C,
@@ -499,34 +467,6 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
sym->dir_dep.expr = expr_alloc_or(sym->dir_dep.expr, parent->dep);
}
for (menu = parent->list; menu; menu = menu->next) {
- if (sym && sym_is_choice(sym) &&
- menu->sym && !sym_is_choice_value(menu->sym)) {
- current_entry = menu;
- menu->sym->flags |= SYMBOL_CHOICEVAL;
- /* Non-tristate choice values of tristate choices must
- * depend on the choice being set to Y. The choice
- * values' dependencies were propagated to their
- * properties above, so the change here must be re-
- * propagated.
- */
- if (sym->type == S_TRISTATE && menu->sym->type != S_TRISTATE) {
- basedep = expr_alloc_comp(E_EQUAL, sym, &symbol_yes);
- menu->dep = expr_alloc_and(basedep, menu->dep);
- for (prop = menu->sym->prop; prop; prop = prop->next) {
- if (prop->menu != menu)
- continue;
- prop->visible.expr = expr_alloc_and(expr_copy(basedep),
- prop->visible.expr);
- }
- }
- menu_add_symbol(P_CHOICE, sym, NULL);
- prop = sym_get_choice_prop(sym);
- for (ep = &prop->expr; *ep; ep = &(*ep)->left.expr)
- ;
- *ep = expr_alloc_one(E_LIST, NULL);
- (*ep)->right.sym = menu->sym;
- }
-
/*
* This code serves two purposes:
*
@@ -575,17 +515,6 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
sym_check_prop(sym);
sym->flags |= SYMBOL_WARNED;
}
-
- /*
- * For choices, add a reverse dependency (corresponding to a select) of
- * '<visibility> && m'. This prevents the user from setting the choice
- * mode to 'n' when the choice is visible.
- */
- if (sym && sym_is_choice(sym) && parent->prompt) {
- sym->rev_dep.expr = expr_alloc_or(sym->rev_dep.expr,
- expr_alloc_and(parent->prompt->visible.expr,
- expr_alloc_symbol(&symbol_mod)));
- }
}
void menu_finalize(void)
@@ -593,7 +522,7 @@ void menu_finalize(void)
_menu_finalize(&rootmenu, false);
}
-bool menu_has_prompt(struct menu *menu)
+bool menu_has_prompt(const struct menu *menu)
{
if (!menu->prompt)
return false;
@@ -618,7 +547,6 @@ bool menu_is_empty(struct menu *menu)
bool menu_is_visible(struct menu *menu)
{
- struct menu *child;
struct symbol *sym;
tristate visible;
@@ -637,24 +565,10 @@ bool menu_is_visible(struct menu *menu)
} else
visible = menu->prompt->visible.tri = expr_calc_value(menu->prompt->visible.expr);
- if (visible != no)
- return true;
-
- if (!sym || sym_get_tristate_value(menu->sym) == no)
- return false;
-
- for (child = menu->list; child; child = child->next) {
- if (menu_is_visible(child)) {
- if (sym)
- sym->flags |= SYMBOL_DEF_USER;
- return true;
- }
- }
-
- return false;
+ return visible != no;
}
-const char *menu_get_prompt(struct menu *menu)
+const char *menu_get_prompt(const struct menu *menu)
{
if (menu->prompt)
return menu->prompt->text;
@@ -675,13 +589,14 @@ struct menu *menu_get_parent_menu(struct menu *menu)
return menu;
}
-static void get_def_str(struct gstr *r, struct menu *menu)
+static void get_def_str(struct gstr *r, const struct menu *menu)
{
str_printf(r, "Defined at %s:%d\n",
menu->filename, menu->lineno);
}
-static void get_dep_str(struct gstr *r, struct expr *expr, const char *prefix)
+static void get_dep_str(struct gstr *r, const struct expr *expr,
+ const char *prefix)
{
if (!expr_is_yes(expr)) {
str_append(r, prefix);
diff --git a/scripts/kconfig/mnconf-common.c b/scripts/kconfig/mnconf-common.c
index 18cb9a6c5aaa..8e24b07121df 100644
--- a/scripts/kconfig/mnconf-common.c
+++ b/scripts/kconfig/mnconf-common.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <list.h>
#include "expr.h"
-#include "list.h"
#include "mnconf-common.h"
int jump_key_char;
diff --git a/scripts/kconfig/mnconf-common.h b/scripts/kconfig/mnconf-common.h
index ab6292cc4bf2..53bd7292e931 100644
--- a/scripts/kconfig/mnconf-common.h
+++ b/scripts/kconfig/mnconf-common.h
@@ -4,6 +4,8 @@
#include <stddef.h>
+#include <list_types.h>
+
struct search_data {
struct list_head *head;
struct menu *target;
diff --git a/scripts/kconfig/nconf-cfg.sh b/scripts/kconfig/nconf-cfg.sh
index f871a2160e36..a20290b1a37d 100755
--- a/scripts/kconfig/nconf-cfg.sh
+++ b/scripts/kconfig/nconf-cfg.sh
@@ -1,6 +1,8 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+set -eu
+
cflags=$1
libs=$2
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index e1cb09418cbe..b91ca47e9e9a 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -11,7 +11,7 @@
#include <strings.h>
#include <stdlib.h>
-#include "list.h"
+#include <list.h>
#include "lkc.h"
#include "mnconf-common.h"
#include "nconf.h"
@@ -815,7 +815,7 @@ static void build_conf(struct menu *menu)
type = sym_get_type(sym);
if (sym_is_choice(sym)) {
- struct symbol *def_sym = sym_get_choice_value(sym);
+ struct symbol *def_sym = sym_calc_choice(menu);
struct menu *def_menu = NULL;
child_count++;
@@ -825,30 +825,13 @@ static void build_conf(struct menu *menu)
}
val = sym_get_tristate_value(sym);
- if (sym_is_changeable(sym)) {
- switch (val) {
- case yes:
- ch = '*';
- break;
- case mod:
- ch = 'M';
- break;
- default:
- ch = ' ';
- break;
- }
- item_make(menu, 't', "<%c>", ch);
- } else {
- item_make(menu, def_menu ? 't' : ':', " ");
- }
+ item_make(menu, def_menu ? 't' : ':', " ");
item_add_str("%*c%s", indent + 1,
' ', menu_get_prompt(menu));
- if (val == yes) {
- if (def_menu)
- item_add_str(" (%s) --->", menu_get_prompt(def_menu));
- return;
- }
+ if (def_menu)
+ item_add_str(" (%s) --->", menu_get_prompt(def_menu));
+ return;
} else {
if (menu == current_menu) {
item_make(menu, ':',
@@ -1191,8 +1174,7 @@ static void selected_conf(struct menu *menu, struct menu *active_menu)
conf(submenu);
break;
case 't':
- if (sym_is_choice(sym) &&
- sym_get_tristate_value(sym) == yes)
+ if (sym_is_choice(sym))
conf_choice(submenu);
else if (submenu->prompt &&
submenu->prompt->type == P_MENU)
@@ -1257,7 +1239,7 @@ static void conf_choice(struct menu *menu)
.pattern = "",
};
- active = sym_get_choice_value(menu->sym);
+ active = sym_calc_choice(menu);
/* this is mostly duplicated from the conf() function. */
while (!global_exit) {
reset_menu();
@@ -1266,7 +1248,7 @@ static void conf_choice(struct menu *menu)
if (!show_all_items && !menu_is_visible(child))
continue;
- if (child->sym == sym_get_choice_value(menu->sym))
+ if (child->sym == sym_calc_choice(menu))
item_make(child, ':', "<X> %s",
menu_get_prompt(child));
else if (child->sym)
@@ -1349,7 +1331,7 @@ static void conf_choice(struct menu *menu)
case ' ':
case 10:
case KEY_RIGHT:
- sym_set_tristate_value(child->sym, yes);
+ choice_set_value(menu, child->sym);
return;
case 'h':
case '?':
diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
index ff709001b1f0..61900feb4254 100644
--- a/scripts/kconfig/parser.y
+++ b/scripts/kconfig/parser.y
@@ -28,9 +28,7 @@ static void zconf_error(const char *err, ...);
static bool zconf_endtoken(const char *tokenname,
const char *expected_tokenname);
-struct menu *current_menu, *current_entry;
-
-static bool inside_choice = false;
+struct menu *current_menu, *current_entry, *current_choice;
%}
@@ -90,7 +88,7 @@ static bool inside_choice = false;
%type <symbol> nonconst_symbol
%type <symbol> symbol
-%type <type> type logic_type default
+%type <type> type default
%type <expr> expr
%type <expr> if_expr
%type <string> end
@@ -147,12 +145,21 @@ config_entry_start: T_CONFIG nonconst_symbol T_EOL
config_stmt: config_entry_start config_option_list
{
- if (inside_choice) {
+ if (current_choice) {
if (!current_entry->prompt) {
fprintf(stderr, "%s:%d: error: choice member must have a prompt\n",
current_entry->filename, current_entry->lineno);
yynerrs++;
}
+
+ if (current_entry->sym->type != S_BOOLEAN) {
+ fprintf(stderr, "%s:%d: error: choice member must be bool\n",
+ current_entry->filename, current_entry->lineno);
+ yynerrs++;
+ }
+
+ list_add_tail(&current_entry->sym->choice_link,
+ &current_choice->choice_members);
}
printd(DEBUG_PARSE, "%s:%d:endconfig\n", cur_filename, cur_lineno);
@@ -234,7 +241,9 @@ choice: T_CHOICE T_EOL
struct symbol *sym = sym_lookup(NULL, 0);
menu_add_entry(sym);
- menu_add_expr(P_CHOICE, NULL, NULL);
+ menu_set_type(S_BOOLEAN);
+ INIT_LIST_HEAD(&current_entry->choice_members);
+
printd(DEBUG_PARSE, "%s:%d:choice\n", cur_filename, cur_lineno);
};
@@ -248,12 +257,12 @@ choice_entry: choice choice_option_list
$$ = menu_add_menu();
- inside_choice = true;
+ current_choice = current_entry;
};
choice_end: end
{
- inside_choice = false;
+ current_choice = NULL;
if (zconf_endtoken($1, "choice")) {
menu_end_menu();
@@ -277,10 +286,10 @@ choice_option: T_PROMPT T_WORD_QUOTE if_expr T_EOL
printd(DEBUG_PARSE, "%s:%d:prompt\n", cur_filename, cur_lineno);
};
-choice_option: logic_type prompt_stmt_opt T_EOL
+choice_option: T_BOOL T_WORD_QUOTE if_expr T_EOL
{
- menu_set_type($1);
- printd(DEBUG_PARSE, "%s:%d:type(%u)\n", cur_filename, cur_lineno, $1);
+ menu_add_prompt(P_PROMPT, $2, $3);
+ printd(DEBUG_PARSE, "%s:%d:bool\n", cur_filename, cur_lineno);
};
choice_option: T_DEFAULT nonconst_symbol if_expr T_EOL
@@ -290,15 +299,12 @@ choice_option: T_DEFAULT nonconst_symbol if_expr T_EOL
};
type:
- logic_type
+ T_BOOL { $$ = S_BOOLEAN; }
+ | T_TRISTATE { $$ = S_TRISTATE; }
| T_INT { $$ = S_INT; }
| T_HEX { $$ = S_HEX; }
| T_STRING { $$ = S_STRING; }
-logic_type:
- T_BOOL { $$ = S_BOOLEAN; }
- | T_TRISTATE { $$ = S_TRISTATE; }
-
default:
T_DEFAULT { $$ = S_UNKNOWN; }
| T_DEF_BOOL { $$ = S_BOOLEAN; }
@@ -483,7 +489,7 @@ assign_val:
*
* Return: -1 if an error is found, 0 otherwise.
*/
-static int choice_check_sanity(struct menu *menu)
+static int choice_check_sanity(const struct menu *menu)
{
struct property *prop;
int ret = 0;
@@ -638,7 +644,7 @@ static void print_quoted_string(FILE *out, const char *str)
putc('"', out);
}
-static void print_symbol(FILE *out, struct menu *menu)
+static void print_symbol(FILE *out, const struct menu *menu)
{
struct symbol *sym = menu->sym;
struct property *prop;
@@ -689,9 +695,6 @@ static void print_symbol(FILE *out, struct menu *menu)
}
fputc('\n', out);
break;
- case P_CHOICE:
- fputs(" #choice value\n", out);
- break;
case P_SELECT:
fputs( " select ", out);
expr_fprint(prop->expr, out);
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index f0a4a218c4a5..67d1fb95c491 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -9,9 +9,9 @@
#include <stdlib.h>
#include <string.h>
-#include "array_size.h"
+#include <array_size.h>
+#include <list.h>
#include "internal.h"
-#include "list.h"
#include "lkc.h"
#include "preprocess.h"
diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh
index 0e113b0f2455..bb2df66363a8 100755
--- a/scripts/kconfig/qconf-cfg.sh
+++ b/scripts/kconfig/qconf-cfg.sh
@@ -1,6 +1,8 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+set -eu
+
cflags=$1
libs=$2
bin=$3
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index c6c42c0f4e5d..7d239c032b3d 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -147,7 +147,7 @@ void ConfigItem::updateMenu(void)
expr = sym_get_tristate_value(sym);
switch (expr) {
case yes:
- if (sym_is_choice_value(sym) && type == S_BOOLEAN)
+ if (sym_is_choice_value(sym))
setIcon(promptColIdx, choiceYesIcon);
else
setIcon(promptColIdx, symbolYesIcon);
@@ -1101,14 +1101,6 @@ QString ConfigInfoView::debug_info(struct symbol *sym)
&stream, E_NONE);
stream << "<br>";
break;
- case P_CHOICE:
- if (sym_is_choice(sym)) {
- stream << "choice: ";
- expr_print(prop->expr, expr_print_help,
- &stream, E_NONE);
- stream << "<br>";
- }
- break;
default:
stream << "unknown property: ";
stream << prop_get_type_name(prop->type);
@@ -1397,8 +1389,6 @@ ConfigMainWindow::ConfigMainWindow(void)
conf_set_changed_callback(conf_changed);
- // Set saveAction's initial state
- conf_changed();
configname = xstrdup(conf_get_configname());
QAction *saveAsAction = new QAction("Save &As...", this);
@@ -1851,10 +1841,10 @@ void ConfigMainWindow::saveSettings(void)
configSettings->writeSizes("/split2", split2->sizes());
}
-void ConfigMainWindow::conf_changed(void)
+void ConfigMainWindow::conf_changed(bool dirty)
{
if (saveAction)
- saveAction->setEnabled(conf_get_changed());
+ saveAction->setEnabled(dirty);
}
void fixup_rootmenu(struct menu *menu)
@@ -1904,7 +1894,6 @@ int main(int ac, char** av)
conf_parse(name);
fixup_rootmenu(&rootmenu);
- conf_read(NULL);
//zconfdump(stdout);
configApp = new QApplication(ac, av);
@@ -1916,6 +1905,9 @@ int main(int ac, char** av)
//zconfdump(stdout);
configApp->connect(configApp, SIGNAL(lastWindowClosed()), SLOT(quit()));
configApp->connect(configApp, SIGNAL(aboutToQuit()), v, SLOT(saveSettings()));
+
+ conf_read(NULL);
+
v->show();
configApp->exec();
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index 78b0a1dfcd53..53373064d90a 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -239,7 +239,7 @@ class ConfigMainWindow : public QMainWindow {
char *configname;
static QAction *saveAction;
- static void conf_changed(void);
+ static void conf_changed(bool);
public:
ConfigMainWindow(void);
public slots:
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 0e439d3d48d1..71502abd3b12 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -40,16 +40,12 @@ struct symbol *modules_sym;
static tristate modules_val;
static int sym_warnings;
-enum symbol_type sym_get_type(struct symbol *sym)
+enum symbol_type sym_get_type(const struct symbol *sym)
{
enum symbol_type type = sym->type;
- if (type == S_TRISTATE) {
- if (sym_is_choice_value(sym) && sym->visible == yes)
- type = S_BOOLEAN;
- else if (modules_val == no)
- type = S_BOOLEAN;
- }
+ if (type == S_TRISTATE && modules_val == no)
+ type = S_BOOLEAN;
return type;
}
@@ -72,15 +68,6 @@ const char *sym_type_name(enum symbol_type type)
return "???";
}
-struct property *sym_get_choice_prop(struct symbol *sym)
-{
- struct property *prop;
-
- for_all_choices(sym, prop)
- return prop;
- return NULL;
-}
-
/**
* sym_get_choice_menu - get the parent choice menu if present
*
@@ -88,7 +75,7 @@ struct property *sym_get_choice_prop(struct symbol *sym)
*
* Return: a choice menu if this function is called against a choice member.
*/
-struct menu *sym_get_choice_menu(struct symbol *sym)
+struct menu *sym_get_choice_menu(const struct symbol *sym)
{
struct menu *menu = NULL;
struct menu *m;
@@ -192,7 +179,6 @@ static void sym_set_changed(struct symbol *sym)
{
struct menu *menu;
- sym->flags |= SYMBOL_CHANGED;
list_for_each_entry(menu, &sym->menus, link)
menu->flags |= MENU_CHANGED;
}
@@ -208,26 +194,12 @@ static void sym_set_all_changed(void)
static void sym_calc_visibility(struct symbol *sym)
{
struct property *prop;
- struct symbol *choice_sym = NULL;
tristate tri;
/* any prompt visible? */
tri = no;
-
- if (sym_is_choice_value(sym))
- choice_sym = prop_get_symbol(sym_get_choice_prop(sym));
-
for_all_prompts(sym, prop) {
prop->visible.tri = expr_calc_value(prop->visible.expr);
- /*
- * Tristate choice_values with visibility 'mod' are
- * not visible if the corresponding choice's value is
- * 'yes'.
- */
- if (choice_sym && sym->type == S_TRISTATE &&
- prop->visible.tri == mod && choice_sym->curr.tri == yes)
- prop->visible.tri = no;
-
tri = EXPR_OR(tri, prop->visible.tri);
}
if (tri == mod && (sym->type != S_TRISTATE || modules_val == no))
@@ -274,14 +246,14 @@ static void sym_calc_visibility(struct symbol *sym)
* Next locate the first visible choice value
* Return NULL if none was found
*/
-struct symbol *sym_choice_default(struct symbol *sym)
+struct symbol *sym_choice_default(struct menu *choice)
{
+ struct menu *menu;
struct symbol *def_sym;
struct property *prop;
- struct expr *e;
/* any of the defaults visible? */
- for_all_defaults(sym, prop) {
+ for_all_defaults(choice->sym, prop) {
prop->visible.tri = expr_calc_value(prop->visible.expr);
if (prop->visible.tri == no)
continue;
@@ -291,48 +263,99 @@ struct symbol *sym_choice_default(struct symbol *sym)
}
/* just get the first visible value */
- prop = sym_get_choice_prop(sym);
- expr_list_for_each_sym(prop->expr, e, def_sym)
- if (def_sym->visible != no)
- return def_sym;
+ menu_for_each_sub_entry(menu, choice)
+ if (menu->sym && menu->sym->visible != no)
+ return menu->sym;
/* failed to locate any defaults */
return NULL;
}
-static struct symbol *sym_calc_choice(struct symbol *sym)
+/*
+ * sym_calc_choice - calculate symbol values in a choice
+ *
+ * @choice: a menu of the choice
+ *
+ * Return: the chosen symbol, or NULL if no member is visible
+ */
+struct symbol *sym_calc_choice(struct menu *choice)
{
- struct symbol *def_sym;
- struct property *prop;
- struct expr *e;
- int flags;
-
- /* first calculate all choice values' visibilities */
- flags = sym->flags;
- prop = sym_get_choice_prop(sym);
- expr_list_for_each_sym(prop->expr, e, def_sym) {
- sym_calc_visibility(def_sym);
- if (def_sym->visible != no)
- flags &= def_sym->flags;
+ struct symbol *res = NULL;
+ struct symbol *sym;
+ struct menu *menu;
+
+ /* Traverse the list of choice members in priority order. */
+ list_for_each_entry(sym, &choice->choice_members, choice_link) {
+ sym_calc_visibility(sym);
+ if (sym->visible == no)
+ continue;
+
+ /* The first visible symbol with the user value 'y'. */
+ if (sym_has_value(sym) && sym->def[S_DEF_USER].tri == yes) {
+ res = sym;
+ break;
+ }
}
- sym->flags &= flags | ~SYMBOL_DEF_USER;
+ /*
+ * If 'y' is not found in the user input, use the default, unless it is
+ * explicitly set to 'n'.
+ */
+ if (!res) {
+ res = sym_choice_default(choice);
+ if (res && sym_has_value(res) && res->def[S_DEF_USER].tri == no)
+ res = NULL;
+ }
- /* is the user choice visible? */
- def_sym = sym->def[S_DEF_USER].val;
- if (def_sym && def_sym->visible != no)
- return def_sym;
+ /* Still not found. Pick up the first visible, user-unspecified symbol. */
+ if (!res) {
+ menu_for_each_sub_entry(menu, choice) {
+ sym = menu->sym;
- def_sym = sym_choice_default(sym);
+ if (!sym || sym->visible == no || sym_has_value(sym))
+ continue;
- if (def_sym == NULL)
- /* no choice? reset tristate value */
- sym->curr.tri = no;
+ res = sym;
+ break;
+ }
+ }
+
+ /*
+ * Still not found. Traverse the list in _reverse_ order to pick the
+ * member holding the lowest-priority 'n'.
+ */
+ if (!res) {
+ list_for_each_entry_reverse(sym, &choice->choice_members,
+ choice_link) {
+ if (sym->visible == no)
+ continue;
+
+ res = sym;
+ break;
+ }
+ }
+
+ menu_for_each_sub_entry(menu, choice) {
+ tristate val;
+
+ sym = menu->sym;
+
+ if (!sym || sym->visible == no)
+ continue;
+
+ val = sym == res ? yes : no;
+
+ if (sym->curr.tri != val)
+ sym_set_changed(sym);
+
+ sym->curr.tri = val;
+ sym->flags |= SYMBOL_VALID | SYMBOL_WRITE;
+ }
- return def_sym;
+ return res;
}
-static void sym_warn_unmet_dep(struct symbol *sym)
+static void sym_warn_unmet_dep(const struct symbol *sym)
{
struct gstr gs = str_new();
@@ -365,7 +388,7 @@ void sym_calc_value(struct symbol *sym)
{
struct symbol_value newval, oldval;
struct property *prop;
- struct expr *e;
+ struct menu *choice_menu;
if (!sym)
return;
@@ -373,13 +396,6 @@ void sym_calc_value(struct symbol *sym)
if (sym->flags & SYMBOL_VALID)
return;
- if (sym_is_choice_value(sym) &&
- sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) {
- sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES;
- prop = sym_get_choice_prop(sym);
- sym_calc_value(prop_get_symbol(prop));
- }
-
sym->flags |= SYMBOL_VALID;
oldval = sym->curr;
@@ -418,9 +434,11 @@ void sym_calc_value(struct symbol *sym)
switch (sym_get_type(sym)) {
case S_BOOLEAN:
case S_TRISTATE:
- if (sym_is_choice_value(sym) && sym->visible == yes) {
- prop = sym_get_choice_prop(sym);
- newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? yes : no;
+ choice_menu = sym_get_choice_menu(sym);
+
+ if (choice_menu) {
+ sym_calc_choice(choice_menu);
+ newval.tri = sym->curr.tri;
} else {
if (sym->visible != no) {
/* if the symbol is visible use the user value
@@ -479,8 +497,6 @@ void sym_calc_value(struct symbol *sym)
}
sym->curr = newval;
- if (sym_is_choice(sym) && newval.tri == yes)
- sym->curr.val = sym_calc_choice(sym);
sym_validate_range(sym);
if (memcmp(&oldval, &sym->curr, sizeof(oldval))) {
@@ -491,23 +507,8 @@ void sym_calc_value(struct symbol *sym)
}
}
- if (sym_is_choice(sym)) {
- struct symbol *choice_sym;
-
- prop = sym_get_choice_prop(sym);
- expr_list_for_each_sym(prop->expr, e, choice_sym) {
- if ((sym->flags & SYMBOL_WRITE) &&
- choice_sym->visible != no)
- choice_sym->flags |= SYMBOL_WRITE;
- if (sym->flags & SYMBOL_CHANGED)
- sym_set_changed(choice_sym);
- }
-
+ if (sym_is_choice(sym))
sym->flags &= ~SYMBOL_WRITE;
- }
-
- if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES)
- set_all_choice_values(sym);
}
void sym_clear_all_valid(void)
@@ -520,7 +521,7 @@ void sym_clear_all_valid(void)
sym_calc_value(modules_sym);
}
-bool sym_tristate_within_range(struct symbol *sym, tristate val)
+bool sym_tristate_within_range(const struct symbol *sym, tristate val)
{
int type = sym_get_type(sym);
@@ -534,8 +535,6 @@ bool sym_tristate_within_range(struct symbol *sym, tristate val)
return false;
if (sym->visible <= sym->rev_dep.tri)
return false;
- if (sym_is_choice_value(sym) && sym->visible == yes)
- return val == yes;
return val >= sym->rev_dep.tri && val <= sym->visible;
}
@@ -543,42 +542,75 @@ bool sym_set_tristate_value(struct symbol *sym, tristate val)
{
tristate oldval = sym_get_tristate_value(sym);
- if (oldval != val && !sym_tristate_within_range(sym, val))
+ if (!sym_tristate_within_range(sym, val))
return false;
- if (!(sym->flags & SYMBOL_DEF_USER)) {
+ if (!(sym->flags & SYMBOL_DEF_USER) || sym->def[S_DEF_USER].tri != val) {
+ sym->def[S_DEF_USER].tri = val;
sym->flags |= SYMBOL_DEF_USER;
sym_set_changed(sym);
}
- /*
- * setting a choice value also resets the new flag of the choice
- * symbol and all other choice values.
- */
- if (sym_is_choice_value(sym) && val == yes) {
- struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
- struct property *prop;
- struct expr *e;
-
- cs->def[S_DEF_USER].val = sym;
- cs->flags |= SYMBOL_DEF_USER;
- prop = sym_get_choice_prop(cs);
- for (e = prop->expr; e; e = e->left.expr) {
- if (e->right.sym->visible != no)
- e->right.sym->flags |= SYMBOL_DEF_USER;
- }
- }
- sym->def[S_DEF_USER].tri = val;
if (oldval != val)
sym_clear_all_valid();
return true;
}
+/**
+ * choice_set_value - record the user's selection for a choice
+ *
+ * @choice: menu entry for the choice
+ * @sym: selected symbol
+ */
+void choice_set_value(struct menu *choice, struct symbol *sym)
+{
+ struct menu *menu;
+ bool changed = false;
+
+ menu_for_each_sub_entry(menu, choice) {
+ tristate val;
+
+ if (!menu->sym)
+ continue;
+
+ if (menu->sym->visible == no)
+ continue;
+
+ val = menu->sym == sym ? yes : no;
+
+ if (menu->sym->curr.tri != val)
+ changed = true;
+
+ menu->sym->def[S_DEF_USER].tri = val;
+ menu->sym->flags |= SYMBOL_DEF_USER;
+
+ /*
+ * Now that the user has explicitly enabled or disabled this symbol,
+ * it should be given the highest priority. We may be setting several
+ * symbols to 'n' here; the first one processed ends up with the
+ * lowest-priority 'n', which matters when the choice block ends up
+ * with every member set to 'n'.
+ * (see sym_calc_choice())
+ */
+ list_move(&menu->sym->choice_link, &choice->choice_members);
+ }
+
+ if (changed)
+ sym_clear_all_valid();
+}
+
tristate sym_toggle_tristate_value(struct symbol *sym)
{
+ struct menu *choice;
tristate oldval, newval;
+ choice = sym_get_choice_menu(sym);
+ if (choice) {
+ choice_set_value(choice, sym);
+ return yes;
+ }
+
oldval = newval = sym_get_tristate_value(sym);
do {
switch (newval) {
@@ -834,9 +866,14 @@ const char *sym_get_string_value(struct symbol *sym)
return (const char *)sym->curr.val;
}
-bool sym_is_changeable(struct symbol *sym)
+bool sym_is_changeable(const struct symbol *sym)
+{
+ return !sym_is_choice(sym) && sym->visible > sym->rev_dep.tri;
+}
+
+bool sym_is_choice_value(const struct symbol *sym)
{
- return sym->visible > sym->rev_dep.tri;
+ return !list_empty(&sym->choice_link);
}
HASHTABLE_DEFINE(sym_hashtable, SYMBOL_HASHSIZE);
@@ -876,6 +913,7 @@ struct symbol *sym_lookup(const char *name, int flags)
symbol->type = S_UNKNOWN;
symbol->flags = flags;
INIT_LIST_HEAD(&symbol->menus);
+ INIT_LIST_HEAD(&symbol->choice_link);
hash_add(sym_hashtable, &symbol->node, hash);
@@ -1036,13 +1074,14 @@ static void sym_check_print_recursive(struct symbol *last_sym)
{
struct dep_stack *stack;
struct symbol *sym, *next_sym;
- struct menu *menu = NULL;
- struct property *prop;
+ struct menu *choice;
struct dep_stack cv_stack;
+ enum prop_type type;
- if (sym_is_choice_value(last_sym)) {
+ choice = sym_get_choice_menu(last_sym);
+ if (choice) {
dep_stack_insert(&cv_stack, last_sym);
- last_sym = prop_get_symbol(sym_get_choice_prop(last_sym));
+ last_sym = choice->sym;
}
for (stack = check_top; stack != NULL; stack = stack->prev)
@@ -1056,59 +1095,37 @@ static void sym_check_print_recursive(struct symbol *last_sym)
for (; stack; stack = stack->next) {
sym = stack->sym;
next_sym = stack->next ? stack->next->sym : last_sym;
- prop = stack->prop;
- if (prop == NULL)
- prop = stack->sym->prop;
-
- /* for choice values find the menu entry (used below) */
- if (sym_is_choice(sym) || sym_is_choice_value(sym)) {
- for (prop = sym->prop; prop; prop = prop->next) {
- menu = prop->menu;
- if (prop->menu)
- break;
- }
- }
+ type = stack->prop ? stack->prop->type : P_UNKNOWN;
+
if (stack->sym == last_sym)
- fprintf(stderr, "%s:%d:error: recursive dependency detected!\n",
- prop->filename, prop->lineno);
+ fprintf(stderr, "error: recursive dependency detected!\n");
- if (sym_is_choice(sym)) {
- fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n",
- menu->filename, menu->lineno,
- sym->name ? sym->name : "<choice>",
- next_sym->name ? next_sym->name : "<choice>");
- } else if (sym_is_choice_value(sym)) {
- fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n",
- menu->filename, menu->lineno,
+ if (sym_is_choice(next_sym)) {
+ choice = list_first_entry(&next_sym->menus, struct menu, link);
+
+ fprintf(stderr, "\tsymbol %s is part of choice block at %s:%d\n",
sym->name ? sym->name : "<choice>",
- next_sym->name ? next_sym->name : "<choice>");
+ choice->filename, choice->lineno);
} else if (stack->expr == &sym->dir_dep.expr) {
- fprintf(stderr, "%s:%d:\tsymbol %s depends on %s\n",
- prop->filename, prop->lineno,
+ fprintf(stderr, "\tsymbol %s depends on %s\n",
sym->name ? sym->name : "<choice>",
- next_sym->name ? next_sym->name : "<choice>");
+ next_sym->name);
} else if (stack->expr == &sym->rev_dep.expr) {
- fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n",
- prop->filename, prop->lineno,
- sym->name ? sym->name : "<choice>",
- next_sym->name ? next_sym->name : "<choice>");
+ fprintf(stderr, "\tsymbol %s is selected by %s\n",
+ sym->name, next_sym->name);
} else if (stack->expr == &sym->implied.expr) {
- fprintf(stderr, "%s:%d:\tsymbol %s is implied by %s\n",
- prop->filename, prop->lineno,
- sym->name ? sym->name : "<choice>",
- next_sym->name ? next_sym->name : "<choice>");
+ fprintf(stderr, "\tsymbol %s is implied by %s\n",
+ sym->name, next_sym->name);
} else if (stack->expr) {
- fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n",
- prop->filename, prop->lineno,
+ fprintf(stderr, "\tsymbol %s %s value contains %s\n",
sym->name ? sym->name : "<choice>",
- prop_get_type_name(prop->type),
- next_sym->name ? next_sym->name : "<choice>");
+ prop_get_type_name(type),
+ next_sym->name);
} else {
- fprintf(stderr, "%s:%d:\tsymbol %s %s is visible depending on %s\n",
- prop->filename, prop->lineno,
+ fprintf(stderr, "\tsymbol %s %s is visible depending on %s\n",
sym->name ? sym->name : "<choice>",
- prop_get_type_name(prop->type),
- next_sym->name ? next_sym->name : "<choice>");
+ prop_get_type_name(type),
+ next_sym->name);
}
}
@@ -1121,7 +1138,7 @@ static void sym_check_print_recursive(struct symbol *last_sym)
dep_stack_remove();
}
-static struct symbol *sym_check_expr_deps(struct expr *e)
+static struct symbol *sym_check_expr_deps(const struct expr *e)
{
struct symbol *sym;
@@ -1182,8 +1199,7 @@ static struct symbol *sym_check_sym_deps(struct symbol *sym)
stack.expr = NULL;
for (prop = sym->prop; prop; prop = prop->next) {
- if (prop->type == P_CHOICE || prop->type == P_SELECT ||
- prop->type == P_IMPLY)
+ if (prop->type == P_SELECT || prop->type == P_IMPLY)
continue;
stack.prop = prop;
sym2 = sym_check_expr_deps(prop->visible.expr);
@@ -1237,9 +1253,13 @@ out:
if (menu->sym)
menu->sym->flags &= ~SYMBOL_CHECK;
- if (sym2 && sym_is_choice_value(sym2) &&
- prop_get_symbol(sym_get_choice_prop(sym2)) == choice)
- sym2 = choice;
+ if (sym2) {
+ struct menu *choice_menu2;
+
+ choice_menu2 = sym_get_choice_menu(sym2);
+ if (choice_menu2 == choice_menu)
+ sym2 = choice;
+ }
dep_stack_remove();
@@ -1248,8 +1268,8 @@ out:
struct symbol *sym_check_deps(struct symbol *sym)
{
+ struct menu *choice;
struct symbol *sym2;
- struct property *prop;
if (sym->flags & SYMBOL_CHECK) {
sym_check_print_recursive(sym);
@@ -1258,13 +1278,13 @@ struct symbol *sym_check_deps(struct symbol *sym)
if (sym->flags & SYMBOL_CHECKED)
return NULL;
- if (sym_is_choice_value(sym)) {
+ choice = sym_get_choice_menu(sym);
+ if (choice) {
struct dep_stack stack;
/* for choice groups start the check with main choice symbol */
dep_stack_insert(&stack, sym);
- prop = sym_get_choice_prop(sym);
- sym2 = sym_check_deps(prop_get_symbol(prop));
+ sym2 = sym_check_deps(choice->sym);
dep_stack_remove();
} else if (sym_is_choice(sym)) {
sym2 = sym_check_choice_deps(sym);
@@ -1277,10 +1297,9 @@ struct symbol *sym_check_deps(struct symbol *sym)
return sym2;
}
-struct symbol *prop_get_symbol(struct property *prop)
+struct symbol *prop_get_symbol(const struct property *prop)
{
- if (prop->expr && (prop->expr->type == E_SYMBOL ||
- prop->expr->type == E_LIST))
+ if (prop->expr && prop->expr->type == E_SYMBOL)
return prop->expr->left.sym;
return NULL;
}
@@ -1296,8 +1315,6 @@ const char *prop_get_type_name(enum prop_type type)
return "menu";
case P_DEFAULT:
return "default";
- case P_CHOICE:
- return "choice";
case P_SELECT:
return "select";
case P_IMPLY:
diff --git a/scripts/kconfig/tests/choice/Kconfig b/scripts/kconfig/tests/choice/Kconfig
index 8cdda40868a1..cd252579a623 100644
--- a/scripts/kconfig/tests/choice/Kconfig
+++ b/scripts/kconfig/tests/choice/Kconfig
@@ -1,10 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-config MODULES
- bool "Enable loadable module support"
- modules
- default y
-
choice
prompt "boolean choice"
default BOOL_CHOICE1
@@ -16,15 +11,3 @@ config BOOL_CHOICE1
bool "choice 1"
endchoice
-
-choice
- prompt "tristate choice"
- default TRI_CHOICE1
-
-config TRI_CHOICE0
- tristate "choice 0"
-
-config TRI_CHOICE1
- tristate "choice 1"
-
-endchoice
diff --git a/scripts/kconfig/tests/choice/__init__.py b/scripts/kconfig/tests/choice/__init__.py
index 05e162220085..0fc7bf9b5c78 100644
--- a/scripts/kconfig/tests/choice/__init__.py
+++ b/scripts/kconfig/tests/choice/__init__.py
@@ -1,11 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
"""
Basic choice tests.
-
-The handling of 'choice' is a bit complicated part in Kconfig.
-
-The behavior of 'y' choice is intuitive. If choice values are tristate,
-the choice can be 'm' where each value can be enabled independently.
"""
@@ -14,11 +9,6 @@ def test_oldask0(conf):
assert conf.stdout_contains('oldask0_expected_stdout')
-def test_oldask1(conf):
- assert conf.oldaskconfig('oldask1_config') == 0
- assert conf.stdout_contains('oldask1_expected_stdout')
-
-
def test_allyes(conf):
assert conf.allyesconfig() == 0
assert conf.config_contains('allyes_expected_config')
diff --git a/scripts/kconfig/tests/choice/alldef_expected_config b/scripts/kconfig/tests/choice/alldef_expected_config
index 7a754bf4be94..b359a2e6493e 100644
--- a/scripts/kconfig/tests/choice/alldef_expected_config
+++ b/scripts/kconfig/tests/choice/alldef_expected_config
@@ -1,5 +1,2 @@
-CONFIG_MODULES=y
# CONFIG_BOOL_CHOICE0 is not set
CONFIG_BOOL_CHOICE1=y
-# CONFIG_TRI_CHOICE0 is not set
-# CONFIG_TRI_CHOICE1 is not set
diff --git a/scripts/kconfig/tests/choice/allmod_expected_config b/scripts/kconfig/tests/choice/allmod_expected_config
index d1f51651740c..b359a2e6493e 100644
--- a/scripts/kconfig/tests/choice/allmod_expected_config
+++ b/scripts/kconfig/tests/choice/allmod_expected_config
@@ -1,5 +1,2 @@
-CONFIG_MODULES=y
# CONFIG_BOOL_CHOICE0 is not set
CONFIG_BOOL_CHOICE1=y
-CONFIG_TRI_CHOICE0=m
-CONFIG_TRI_CHOICE1=m
diff --git a/scripts/kconfig/tests/choice/allno_expected_config b/scripts/kconfig/tests/choice/allno_expected_config
index b88ee7a43136..b359a2e6493e 100644
--- a/scripts/kconfig/tests/choice/allno_expected_config
+++ b/scripts/kconfig/tests/choice/allno_expected_config
@@ -1,5 +1,2 @@
-# CONFIG_MODULES is not set
# CONFIG_BOOL_CHOICE0 is not set
CONFIG_BOOL_CHOICE1=y
-# CONFIG_TRI_CHOICE0 is not set
-CONFIG_TRI_CHOICE1=y
diff --git a/scripts/kconfig/tests/choice/allyes_expected_config b/scripts/kconfig/tests/choice/allyes_expected_config
index 8a76c1816893..b359a2e6493e 100644
--- a/scripts/kconfig/tests/choice/allyes_expected_config
+++ b/scripts/kconfig/tests/choice/allyes_expected_config
@@ -1,5 +1,2 @@
-CONFIG_MODULES=y
# CONFIG_BOOL_CHOICE0 is not set
CONFIG_BOOL_CHOICE1=y
-# CONFIG_TRI_CHOICE0 is not set
-CONFIG_TRI_CHOICE1=y
diff --git a/scripts/kconfig/tests/choice/oldask0_expected_stdout b/scripts/kconfig/tests/choice/oldask0_expected_stdout
index d2257db46423..80ec34c61ebc 100644
--- a/scripts/kconfig/tests/choice/oldask0_expected_stdout
+++ b/scripts/kconfig/tests/choice/oldask0_expected_stdout
@@ -1,8 +1,4 @@
-Enable loadable module support (MODULES) [Y/n/?] (NEW)
boolean choice
1. choice 0 (BOOL_CHOICE0) (NEW)
> 2. choice 1 (BOOL_CHOICE1) (NEW)
choice[1-2?]:
-tristate choice [M/y/?] (NEW)
- choice 0 (TRI_CHOICE0) [N/m/?] (NEW)
- choice 1 (TRI_CHOICE1) [N/m/?] (NEW)
diff --git a/scripts/kconfig/tests/choice/oldask1_config b/scripts/kconfig/tests/choice/oldask1_config
deleted file mode 100644
index 0f417856c81c..000000000000
--- a/scripts/kconfig/tests/choice/oldask1_config
+++ /dev/null
@@ -1 +0,0 @@
-# CONFIG_MODULES is not set
diff --git a/scripts/kconfig/tests/choice/oldask1_expected_stdout b/scripts/kconfig/tests/choice/oldask1_expected_stdout
deleted file mode 100644
index ffa20ad7f38e..000000000000
--- a/scripts/kconfig/tests/choice/oldask1_expected_stdout
+++ /dev/null
@@ -1,9 +0,0 @@
-Enable loadable module support (MODULES) [N/y/?]
-boolean choice
- 1. choice 0 (BOOL_CHOICE0) (NEW)
-> 2. choice 1 (BOOL_CHOICE1) (NEW)
-choice[1-2?]:
-tristate choice
- 1. choice 0 (TRI_CHOICE0) (NEW)
-> 2. choice 1 (TRI_CHOICE1) (NEW)
-choice[1-2?]:
diff --git a/scripts/kconfig/tests/choice_value_with_m_dep/Kconfig b/scripts/kconfig/tests/choice_value_with_m_dep/Kconfig
deleted file mode 100644
index bd970cec07d6..000000000000
--- a/scripts/kconfig/tests/choice_value_with_m_dep/Kconfig
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config MODULES
- def_bool y
- modules
-
-config DEP
- tristate
- default m
-
-choice
- prompt "Tristate Choice"
-
-config CHOICE0
- tristate "Choice 0"
-
-config CHOICE1
- tristate "Choice 1"
- depends on DEP
-
-endchoice
diff --git a/scripts/kconfig/tests/choice_value_with_m_dep/__init__.py b/scripts/kconfig/tests/choice_value_with_m_dep/__init__.py
deleted file mode 100644
index 075b4e08696e..000000000000
--- a/scripts/kconfig/tests/choice_value_with_m_dep/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-"""
-Hide tristate choice values with mod dependency in y choice.
-
-If tristate choice values depend on symbols set to 'm', they should be
-hidden when the choice containing them is changed from 'm' to 'y'
-(i.e. exclusive choice).
-
-Related Linux commit: fa64e5f6a35efd5e77d639125d973077ca506074
-"""
-
-
-def test(conf):
- assert conf.oldaskconfig('config', 'y') == 0
- assert conf.config_contains('expected_config')
- assert conf.stdout_contains('expected_stdout')
diff --git a/scripts/kconfig/tests/choice_value_with_m_dep/config b/scripts/kconfig/tests/choice_value_with_m_dep/config
deleted file mode 100644
index 3a126b7a2546..000000000000
--- a/scripts/kconfig/tests/choice_value_with_m_dep/config
+++ /dev/null
@@ -1,2 +0,0 @@
-CONFIG_CHOICE0=m
-CONFIG_CHOICE1=m
diff --git a/scripts/kconfig/tests/choice_value_with_m_dep/expected_config b/scripts/kconfig/tests/choice_value_with_m_dep/expected_config
deleted file mode 100644
index 4d07b449540e..000000000000
--- a/scripts/kconfig/tests/choice_value_with_m_dep/expected_config
+++ /dev/null
@@ -1,3 +0,0 @@
-CONFIG_MODULES=y
-CONFIG_DEP=m
-CONFIG_CHOICE0=y
diff --git a/scripts/kconfig/tests/choice_value_with_m_dep/expected_stdout b/scripts/kconfig/tests/choice_value_with_m_dep/expected_stdout
deleted file mode 100644
index 2b50ab65c86a..000000000000
--- a/scripts/kconfig/tests/choice_value_with_m_dep/expected_stdout
+++ /dev/null
@@ -1,4 +0,0 @@
-Tristate Choice [M/y/?] y
-Tristate Choice
-> 1. Choice 0 (CHOICE0)
-choice[1]: 1
diff --git a/scripts/kconfig/tests/err_recursive_dep/expected_stderr b/scripts/kconfig/tests/err_recursive_dep/expected_stderr
index 05d4ced70320..fc2e860af082 100644
--- a/scripts/kconfig/tests/err_recursive_dep/expected_stderr
+++ b/scripts/kconfig/tests/err_recursive_dep/expected_stderr
@@ -1,38 +1,38 @@
-Kconfig:5:error: recursive dependency detected!
-Kconfig:5: symbol A depends on A
+error: recursive dependency detected!
+ symbol A depends on A
For a resolution refer to Documentation/kbuild/kconfig-language.rst
subsection "Kconfig recursive dependency limitations"
-Kconfig:11:error: recursive dependency detected!
-Kconfig:11: symbol B is selected by B
+error: recursive dependency detected!
+ symbol B is selected by B
For a resolution refer to Documentation/kbuild/kconfig-language.rst
subsection "Kconfig recursive dependency limitations"
-Kconfig:17:error: recursive dependency detected!
-Kconfig:17: symbol C1 depends on C2
-Kconfig:21: symbol C2 depends on C1
+error: recursive dependency detected!
+ symbol C1 depends on C2
+ symbol C2 depends on C1
For a resolution refer to Documentation/kbuild/kconfig-language.rst
subsection "Kconfig recursive dependency limitations"
-Kconfig:27:error: recursive dependency detected!
-Kconfig:27: symbol D1 depends on D2
-Kconfig:32: symbol D2 is selected by D1
+error: recursive dependency detected!
+ symbol D1 depends on D2
+ symbol D2 is selected by D1
For a resolution refer to Documentation/kbuild/kconfig-language.rst
subsection "Kconfig recursive dependency limitations"
-Kconfig:37:error: recursive dependency detected!
-Kconfig:37: symbol E1 depends on E2
-Kconfig:42: symbol E2 is implied by E1
+error: recursive dependency detected!
+ symbol E1 depends on E2
+ symbol E2 is implied by E1
For a resolution refer to Documentation/kbuild/kconfig-language.rst
subsection "Kconfig recursive dependency limitations"
-Kconfig:49:error: recursive dependency detected!
-Kconfig:49: symbol F1 default value contains F2
-Kconfig:51: symbol F2 depends on F1
+error: recursive dependency detected!
+ symbol F1 default value contains F2
+ symbol F2 depends on F1
For a resolution refer to Documentation/kbuild/kconfig-language.rst
subsection "Kconfig recursive dependency limitations"
-Kconfig:60:error: recursive dependency detected!
-Kconfig:60: symbol G depends on G
+error: recursive dependency detected!
+ symbol G depends on G
For a resolution refer to Documentation/kbuild/kconfig-language.rst
subsection "Kconfig recursive dependency limitations"
diff --git a/scripts/kconfig/tests/inter_choice/Kconfig b/scripts/kconfig/tests/inter_choice/Kconfig
deleted file mode 100644
index 26c25f68695b..000000000000
--- a/scripts/kconfig/tests/inter_choice/Kconfig
+++ /dev/null
@@ -1,25 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config MODULES
- def_bool y
- modules
-
-choice
- prompt "Choice"
-
-config CHOICE_VAL0
- tristate "Choice 0"
-
-config CHOIVE_VAL1
- tristate "Choice 1"
-
-endchoice
-
-choice
- prompt "Another choice"
- depends on CHOICE_VAL0
-
-config DUMMY
- bool "dummy"
-
-endchoice
diff --git a/scripts/kconfig/tests/inter_choice/__init__.py b/scripts/kconfig/tests/inter_choice/__init__.py
deleted file mode 100644
index ffea6b1148a6..000000000000
--- a/scripts/kconfig/tests/inter_choice/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-"""
-Do not affect user-assigned choice value by another choice.
-
-Handling of state flags for choices is complecated. In old days,
-the defconfig result of a choice could be affected by another choice
-if those choices interact by 'depends on', 'select', etc.
-
-Related Linux commit: fbe98bb9ed3dae23e320c6b113e35f129538d14a
-"""
-
-
-def test(conf):
- assert conf.defconfig('defconfig') == 0
- assert conf.config_contains('expected_config')
diff --git a/scripts/kconfig/tests/inter_choice/defconfig b/scripts/kconfig/tests/inter_choice/defconfig
deleted file mode 100644
index 162c4148e2a5..000000000000
--- a/scripts/kconfig/tests/inter_choice/defconfig
+++ /dev/null
@@ -1 +0,0 @@
-CONFIG_CHOICE_VAL0=y
diff --git a/scripts/kconfig/tests/inter_choice/expected_config b/scripts/kconfig/tests/inter_choice/expected_config
deleted file mode 100644
index 5dceefb054e3..000000000000
--- a/scripts/kconfig/tests/inter_choice/expected_config
+++ /dev/null
@@ -1,4 +0,0 @@
-CONFIG_MODULES=y
-CONFIG_CHOICE_VAL0=y
-# CONFIG_CHOIVE_VAL1 is not set
-CONFIG_DUMMY=y
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index 439c131b424e..696ff477671e 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -8,7 +8,7 @@
#include <stdlib.h>
#include <string.h>
-#include "hashtable.h"
+#include <hashtable.h>
#include "lkc.h"
unsigned int strhash(const char *s)
@@ -98,7 +98,7 @@ void str_printf(struct gstr *gs, const char *fmt, ...)
}
/* Retrieve value of growable string */
-char *str_get(struct gstr *gs)
+char *str_get(const struct gstr *gs)
{
return gs->s;
}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 518c70b8db50..f7b2503cdba9 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -45,7 +45,6 @@ info()
# Link of vmlinux
# ${1} - output file
-# ${2}, ${3}, ... - optional extra .o files
vmlinux_link()
{
local output=${1}
@@ -90,7 +89,7 @@ vmlinux_link()
ldflags="${ldflags} ${wl}--script=${objtree}/${KBUILD_LDS}"
# The kallsyms linking does not need debug symbols included.
- if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" ] ; then
+ if [ -n "${strip_debug}" ] ; then
ldflags="${ldflags} ${wl}--strip-debug"
fi
@@ -101,15 +100,15 @@ vmlinux_link()
${ld} ${ldflags} -o ${output} \
${wl}--whole-archive ${objs} ${wl}--no-whole-archive \
${wl}--start-group ${libs} ${wl}--end-group \
- $@ ${ldlibs}
+ ${kallsymso} ${btf_vmlinux_bin_o} ${ldlibs}
}
# generate .BTF typeinfo from DWARF debuginfo
# ${1} - vmlinux image
-# ${2} - file to dump raw BTF data into
gen_btf()
{
local pahole_ver
+ local btf_data=${1}.btf.o
if ! [ -x "$(command -v ${PAHOLE})" ]; then
echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
@@ -122,18 +121,16 @@ gen_btf()
return 1
fi
- vmlinux_link ${1}
-
- info "BTF" ${2}
+ info BTF "${btf_data}"
LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1}
- # Create ${2} which contains just .BTF section but no symbols. Add
+ # Create ${btf_data} which contains just .BTF section but no symbols. Add
# SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
# deletes all symbols including __start_BTF and __stop_BTF, which will
# be redefined in the linker script. Add 2>/dev/null to suppress GNU
# objcopy warnings: "empty loadable segment detected at ..."
${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
- --strip-all ${1} ${2} 2>/dev/null
+ --strip-all ${1} "${btf_data}" 2>/dev/null
# Change e_type to ET_REL so that it can be used to link final vmlinux.
# GNU ld 2.35+ and lld do not allow an ET_EXEC input.
if is_enabled CONFIG_CPU_BIG_ENDIAN; then
@@ -141,10 +138,12 @@ gen_btf()
else
et_rel='\1\0'
fi
- printf "${et_rel}" | dd of=${2} conv=notrunc bs=1 seek=16 status=none
+ printf "${et_rel}" | dd of="${btf_data}" conv=notrunc bs=1 seek=16 status=none
+
+ btf_vmlinux_bin_o=${btf_data}
}
-# Create ${2} .S file with all symbols from the ${1} object file
+# Create ${2}.o file with all symbols from the ${1} object file
kallsyms()
{
local kallsymopt;
@@ -157,35 +156,27 @@ kallsyms()
kallsymopt="${kallsymopt} --absolute-percpu"
fi
- if is_enabled CONFIG_KALLSYMS_BASE_RELATIVE; then
- kallsymopt="${kallsymopt} --base-relative"
- fi
-
if is_enabled CONFIG_LTO_CLANG; then
kallsymopt="${kallsymopt} --lto-clang"
fi
- info KSYMS ${2}
- scripts/kallsyms ${kallsymopt} ${1} > ${2}
+ info KSYMS "${2}.S"
+ scripts/kallsyms ${kallsymopt} "${1}" > "${2}.S"
+
+ info AS "${2}.o"
+ ${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
+ ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} -c -o "${2}.o" "${2}.S"
+
+ kallsymso=${2}.o
}
-# Perform one step in kallsyms generation, including temporary linking of
-# vmlinux.
-kallsyms_step()
+# Perform kallsyms for the given temporary vmlinux.
+sysmap_and_kallsyms()
{
- kallsymso_prev=${kallsymso}
- kallsyms_vmlinux=.tmp_vmlinux.kallsyms${1}
- kallsymso=${kallsyms_vmlinux}.o
- kallsyms_S=${kallsyms_vmlinux}.S
-
- vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o}
- mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms
- kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
+ mksysmap "${1}" "${1}.syms"
+ kallsyms "${1}.syms" "${1}.kallsyms"
- info AS ${kallsymso}
- ${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
- ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
- -c -o ${kallsymso} ${kallsyms_S}
+ kallsyms_sysmap=${1}.syms
}
# Create map file with all symbols from ${1}
@@ -223,26 +214,40 @@ fi
${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init init/version-timestamp.o
-btf_vmlinux_bin_o=""
+btf_vmlinux_bin_o=
+kallsymso=
+strip_debug=
+
+if is_enabled CONFIG_KALLSYMS; then
+ kallsyms /dev/null .tmp_vmlinux0.kallsyms
+fi
+
+if is_enabled CONFIG_KALLSYMS || is_enabled CONFIG_DEBUG_INFO_BTF; then
+
+ # The kallsyms linking does not need debug symbols, but the BTF does.
+ if ! is_enabled CONFIG_DEBUG_INFO_BTF; then
+ strip_debug=1
+ fi
+
+ vmlinux_link .tmp_vmlinux1
+fi
+
if is_enabled CONFIG_DEBUG_INFO_BTF; then
- btf_vmlinux_bin_o=.btf.vmlinux.bin.o
- if ! gen_btf .tmp_vmlinux.btf $btf_vmlinux_bin_o ; then
+ if ! gen_btf .tmp_vmlinux1; then
echo >&2 "Failed to generate BTF for vmlinux"
echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF"
exit 1
fi
fi
-kallsymso=""
-kallsymso_prev=""
-kallsyms_vmlinux=""
if is_enabled CONFIG_KALLSYMS; then
# kallsyms support
# Generate section listing all symbols and add it into vmlinux
- # It's a three step process:
+	# It's a four-step process:
+	# 0) Generate a dummy __kallsyms with an empty symbol list.
# 1) Link .tmp_vmlinux.kallsyms1 so it has all symbols and sections,
- # but __kallsyms is empty.
+ # with a dummy __kallsyms.
# Running kallsyms on that gives us .tmp_kallsyms1.o with
# the right size
# 2) Link .tmp_vmlinux.kallsyms2 so it now has a __kallsyms section of
@@ -261,19 +266,25 @@ if is_enabled CONFIG_KALLSYMS; then
# a) Verify that the System.map from vmlinux matches the map from
# ${kallsymso}.
- kallsyms_step 1
- kallsyms_step 2
+ # The kallsyms linking does not need debug symbols included.
+ strip_debug=1
- # step 3
- size1=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" ${kallsymso_prev})
+ sysmap_and_kallsyms .tmp_vmlinux1
+ size1=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" ${kallsymso})
+
+ vmlinux_link .tmp_vmlinux2
+ sysmap_and_kallsyms .tmp_vmlinux2
size2=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" ${kallsymso})
if [ $size1 -ne $size2 ] || [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
- kallsyms_step 3
+ vmlinux_link .tmp_vmlinux3
+ sysmap_and_kallsyms .tmp_vmlinux3
fi
fi
-vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o}
+strip_debug=
+
+vmlinux_link vmlinux
# fill in BTF IDs
if is_enabled CONFIG_DEBUG_INFO_BTF && is_enabled CONFIG_BPF; then
@@ -293,7 +304,7 @@ fi
# step a (see comment above)
if is_enabled CONFIG_KALLSYMS; then
- if ! cmp -s System.map ${kallsyms_vmlinux}.syms; then
+ if ! cmp -s System.map "${kallsyms_sysmap}"; then
echo >&2 Inconsistent kallsyms data
echo >&2 'Try "make KALLSYMS_EXTRA_PASS=1" as a workaround'
exit 1
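
The rework above replaces the old numbered kallsyms_step helper with explicit vmlinux_link / sysmap_and_kallsyms passes, relinking a third time only when the kallsyms object changed size. Below is a minimal shell sketch of that control flow, not part of the patch; link_pass and gen_kallsyms are hypothetical stand-ins that only record what the real helpers would do:

#!/bin/sh
# Stand-ins for vmlinux_link and sysmap_and_kallsyms (illustrative only).
link_pass() {
	echo "link $1 (embedding ${kallsymso:-no kallsyms object yet})"
}
gen_kallsyms() {
	kallsymso=${1}.kallsyms.o
	printf 'fake symbol table\n' > "${kallsymso}"
	echo "generate ${kallsymso}"
}

kallsymso=
gen_kallsyms .tmp_vmlinux0               # step 0: dummy object, empty symbol list
link_pass .tmp_vmlinux1                  # link embedding the dummy object
gen_kallsyms .tmp_vmlinux1               # real symbols, now the right size
size1=$(wc -c < .tmp_vmlinux1.kallsyms.o)

link_pass .tmp_vmlinux2                  # relink with the sized kallsyms object
gen_kallsyms .tmp_vmlinux2
size2=$(wc -c < .tmp_vmlinux2.kallsyms.o)

if [ "$size1" -ne "$size2" ]; then       # extra pass only if the size moved
	link_pass .tmp_vmlinux3
	gen_kallsyms .tmp_vmlinux3
fi

link_pass vmlinux                        # final link with the settled object
rm -f .tmp_vmlinux[0123].kallsyms.o      # tidy up the sketch's scratch files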
diff --git a/scripts/make_fit.py b/scripts/make_fit.py
index 263147df80a4..4a1bb2f55861 100755
--- a/scripts/make_fit.py
+++ b/scripts/make_fit.py
@@ -22,6 +22,11 @@ the entire FIT.
Use -c to compress the data, using bzip2, gzip, lz4, lzma, lzo and
zstd algorithms.
+Use -D to decompose "composite" DTBs into their base components and
+deduplicate the resulting base DTBs and DTB overlays. This requires the
+DTBs to be sourced from the kernel build directory, as the implementation
+looks at the .cmd files produced by the kernel build.
+
The resulting FIT can be booted by bootloaders which support FIT, such
as U-Boot, Linuxboot, Tianocore, etc.
@@ -64,6 +69,8 @@ def parse_args():
help='Specifies the architecture')
parser.add_argument('-c', '--compress', type=str, default='none',
help='Specifies the compression')
+ parser.add_argument('-D', '--decompose-dtbs', action='store_true',
+ help='Decompose composite DTBs into base DTB and overlays')
parser.add_argument('-E', '--external', action='store_true',
help='Convert the FIT to use external data')
parser.add_argument('-n', '--name', type=str, required=True,
@@ -140,12 +147,12 @@ def finish_fit(fsw, entries):
fsw.end_node()
seq = 0
with fsw.add_node('configurations'):
- for model, compat in entries:
+ for model, compat, files in entries:
seq += 1
with fsw.add_node(f'conf-{seq}'):
fsw.property('compatible', bytes(compat))
fsw.property_string('description', model)
- fsw.property_string('fdt', f'fdt-{seq}')
+ fsw.property('fdt', bytes(''.join(f'fdt-{x}\x00' for x in files), "ascii"))
fsw.property_string('kernel', 'kernel')
fsw.end_node()
@@ -193,21 +200,9 @@ def output_dtb(fsw, seq, fname, arch, compress):
fname (str): Filename containing the DTB
arch: FIT architecture, e.g. 'arm64'
compress (str): Compressed algorithm, e.g. 'gzip'
-
- Returns:
- tuple:
- str: Model name
- bytes: Compatible stringlist
"""
with fsw.add_node(f'fdt-{seq}'):
- # Get the compatible / model information
- with open(fname, 'rb') as inf:
- data = inf.read()
- fdt = libfdt.FdtRo(data)
- model = fdt.getprop(0, 'model').as_str()
- compat = fdt.getprop(0, 'compatible')
-
- fsw.property_string('description', model)
+ fsw.property_string('description', os.path.basename(fname))
fsw.property_string('type', 'flat_dt')
fsw.property_string('arch', arch)
fsw.property_string('compression', compress)
@@ -215,9 +210,45 @@ def output_dtb(fsw, seq, fname, arch, compress):
with open(fname, 'rb') as inf:
compressed = compress_data(inf, compress)
fsw.property('data', compressed)
- return model, compat
+def process_dtb(fname, args):
+ """Process an input DTB, decomposing it if requested and is possible
+
+ Args:
+ fname (str): Filename containing the DTB
+ args (Namespace): Program arguments
+ Returns:
+ tuple:
+ str: Model name string
+ str: Root compatible string
+ files: list of filenames corresponding to the DTB
+ """
+ # Get the compatible / model information
+ with open(fname, 'rb') as inf:
+ data = inf.read()
+ fdt = libfdt.FdtRo(data)
+ model = fdt.getprop(0, 'model').as_str()
+ compat = fdt.getprop(0, 'compatible')
+
+ if args.decompose_dtbs:
+ # Check if the DTB needs to be decomposed
+ path, basename = os.path.split(fname)
+ cmd_fname = os.path.join(path, f'.{basename}.cmd')
+ with open(cmd_fname, 'r', encoding='ascii') as inf:
+ cmd = inf.read()
+
+ if 'scripts/dtc/fdtoverlay' in cmd:
+ # This depends on the structure of the composite DTB command
+ files = cmd.split()
+ files = files[files.index('-i') + 1:]
+ else:
+ files = [fname]
+ else:
+ files = [fname]
+
+ return (model, compat, files)
+
def build_fit(args):
"""Build the FIT from the provided files and arguments
@@ -235,6 +266,7 @@ def build_fit(args):
fsw = libfdt.FdtSw()
setup_fit(fsw, args.name)
entries = []
+ fdts = {}
# Handle the kernel
with open(args.kernel, 'rb') as inf:
@@ -243,12 +275,22 @@ def build_fit(args):
write_kernel(fsw, comp_data, args)
for fname in args.dtbs:
- # Ignore overlay (.dtbo) files
- if os.path.splitext(fname)[1] == '.dtb':
- seq += 1
- size += os.path.getsize(fname)
- model, compat = output_dtb(fsw, seq, fname, args.arch, args.compress)
- entries.append([model, compat])
+        # Skip anything that is not a DTB (*.dtb) file
+ if os.path.splitext(fname)[1] != '.dtb':
+ continue
+
+ (model, compat, files) = process_dtb(fname, args)
+
+ for fn in files:
+ if fn not in fdts:
+ seq += 1
+ size += os.path.getsize(fn)
+ output_dtb(fsw, seq, fn, args.arch, args.compress)
+ fdts[fn] = seq
+
+ files_seq = [fdts[fn] for fn in files]
+
+ entries.append([model, compat, files_seq])
finish_fit(fsw, entries)
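
The -D handling added in process_dtb() above recovers a composite DTB's inputs from the .cmd file that kbuild leaves next to it. A rough shell equivalent of that lookup, useful for checking by hand which base DTB and overlays a given .dtb was assembled from; the path below is a made-up example, and the sed split assumes the single fdtoverlay '-i' option recorded by kbuild:

# Hypothetical example path; substitute a DTB from your own build tree.
dtb=arch/arm64/boot/dts/vendor/board-overlayed.dtb
cmd_file=$(dirname "$dtb")/.$(basename "$dtb").cmd

if grep -q 'scripts/dtc/fdtoverlay' "$cmd_file" 2>/dev/null; then
	# Everything after '-i' on the recorded command line is a base DTB or
	# overlay input; this mirrors the split done in process_dtb().
	echo "composite DTB, inputs:"
	sed -n 's/.*fdtoverlay.*-i //p' "$cmd_file" | tr ' ' '\n'
else
	echo "plain DTB (or no .cmd file found)"
fi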
diff --git a/scripts/mod/list.h b/scripts/mod/list.h
deleted file mode 100644
index a924a6c4aa4d..000000000000
--- a/scripts/mod/list.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LIST_H
-#define LIST_H
-
-#include <stdbool.h>
-#include <stddef.h>
-
-/* Are two types/vars the same type (ignoring qualifiers)? */
-#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- _Static_assert(__same_type(*(ptr), ((type *)0)->member) || \
- __same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type *)(__mptr - offsetof(type, member))); })
-
-#define LIST_POISON1 ((void *) 0x100)
-#define LIST_POISON2 ((void *) 0x122)
-
-/*
- * Circular doubly linked list implementation.
- *
- * Some of the internal functions ("__xxx") are useful when
- * manipulating whole lists rather than single entries, as
- * sometimes we already know the next/prev entries and we can
- * generate better code by using them directly rather than
- * using the generic single-entry routines.
- */
-
-struct list_head {
- struct list_head *next, *prev;
-};
-
-#define LIST_HEAD_INIT(name) { &(name), &(name) }
-
-#define LIST_HEAD(name) \
- struct list_head name = LIST_HEAD_INIT(name)
-
-/**
- * INIT_LIST_HEAD - Initialize a list_head structure
- * @list: list_head structure to be initialized.
- *
- * Initializes the list_head to point to itself. If it is a list header,
- * the result is an empty list.
- */
-static inline void INIT_LIST_HEAD(struct list_head *list)
-{
- list->next = list;
- list->prev = list;
-}
-
-/*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_add(struct list_head *new,
- struct list_head *prev,
- struct list_head *next)
-{
- next->prev = new;
- new->next = next;
- new->prev = prev;
- prev->next = new;
-}
-
-/**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-static inline void list_add(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head, head->next);
-}
-
-/**
- * list_add_tail - add a new entry
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- */
-static inline void list_add_tail(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head->prev, head);
-}
-
-/*
- * Delete a list entry by making the prev/next entries
- * point to each other.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_del(struct list_head *prev, struct list_head *next)
-{
- next->prev = prev;
- prev->next = next;
-}
-
-static inline void __list_del_entry(struct list_head *entry)
-{
- __list_del(entry->prev, entry->next);
-}
-
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
- */
-static inline void list_del(struct list_head *entry)
-{
- __list_del_entry(entry);
- entry->next = LIST_POISON1;
- entry->prev = LIST_POISON2;
-}
-
-/**
- * list_is_head - tests whether @list is the list @head
- * @list: the entry to test
- * @head: the head of the list
- */
-static inline int list_is_head(const struct list_head *list, const struct list_head *head)
-{
- return list == head;
-}
-
-/**
- * list_empty - tests whether a list is empty
- * @head: the list to test.
- */
-static inline int list_empty(const struct list_head *head)
-{
- return head->next == head;
-}
-
-/**
- * list_entry - get the struct for this entry
- * @ptr: the &struct list_head pointer.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_head within the struct.
- */
-#define list_entry(ptr, type, member) \
- container_of(ptr, type, member)
-
-/**
- * list_first_entry - get the first element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_head within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_first_entry(ptr, type, member) \
- list_entry((ptr)->next, type, member)
-
-/**
- * list_next_entry - get the next element in list
- * @pos: the type * to cursor
- * @member: the name of the list_head within the struct.
- */
-#define list_next_entry(pos, member) \
- list_entry((pos)->member.next, typeof(*(pos)), member)
-
-/**
- * list_entry_is_head - test if the entry points to the head of the list
- * @pos: the type * to cursor
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- */
-#define list_entry_is_head(pos, head, member) \
- (&pos->member == (head))
-
-/**
- * list_for_each_entry - iterate over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- */
-#define list_for_each_entry(pos, head, member) \
- for (pos = list_first_entry(head, typeof(*pos), member); \
- !list_entry_is_head(pos, head, member); \
- pos = list_next_entry(pos, member))
-
-/**
- * list_for_each_entry_safe - iterate over list of given type. Safe against removal of list entry
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- */
-#define list_for_each_entry_safe(pos, n, head, member) \
- for (pos = list_first_entry(head, typeof(*pos), member), \
- n = list_next_entry(pos, member); \
- !list_entry_is_head(pos, head, member); \
- pos = n, n = list_next_entry(n, member))
-
-#endif /* LIST_H */
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 4b1edb257618..d16d0ace2775 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -20,6 +20,9 @@
#include <limits.h>
#include <stdbool.h>
#include <errno.h>
+
+#include <hashtable.h>
+#include <list.h>
#include "modpost.h"
#include "../../include/linux/license.h"
@@ -199,13 +202,8 @@ static struct module *new_module(const char *name, size_t namelen)
return mod;
}
-/* A hash of all exported symbols,
- * struct symbol is also used for lists of unresolved symbols */
-
-#define SYMBOL_HASH_SIZE 1024
-
struct symbol {
- struct symbol *next;
+ struct hlist_node hnode;/* link to hash table */
struct list_head list; /* link to module::exported_symbols or module::unresolved_symbols */
struct module *module;
char *namespace;
@@ -218,7 +216,7 @@ struct symbol {
char name[];
};
-static struct symbol *symbolhash[SYMBOL_HASH_SIZE];
+static HASHTABLE_DEFINE(symbol_hashtable, 1U << 10);
/* This is based on the hash algorithm from gdbm, via tdb */
static inline unsigned int tdb_hash(const char *name)
@@ -250,11 +248,7 @@ static struct symbol *alloc_symbol(const char *name)
/* For the hash of exported symbols */
static void hash_add_symbol(struct symbol *sym)
{
- unsigned int hash;
-
- hash = tdb_hash(sym->name) % SYMBOL_HASH_SIZE;
- sym->next = symbolhash[hash];
- symbolhash[hash] = sym;
+ hash_add(symbol_hashtable, &sym->hnode, tdb_hash(sym->name));
}
static void sym_add_unresolved(const char *name, struct module *mod, bool weak)
@@ -275,7 +269,7 @@ static struct symbol *sym_find_with_module(const char *name, struct module *mod)
if (name[0] == '.')
name++;
- for (s = symbolhash[tdb_hash(name) % SYMBOL_HASH_SIZE]; s; s = s->next) {
+ hash_for_each_possible(symbol_hashtable, s, hnode, tdb_hash(name)) {
if (strcmp(s->name, name) == 0 && (!mod || s->module == mod))
return s;
}
@@ -954,17 +948,6 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
match(fromsym, PATTERNS("*_ops", "*_probe", "*_console")))
return 0;
- /*
- * symbols in data sections must not refer to .exit.*, but there are
- * quite a few offenders, so hide these unless for W=1 builds until
- * these are fixed.
- */
- if (!extra_warn &&
- match(fromsec, PATTERNS(DATA_SECTIONS)) &&
- match(tosec, PATTERNS(ALL_EXIT_SECTIONS)) &&
- match(fromsym, PATTERNS("*driver")))
- return 0;
-
/* Check for pattern 3 */
if (strstarts(fromsec, ".head.text") &&
match(tosec, PATTERNS(ALL_INIT_SECTIONS)))
@@ -1168,40 +1151,6 @@ static Elf_Addr addend_386_rel(uint32_t *location, unsigned int r_type)
return (Elf_Addr)(-1);
}
-#ifndef R_ARM_CALL
-#define R_ARM_CALL 28
-#endif
-#ifndef R_ARM_JUMP24
-#define R_ARM_JUMP24 29
-#endif
-
-#ifndef R_ARM_THM_CALL
-#define R_ARM_THM_CALL 10
-#endif
-#ifndef R_ARM_THM_JUMP24
-#define R_ARM_THM_JUMP24 30
-#endif
-
-#ifndef R_ARM_MOVW_ABS_NC
-#define R_ARM_MOVW_ABS_NC 43
-#endif
-
-#ifndef R_ARM_MOVT_ABS
-#define R_ARM_MOVT_ABS 44
-#endif
-
-#ifndef R_ARM_THM_MOVW_ABS_NC
-#define R_ARM_THM_MOVW_ABS_NC 47
-#endif
-
-#ifndef R_ARM_THM_MOVT_ABS
-#define R_ARM_THM_MOVT_ABS 48
-#endif
-
-#ifndef R_ARM_THM_JUMP19
-#define R_ARM_THM_JUMP19 51
-#endif
-
static int32_t sign_extend32(int32_t value, int index)
{
uint8_t shift = 31 - index;
@@ -1262,7 +1211,7 @@ static Elf_Addr addend_arm_rel(void *loc, Elf_Sym *sym, unsigned int r_type)
((lower & 0x07ff) << 1),
20);
return offset + sym->st_value + 4;
- case R_ARM_THM_CALL:
+ case R_ARM_THM_PC22:
case R_ARM_THM_JUMP24:
/*
* Encoding T4:
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index ee43c7950636..58197b34a3c8 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -13,7 +13,7 @@
#include <elf.h>
#include "../../include/linux/module_symbol.h"
-#include "list.h"
+#include <list_types.h>
#include "elfconfig.h"
/* On BSD-alike OSes elf.h defines these according to host's word size */
diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD
new file mode 100644
index 000000000000..663ce300dd06
--- /dev/null
+++ b/scripts/package/PKGBUILD
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Maintainer: Thomas Weißschuh <linux@weissschuh.net>
+# Contributor: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
+
+pkgbase=${PACMAN_PKGBASE:-linux-upstream}
+pkgname=("${pkgbase}" "${pkgbase}-api-headers")
+if grep -q CONFIG_MODULES=y include/config/auto.conf; then
+ pkgname+=("${pkgbase}-headers")
+fi
+pkgver="${KERNELRELEASE//-/_}"
+# The PKGBUILD is evaluated multiple times.
+# Running scripts/build-version from here would introduce inconsistencies.
+pkgrel="${KBUILD_REVISION}"
+pkgdesc='Upstream Linux'
+url='https://www.kernel.org/'
+# Enable flexible cross-compilation
+arch=(${CARCH})
+license=(GPL-2.0-only)
+makedepends=(
+ bc
+ bison
+ cpio
+ flex
+ gettext
+ kmod
+ libelf
+ openssl
+ pahole
+ perl
+ python
+ rsync
+ tar
+)
+options=(!debug !strip !buildflags !makeflags)
+
+build() {
+ # MAKEFLAGS from makepkg.conf override the ones inherited from kbuild.
+ # Bypass this override with a custom variable.
+ export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
+ cd "${objtree}"
+
+ ${MAKE} KERNELRELEASE="${KERNELRELEASE}" KBUILD_BUILD_VERSION="${pkgrel}"
+}
+
+_package() {
+ pkgdesc="The ${pkgdesc} kernel and modules"
+
+ export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
+ cd "${objtree}"
+ local modulesdir="${pkgdir}/usr/${MODLIB}"
+
+ echo "Installing boot image..."
+ # systemd expects to find the kernel here to allow hibernation
+ # https://github.com/systemd/systemd/commit/edda44605f06a41fb86b7ab8128dcf99161d2344
+ install -Dm644 "$(${MAKE} -s image_name)" "${modulesdir}/vmlinuz"
+
+ # Used by mkinitcpio to name the kernel
+ echo "${pkgbase}" > "${modulesdir}/pkgbase"
+
+ echo "Installing modules..."
+ ${MAKE} INSTALL_MOD_PATH="${pkgdir}/usr" INSTALL_MOD_STRIP=1 \
+ DEPMOD=true modules_install
+
+ if [ -d "${srctree}/arch/${SRCARCH}/boot/dts" ]; then
+ echo "Installing dtbs..."
+ ${MAKE} INSTALL_DTBS_PATH="${modulesdir}/dtb" dtbs_install
+ fi
+
+ # remove build link, will be part of -headers package
+ rm -f "${modulesdir}/build"
+}
+
+_package-headers() {
+ pkgdesc="Headers and scripts for building modules for the ${pkgdesc} kernel"
+
+ export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
+ cd "${objtree}"
+ local builddir="${pkgdir}/usr/${MODLIB}/build"
+
+ echo "Installing build files..."
+ "${srctree}/scripts/package/install-extmod-build" "${builddir}"
+
+ echo "Installing System.map and config..."
+ cp System.map "${builddir}/System.map"
+ cp .config "${builddir}/.config"
+
+ echo "Adding symlink..."
+ mkdir -p "${pkgdir}/usr/src"
+ ln -sr "${builddir}" "${pkgdir}/usr/src/${pkgbase}"
+}
+
+_package-api-headers() {
+ pkgdesc="Kernel headers sanitized for use in userspace"
+ provides=(linux-api-headers)
+ conflicts=(linux-api-headers)
+
+ export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
+ cd "${objtree}"
+
+ ${MAKE} headers_install INSTALL_HDR_PATH="${pkgdir}/usr"
+}
+
+for _p in "${pkgname[@]}"; do
+ eval "package_$_p() {
+ $(declare -f "_package${_p#$pkgbase}")
+ _package${_p#$pkgbase}
+ }"
+done
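
The for/eval loop that closes the PKGBUILD above synthesizes one package_<name>() function per pkgname entry by re-emitting the matching _package*() helper with declare -f. A stripped-down bash sketch of the same idiom, using demo package names rather than anything from the patch:

#!/bin/bash
pkgbase=demo
pkgname=("${pkgbase}" "${pkgbase}-headers")

_package()         { echo "packaging ${pkgbase}"; }
_package-headers() { echo "packaging ${pkgbase}-headers"; }

# For each pkgname entry, define package_<name>() so that it carries its own
# copy of the helper's body (via declare -f) and then invokes that helper.
for _p in "${pkgname[@]}"; do
	eval "package_$_p() {
		$(declare -f "_package${_p#$pkgbase}")
		_package${_p#$pkgbase}
	}"
done

package_demo            # -> packaging demo
package_demo-headers    # -> packaging demo-headers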
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index e797ad360f7a..c1757db6aa8a 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -10,7 +10,7 @@
# specified in KDEB_HOOKDIR) that will be called on package install and
# removal.
-set -e
+set -eu
is_enabled() {
grep -q "^$1=y" include/config/auto.conf
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index eb67787f8673..cc87a473c01f 100755
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -11,7 +11,7 @@
# Wichert Akkerman <wichert@wiggy.net>.
#
-set -e
+set -eu
#
# Some variables and settings used throughout the script
diff --git a/scripts/package/gen-diff-patch b/scripts/package/gen-diff-patch
index 8a98b7bb78a0..f272f7770ea3 100755
--- a/scripts/package/gen-diff-patch
+++ b/scripts/package/gen-diff-patch
@@ -1,6 +1,8 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
+set -eu
+
diff_patch=$1
mkdir -p "$(dirname "${diff_patch}")"
diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build
index 76e0765dfcd6..8cc9e13403ae 100755
--- a/scripts/package/install-extmod-build
+++ b/scripts/package/install-extmod-build
@@ -1,13 +1,10 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
-set -e
+set -eu
destdir=${1}
-test -n "${srctree}"
-test -n "${SRCARCH}"
-
is_enabled() {
grep -q "^$1=y" include/config/auto.conf
}
diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
index c52d517b9364..74355ff0e106 100644
--- a/scripts/package/kernel.spec
+++ b/scripts/package/kernel.spec
@@ -27,7 +27,7 @@ The Linux Kernel, the operating system core itself
%package headers
Summary: Header files for the Linux kernel for use by glibc
Group: Development/System
-Obsoletes: kernel-headers
+Obsoletes: kernel-headers < %{version}
Provides: kernel-headers = %{version}
%description headers
Kernel-headers includes the C header files that specify the interface
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 070149c985fe..10637d403777 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -4,7 +4,7 @@
#
# Simple script to generate a debian/ directory for a Linux kernel.
-set -e
+set -eu
is_enabled() {
grep -q "^$1=y" include/config/auto.conf
@@ -19,7 +19,7 @@ if_enabled_echo() {
}
set_debarch() {
- if [ -n "$KBUILD_DEBARCH" ] ; then
+ if [ "${KBUILD_DEBARCH:+set}" ]; then
debarch="$KBUILD_DEBARCH"
return
fi
@@ -125,32 +125,34 @@ gen_source ()
rm -rf debian
mkdir debian
-email=${DEBEMAIL-$EMAIL}
-
-# use email string directly if it contains <email>
-if echo "${email}" | grep -q '<.*>'; then
- maintainer=${email}
+user=${KBUILD_BUILD_USER:-$(id -nu)}
+name=${DEBFULLNAME:-${user}}
+if [ "${DEBEMAIL:+set}" ]; then
+ email=${DEBEMAIL}
else
- # or construct the maintainer string
- user=${KBUILD_BUILD_USER-$(id -nu)}
- name=${DEBFULLNAME-${user}}
- if [ -z "${email}" ]; then
- buildhost=${KBUILD_BUILD_HOST-$(hostname -f 2>/dev/null || hostname)}
- email="${user}@${buildhost}"
- fi
- maintainer="${name} <${email}>"
+ buildhost=${KBUILD_BUILD_HOST:-$(hostname -f 2>/dev/null || hostname)}
+ email="${user}@${buildhost}"
fi
+maintainer="${name} <${email}>"
-if [ "$1" = --need-source ]; then
- gen_source
-fi
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --need-source)
+ gen_source
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
# Some variables and settings used throughout the script
version=$KERNELRELEASE
-if [ -n "$KDEB_PKGVERSION" ]; then
+if [ "${KDEB_PKGVERSION:+set}" ]; then
packageversion=$KDEB_PKGVERSION
else
- packageversion=$(${srctree}/scripts/setlocalversion --no-local ${srctree})-$($srctree/init/build-version)
+ packageversion=$(${srctree}/scripts/setlocalversion --no-local ${srctree})-$($srctree/scripts/build-version)
fi
sourcename=${KDEB_SOURCENAME:-linux-upstream}
@@ -164,7 +166,7 @@ debarch=
set_debarch
# Try to determine distribution
-if [ -n "$KDEB_CHANGELOG_DIST" ]; then
+if [ "${KDEB_CHANGELOG_DIST:+set}" ]; then
distribution=$KDEB_CHANGELOG_DIST
# In some cases lsb_release returns the codename as n/a, which breaks dpkg-parsechangelog
elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ] && [ "$distribution" != "n/a" ]; then
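
The packaging scripts above (builddeb, buildtar, gen-diff-patch, mkdebian, mkspec, install-extmod-build) all move to set -eu, which is why tests such as [ -n "$KBUILD_DEBARCH" ] become [ "${KBUILD_DEBARCH:+set}" ]: under -u, expanding an unset variable aborts the script, while the ${VAR:+word} form is explicitly exempt. A small self-contained sketch:

#!/bin/sh
set -eu

check_debarch() {
	# Safe under 'set -u': ${KBUILD_DEBARCH:+set} expands to the empty
	# string when the variable is unset or empty instead of aborting.
	if [ "${KBUILD_DEBARCH:+set}" ]; then
		echo "using KBUILD_DEBARCH=${KBUILD_DEBARCH}"
	else
		echo "KBUILD_DEBARCH not given, falling back to autodetection"
	fi
	# By contrast, [ -n "$KBUILD_DEBARCH" ] would kill the script here
	# with an "unbound variable" error whenever the variable is unset.
}

check_debarch                  # works even though KBUILD_DEBARCH is unset
KBUILD_DEBARCH=arm64
check_debarch                  # now reports the explicit value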
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index ce201bfa8377..ead54d67a024 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -9,6 +9,8 @@
# Patched for non-x86 by Opencon (L) 2002 <opencon@rio.skydome.net>
#
+set -eu
+
output=$1
mkdir -p "$(dirname "${output}")"
@@ -24,7 +26,30 @@ fi
cat<<EOF
%define ARCH ${ARCH}
%define KERNELRELEASE ${KERNELRELEASE}
-%define pkg_release $("${srctree}/init/build-version")
+%define pkg_release $("${srctree}/scripts/build-version")
EOF
cat "${srctree}/scripts/package/kernel.spec"
+
+# collect the user's name and email address for the changelog entry
+if [ "$(command -v git)" ]; then
+ name=$(git config user.name) || true
+ email=$(git config user.email) || true
+fi
+
+if [ ! "${name:+set}" ]; then
+ name=${KBUILD_BUILD_USER:-$(id -nu)}
+fi
+
+if [ ! "${email:+set}" ]; then
+ buildhost=${KBUILD_BUILD_HOST:-$(hostname -f 2>/dev/null || hostname)}
+ builduser=${KBUILD_BUILD_USER:-$(id -nu)}
+ email="${builduser}@${buildhost}"
+fi
+
+cat << EOF
+
+%changelog
+* $(LC_ALL=C; date +'%a %b %d %Y') ${name} <${email}>
+- Custom built Linux kernel.
+EOF
diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files
index 385610fe3936..f38d26b78c2a 100755
--- a/scripts/remove-stale-files
+++ b/scripts/remove-stale-files
@@ -20,22 +20,4 @@ set -e
# yard. Stale files stay in this file for a while (for some release cycles?),
# then will be really dead and removed from the code base entirely.
-rm -f arch/powerpc/purgatory/kexec-purgatory.c
-rm -f arch/riscv/purgatory/kexec-purgatory.c
-rm -f arch/x86/purgatory/kexec-purgatory.c
-
-rm -f scripts/extract-cert
-
-rm -f scripts/kconfig/[gmnq]conf-cfg
-
-rm -f rust/target.json
-
-rm -f scripts/bin2c
-
-rm -f .scmversion
-
-rm -rf include/ksym
-
-find . -name '*.usyms' | xargs rm -f
-
rm -f *.spec
diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh
index 117018946b57..5262c56dd674 100755
--- a/scripts/rust_is_available.sh
+++ b/scripts/rust_is_available.sh
@@ -117,20 +117,16 @@ if [ "$rust_compiler_cversion" -lt "$rust_compiler_min_cversion" ]; then
echo >&2 "***"
exit 1
fi
-if [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
- echo >&2 "***"
- echo >&2 "*** Rust compiler '$RUSTC' is too new. This may or may not work."
- echo >&2 "*** Your version: $rust_compiler_version"
- echo >&2 "*** Expected version: $rust_compiler_min_version"
- echo >&2 "***"
- warning=1
-fi
# Check that the Rust bindings generator is suitable.
#
# Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
+#
+# The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
+# (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
+# the minimum version is upgraded past that (0.69.1 already fixed the issue).
rust_bindings_generator_output=$( \
- LC_ALL=C "$BINDGEN" --version 2>/dev/null
+ LC_ALL=C "$BINDGEN" --version workaround-for-0.69.0 2>/dev/null
) || rust_bindings_generator_code=$?
if [ -n "$rust_bindings_generator_code" ]; then
echo >&2 "***"
@@ -165,13 +161,18 @@ if [ "$rust_bindings_generator_cversion" -lt "$rust_bindings_generator_min_cvers
echo >&2 "***"
exit 1
fi
-if [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
- echo >&2 "***"
- echo >&2 "*** Rust bindings generator '$BINDGEN' is too new. This may or may not work."
- echo >&2 "*** Your version: $rust_bindings_generator_version"
- echo >&2 "*** Expected version: $rust_bindings_generator_min_version"
- echo >&2 "***"
- warning=1
+if [ "$rust_bindings_generator_cversion" -eq 6600 ] ||
+ [ "$rust_bindings_generator_cversion" -eq 6601 ]; then
+ # Distributions may have patched the issue (e.g. Debian did).
+ if ! "$BINDGEN" $(dirname $0)/rust_is_available_bindgen_0_66.h >/dev/null; then
+ echo >&2 "***"
+ echo >&2 "*** Rust bindings generator '$BINDGEN' versions 0.66.0 and 0.66.1 may not"
+ echo >&2 "*** work due to a bug (https://github.com/rust-lang/rust-bindgen/pull/2567),"
+ echo >&2 "*** unless patched (like Debian's)."
+ echo >&2 "*** Your version: $rust_bindings_generator_version"
+ echo >&2 "***"
+ warning=1
+ fi
fi
# Check that the `libclang` used by the Rust bindings generator is suitable.
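
The 0.66.x handling added above probes bindgen with rust_is_available_bindgen_0_66.h, a two-line header whose only payload is a #define carrying a "\0" escape, and keys the warning off the exit status: unpatched 0.66.0/0.66.1 fail on it, while patched builds (such as Debian's) succeed. A standalone sketch of the same probe, assuming bindgen is on PATH and GNU mktemp (for --suffix) is available:

#!/bin/sh
probe=$(mktemp --suffix=.h)
printf '/* SPDX-License-Identifier: GPL-2.0 */\n#define A "\\0"\n' > "$probe"

if bindgen "$probe" >/dev/null 2>&1; then
	echo "bindgen handles the 0.66 regression (patched or unaffected)"
else
	echo "bindgen looks like an unpatched 0.66.0 / 0.66.1"
fi

rm -f "$probe"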
diff --git a/scripts/rust_is_available_bindgen_0_66.h b/scripts/rust_is_available_bindgen_0_66.h
new file mode 100644
index 000000000000..c0431293421c
--- /dev/null
+++ b/scripts/rust_is_available_bindgen_0_66.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#define A "\0"
diff --git a/scripts/rust_is_available_test.py b/scripts/rust_is_available_test.py
index 57613fe5ed75..413741037fb3 100755
--- a/scripts/rust_is_available_test.py
+++ b/scripts/rust_is_available_test.py
@@ -54,18 +54,34 @@ else:
""")
@classmethod
- def generate_bindgen(cls, version_stdout, libclang_stderr):
+ def generate_bindgen(cls, version_stdout, libclang_stderr, version_0_66_patched=False):
+ if libclang_stderr is None:
+ libclang_case = f"raise SystemExit({cls.bindgen_default_bindgen_libclang_failure_exit_code})"
+ else:
+ libclang_case = f"print({repr(libclang_stderr)}, file=sys.stderr)"
+
+ if version_0_66_patched:
+ version_0_66_case = "pass"
+ else:
+ version_0_66_case = "raise SystemExit(1)"
+
return cls.generate_executable(f"""#!/usr/bin/env python3
import sys
if "rust_is_available_bindgen_libclang.h" in " ".join(sys.argv):
- print({repr(libclang_stderr)}, file=sys.stderr)
+ {libclang_case}
+elif "rust_is_available_bindgen_0_66.h" in " ".join(sys.argv):
+ {version_0_66_case}
else:
print({repr(version_stdout)})
""")
@classmethod
- def generate_bindgen_version(cls, stdout):
- return cls.generate_bindgen(stdout, cls.bindgen_default_bindgen_libclang_stderr)
+ def generate_bindgen_version(cls, stdout, version_0_66_patched=False):
+ return cls.generate_bindgen(stdout, cls.bindgen_default_bindgen_libclang_stderr, version_0_66_patched)
+
+ @classmethod
+ def generate_bindgen_libclang_failure(cls):
+ return cls.generate_bindgen(cls.bindgen_default_bindgen_version_stdout, None)
@classmethod
def generate_bindgen_libclang(cls, stderr):
@@ -89,6 +105,7 @@ else:
cls.rust_default_sysroot = subprocess.check_output(("rustc", "--print", "sysroot")).decode().strip()
cls.bindgen_default_bindgen_version_stdout = f"bindgen {cls.bindgen_default_version}"
+ cls.bindgen_default_bindgen_libclang_failure_exit_code = 42
cls.bindgen_default_bindgen_libclang_stderr = f"scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version {cls.llvm_default_version} [-W#pragma-messages], err: false"
cls.default_rustc = cls.generate_rustc(f"rustc {cls.rustc_default_version}")
@@ -193,11 +210,6 @@ else:
result = self.run_script(self.Expected.FAILURE, { "RUSTC": rustc })
self.assertIn(f"Rust compiler '{rustc}' is too old.", result.stderr)
- def test_rustc_new_version(self):
- rustc = self.generate_rustc("rustc 1.999.0 (a8314ef7d 2099-06-27)")
- result = self.run_script(self.Expected.SUCCESS_WITH_WARNINGS, { "RUSTC": rustc })
- self.assertIn(f"Rust compiler '{rustc}' is too new. This may or may not work.", result.stderr)
-
def test_bindgen_nonexecutable(self):
result = self.run_script(self.Expected.FAILURE, { "BINDGEN": self.nonexecutable })
self.assertIn(f"Running '{self.nonexecutable}' to check the Rust bindings generator version failed with", result.stderr)
@@ -226,21 +238,24 @@ else:
result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
self.assertIn(f"Rust bindings generator '{bindgen}' is too old.", result.stderr)
- def test_bindgen_new_version(self):
- bindgen = self.generate_bindgen_version("bindgen 0.999.0")
- result = self.run_script(self.Expected.SUCCESS_WITH_WARNINGS, { "BINDGEN": bindgen })
- self.assertIn(f"Rust bindings generator '{bindgen}' is too new. This may or may not work.", result.stderr)
+ def test_bindgen_bad_version_0_66_0_and_0_66_1(self):
+ for version in ("0.66.0", "0.66.1"):
+ with self.subTest(version=version):
+ bindgen = self.generate_bindgen_version(f"bindgen {version}")
+ result = self.run_script(self.Expected.SUCCESS_WITH_WARNINGS, { "BINDGEN": bindgen })
+ self.assertIn(f"Rust bindings generator '{bindgen}' versions 0.66.0 and 0.66.1 may not", result.stderr)
+
+ def test_bindgen_bad_version_0_66_0_and_0_66_1_patched(self):
+ for version in ("0.66.0", "0.66.1"):
+ with self.subTest(version=version):
+ bindgen = self.generate_bindgen_version(f"bindgen {version}", True)
+ result = self.run_script(self.Expected.SUCCESS, { "BINDGEN": bindgen })
def test_bindgen_libclang_failure(self):
- for env in (
- { "LLVM_CONFIG_PATH": self.missing },
- { "LIBCLANG_PATH": self.missing },
- { "CLANG_PATH": self.missing },
- ):
- with self.subTest(env=env):
- result = self.run_script(self.Expected.FAILURE, env | { "PATH": os.environ["PATH"], "BINDGEN": "bindgen" })
- self.assertIn("Running 'bindgen' to check the libclang version (used by the Rust", result.stderr)
- self.assertIn("bindings generator) failed with code ", result.stderr)
+ bindgen = self.generate_bindgen_libclang_failure()
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
+ self.assertIn(f"Running '{bindgen}' to check the libclang version (used by the Rust", result.stderr)
+ self.assertIn(f"bindings generator) failed with code {self.bindgen_default_bindgen_libclang_failure_exit_code}. This may be caused by", result.stderr)
def test_bindgen_libclang_unexpected_version(self):
bindgen = self.generate_bindgen_libclang("scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version unexpected [-W#pragma-messages], err: false")
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index bcfea073e3f2..01b923d97a44 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -1692,6 +1692,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
struct aa_profile *p;
p = aa_deref_parent(profile);
dent = prof_dir(p);
+ if (!dent) {
+ error = -ENOENT;
+ goto fail2;
+ }
/* adding to parent that previously didn't have children */
dent = aafs_create_dir("profiles", dent);
if (IS_ERR(dent))
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index c03eb7c19f16..d52a5b14dad4 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -144,19 +144,6 @@ int aa_audit_file(const struct cred *subj_cred,
return aa_audit(type, profile, &ad, file_audit_cb);
}
-/**
- * is_deleted - test if a file has been completely unlinked
- * @dentry: dentry of file to test for deletion (NOT NULL)
- *
- * Returns: true if deleted else false
- */
-static inline bool is_deleted(struct dentry *dentry)
-{
- if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
- return true;
- return false;
-}
-
static int path_name(const char *op, const struct cred *subj_cred,
struct aa_label *label,
const struct path *path, int flags, char *buffer,
diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h
index 58fdc72af664..7265d2f81dd5 100644
--- a/security/apparmor/include/cred.h
+++ b/security/apparmor/include/cred.h
@@ -63,6 +63,26 @@ static inline struct aa_label *aa_get_newest_cred_label(const struct cred *cred)
return aa_get_newest_label(aa_cred_raw_label(cred));
}
+static inline struct aa_label *aa_get_newest_cred_label_condref(const struct cred *cred,
+ bool *needput)
+{
+ struct aa_label *l = aa_cred_raw_label(cred);
+
+ if (unlikely(label_is_stale(l))) {
+ *needput = true;
+ return aa_get_newest_label(l);
+ }
+
+ *needput = false;
+ return l;
+}
+
+static inline void aa_put_label_condref(struct aa_label *l, bool needput)
+{
+ if (unlikely(needput))
+ aa_put_label(l);
+}
+
/**
* aa_current_raw_label - find the current tasks confining label
*
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 6239777090c4..808060f9effb 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -461,6 +461,7 @@ static int apparmor_file_open(struct file *file)
struct aa_file_ctx *fctx = file_ctx(file);
struct aa_label *label;
int error = 0;
+ bool needput;
if (!path_mediated_fs(file->f_path.dentry))
return 0;
@@ -477,7 +478,7 @@ static int apparmor_file_open(struct file *file)
return 0;
}
- label = aa_get_newest_cred_label(file->f_cred);
+ label = aa_get_newest_cred_label_condref(file->f_cred, &needput);
if (!unconfined(label)) {
struct mnt_idmap *idmap = file_mnt_idmap(file);
struct inode *inode = file_inode(file);
@@ -494,7 +495,7 @@ static int apparmor_file_open(struct file *file)
/* todo cache full allowed permissions set and state */
fctx->allow = aa_map_file_to_perms(file);
}
- aa_put_label(label);
+ aa_put_label_condref(label, needput);
return error;
}
@@ -1124,7 +1125,7 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
* @sock: socket that is being setup
* @family: family of socket being created
* @type: type of the socket
- * @ptotocol: protocol of the socket
+ * @protocol: protocol of the socket
* @kern: socket is a special kernel socket
*
* Note:
@@ -1304,6 +1305,13 @@ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (!skb->secmark)
return 0;
+ /*
+	 * If we reach here before the socket_post_create hook has run, the
+	 * label is still NULL, so drop the packet.
+ */
+ if (!ctx->label)
+ return -EACCES;
+
return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
skb->secmark, sk);
}
@@ -2029,7 +2037,7 @@ static int __init alloc_buffers(void)
}
#ifdef CONFIG_SYSCTL
-static int apparmor_dointvec(struct ctl_table *table, int write,
+static int apparmor_dointvec(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!aa_current_policy_admin_capable(NULL))
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 49fe8da6fea4..bf8863253e07 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -44,6 +44,8 @@ static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
audit_log_format(ab, ", mand");
if (flags & MS_DIRSYNC)
audit_log_format(ab, ", dirsync");
+ if (flags & MS_NOSYMFOLLOW)
+ audit_log_format(ab, ", nosymfollow");
if (flags & MS_NOATIME)
audit_log_format(ab, ", noatime");
if (flags & MS_NODIRATIME)
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 957654d253dd..14df15e35695 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -225,7 +225,7 @@ static void aa_free_data(void *ptr, void *arg)
{
struct aa_data *data = ptr;
- kfree_sensitive(data->data);
+ kvfree_sensitive(data->data, data->size);
kfree_sensitive(data->key);
kfree_sensitive(data);
}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 5e578ef0ddff..5a570235427d 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -747,34 +747,42 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
*info = "missing required dfa";
goto fail;
}
- goto out;
+ } else {
+ /*
+ * only unpack the following if a dfa is present
+ *
+ * sadly start was given different names for file and policydb
+ * but since it is optional we can try both
+ */
+ if (!aa_unpack_u32(e, &pdb->start[0], "start"))
+ /* default start state */
+ pdb->start[0] = DFA_START;
+ if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) {
+ /* default start state for xmatch and file dfa */
+ pdb->start[AA_CLASS_FILE] = DFA_START;
+ } /* setup class index */
+ for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
+ pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0],
+ i);
+ }
}
/*
- * only unpack the following if a dfa is present
- *
- * sadly start was given different names for file and policydb
- * but since it is optional we can try both
+ * Unfortunately due to a bug in earlier userspaces, a
+ * transition table may be present even when the dfa is
+ * not. For compatibility reasons unpack and discard.
*/
- if (!aa_unpack_u32(e, &pdb->start[0], "start"))
- /* default start state */
- pdb->start[0] = DFA_START;
- if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) {
- /* default start state for xmatch and file dfa */
- pdb->start[AA_CLASS_FILE] = DFA_START;
- } /* setup class index */
- for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
- pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0],
- i);
- }
if (!unpack_trans_table(e, &pdb->trans) && required_trans) {
*info = "failed to unpack profile transition table";
goto fail;
}
+ if (!pdb->dfa && pdb->trans.table)
+ aa_free_str_table(&pdb->trans);
+
/* TODO: move compat mapping here, requires dfa merging first */
/* TODO: move verify here, it has to be done after compat mappings */
-out:
+
*policy = pdb;
return 0;
@@ -1071,6 +1079,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
if (rhashtable_insert_fast(profile->data, &data->head,
profile->data->p)) {
+ kvfree_sensitive(data->data, data->size);
kfree_sensitive(data->key);
kfree_sensitive(data);
info = "failed to insert data to table";
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
index 5c9bde25e56d..874fcf97794e 100644
--- a/security/apparmor/policy_unpack_test.c
+++ b/security/apparmor/policy_unpack_test.c
@@ -604,4 +604,5 @@ static struct kunit_suite apparmor_policy_unpack_test_module = {
kunit_test_suite(apparmor_policy_unpack_test_module);
+MODULE_DESCRIPTION("KUnit tests for AppArmor's policy unpack");
MODULE_LICENSE("GPL");
diff --git a/security/landlock/cred.c b/security/landlock/cred.c
index 786af18c4a1c..db9fe7d906ba 100644
--- a/security/landlock/cred.c
+++ b/security/landlock/cred.c
@@ -14,8 +14,8 @@
#include "ruleset.h"
#include "setup.h"
-static int hook_cred_prepare(struct cred *const new,
- const struct cred *const old, const gfp_t gfp)
+static void hook_cred_transfer(struct cred *const new,
+ const struct cred *const old)
{
struct landlock_ruleset *const old_dom = landlock_cred(old)->domain;
@@ -23,6 +23,12 @@ static int hook_cred_prepare(struct cred *const new,
landlock_get_ruleset(old_dom);
landlock_cred(new)->domain = old_dom;
}
+}
+
+static int hook_cred_prepare(struct cred *const new,
+ const struct cred *const old, const gfp_t gfp)
+{
+ hook_cred_transfer(new, old);
return 0;
}
@@ -36,6 +42,7 @@ static void hook_cred_free(struct cred *const cred)
static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(cred_prepare, hook_cred_prepare),
+ LSM_HOOK_INIT(cred_transfer, hook_cred_transfer),
LSM_HOOK_INIT(cred_free, hook_cred_free),
};
diff --git a/security/min_addr.c b/security/min_addr.c
index 88c9a6a21f47..0ce267c041ab 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -29,7 +29,7 @@ static void update_mmap_min_addr(void)
* sysctl handler which just sets dac_mmap_min_addr = the new value and then
* calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly
*/
-int mmap_min_addr_handler(struct ctl_table *table, int write,
+int mmap_min_addr_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 39944a859ff6..e1a5e13ea269 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -436,7 +436,7 @@ static struct security_hook_list yama_hooks[] __ro_after_init = {
};
#ifdef CONFIG_SYSCTL
-static int yama_dointvec_minmax(struct ctl_table *table, int write,
+static int yama_dointvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table table_copy;
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index 40e88d79c483..96d4d7eb879f 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -469,10 +469,10 @@ static struct attribute *ac97_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(ac97_dev);
-static int ac97_bus_match(struct device *dev, struct device_driver *drv)
+static int ac97_bus_match(struct device *dev, const struct device_driver *drv)
{
struct ac97_codec_device *adev = to_ac97_device(dev);
- struct ac97_codec_driver *adrv = to_ac97_driver(drv);
+ const struct ac97_codec_driver *adrv = to_ac97_driver(drv);
const struct ac97_id *id = adrv->id_table;
int i = 0;
diff --git a/sound/core/seq_device.c b/sound/core/seq_device.c
index 654d620d0199..4492be5d2317 100644
--- a/sound/core/seq_device.c
+++ b/sound/core/seq_device.c
@@ -40,7 +40,7 @@ MODULE_LICENSE("GPL");
/*
* bus definition
*/
-static int snd_seq_bus_match(struct device *dev, struct device_driver *drv)
+static int snd_seq_bus_match(struct device *dev, const struct device_driver *drv)
{
struct snd_seq_device *sdev = to_seq_dev(dev);
struct snd_seq_driver *sdrv = to_seq_drv(drv);
@@ -234,7 +234,7 @@ int snd_seq_device_new(struct snd_card *card, int device, const char *id,
put_device(&dev->dev);
return err;
}
-
+
if (result)
*result = dev;
diff --git a/sound/core/ump.c b/sound/core/ump.c
index 3f61220c23b4..0f0d7e895c5a 100644
--- a/sound/core/ump.c
+++ b/sound/core/ump.c
@@ -733,6 +733,12 @@ static void fill_fb_info(struct snd_ump_endpoint *ump,
info->block_id, info->direction, info->active,
info->first_group, info->num_groups, info->midi_ci_version,
info->sysex8_streams, info->flags);
+
+ if ((info->flags & SNDRV_UMP_BLOCK_IS_MIDI1) && info->num_groups != 1) {
+ info->num_groups = 1;
+ ump_dbg(ump, "FB %d: corrected groups to 1 for MIDI1\n",
+ info->block_id);
+ }
}
/* check whether the FB info gets updated by the current message */
@@ -806,6 +812,13 @@ static int ump_handle_fb_name_msg(struct snd_ump_endpoint *ump,
if (!fb)
return -ENODEV;
+ if (ump->parsed &&
+ (ump->info.flags & SNDRV_UMP_EP_INFO_STATIC_BLOCKS)) {
+ ump_dbg(ump, "Skipping static FB name update (blk#%d)\n",
+ fb->info.block_id);
+ return 0;
+ }
+
ret = ump_append_string(ump, fb->info.name, sizeof(fb->info.name),
buf->raw, 3);
/* notify the FB name update to sequencer, too */
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index d35d0a420ee0..1a163bbcabd7 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -1180,8 +1180,7 @@ static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_
(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
for (i = 0; i < packets; ++i) {
- DEFINE_FLEX(struct fw_iso_packet, template, header,
- header_length, CIP_HEADER_QUADLETS);
+ DEFINE_RAW_FLEX(struct fw_iso_packet, template, header, CIP_HEADER_QUADLETS);
bool sched_irq = false;
build_it_pkt_header(s, desc->cycle, template, pkt_header_length,
diff --git a/sound/hda/hda_bus_type.c b/sound/hda/hda_bus_type.c
index cce2c30511a2..7545ace7b0ee 100644
--- a/sound/hda/hda_bus_type.c
+++ b/sound/hda/hda_bus_type.c
@@ -46,7 +46,7 @@ static int hdac_codec_match(struct hdac_device *dev, struct hdac_driver *drv)
return 0;
}
-static int hda_bus_match(struct device *dev, struct device_driver *drv)
+static int hda_bus_match(struct device *dev, const struct device_driver *drv)
{
struct hdac_device *hdev = dev_to_hdac_dev(dev);
struct hdac_driver *hdrv = drv_to_hdac_driver(drv);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c3a86a99f8c6..ba0ce8750ca4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4800,6 +4800,8 @@ static void alc298_fixup_samsung_amp(struct hda_codec *codec,
}
}
+#include "samsung_helper.c"
+
#if IS_REACHABLE(CONFIG_INPUT)
static void gpio2_mic_hotkey_event(struct hda_codec *codec,
struct hda_jack_callback *event)
@@ -7429,6 +7431,7 @@ enum {
ALC236_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
ALC298_FIXUP_SAMSUNG_AMP,
+ ALC298_FIXUP_SAMSUNG_AMP2,
ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
@@ -9055,6 +9058,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET
},
+ [ALC298_FIXUP_SAMSUNG_AMP2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_samsung_amp2
+ },
[ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -10359,10 +10366,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
- SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
- SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -10406,6 +10413,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc1ca, "Samsung Galaxy Book3 Pro 360 (NP960QFG-KB1US)", ALC298_FIXUP_SAMSUNG_AMP2),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
@@ -10843,6 +10851,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
{.id = ALC298_FIXUP_SAMSUNG_AMP, .name = "alc298-samsung-amp"},
+ {.id = ALC298_FIXUP_SAMSUNG_AMP2, .name = "alc298-samsung-amp2"},
{.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"},
{.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
{.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
diff --git a/sound/pci/hda/samsung_helper.c b/sound/pci/hda/samsung_helper.c
new file mode 100644
index 000000000000..a40175b69015
--- /dev/null
+++ b/sound/pci/hda/samsung_helper.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Helper functions for Samsung Galaxy Book3 audio initialization */
+
+struct alc298_samsung_coeff_fixup_desc {
+ unsigned char coeff_idx;
+ unsigned short coeff_value;
+};
+
+struct alc298_samsung_coeff_seq_desc {
+ unsigned short coeff_0x23;
+ unsigned short coeff_0x24;
+ unsigned short coeff_0x25;
+ unsigned short coeff_0x26;
+};
+
+
+static inline void alc298_samsung_write_coef_pack2(struct hda_codec *codec,
+ const struct alc298_samsung_coeff_seq_desc *seq)
+{
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ if ((alc_read_coef_idx(codec, 0x26) & 0x0010) == 0)
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ alc_write_coef_idx(codec, 0x23, seq->coeff_0x23);
+ alc_write_coef_idx(codec, 0x24, seq->coeff_0x24);
+ alc_write_coef_idx(codec, 0x25, seq->coeff_0x25);
+ alc_write_coef_idx(codec, 0x26, seq->coeff_0x26);
+}
+
+static inline void alc298_samsung_write_coef_pack_seq(
+ struct hda_codec *codec,
+ unsigned char target,
+ const struct alc298_samsung_coeff_seq_desc seq[],
+ int count)
+{
+ alc_write_coef_idx(codec, 0x22, target);
+ for (int i = 0; i < count; i++)
+ alc298_samsung_write_coef_pack2(codec, &seq[i]);
+}
+
+static void alc298_fixup_samsung_amp2(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ int i;
+ static const struct alc298_samsung_coeff_fixup_desc fixups1[] = {
+ { 0x99, 0x8000 }, { 0x82, 0x4408 }, { 0x32, 0x3f00 }, { 0x0e, 0x6f80 },
+ { 0x10, 0x0e21 }, { 0x55, 0x8000 }, { 0x08, 0x2fcf }, { 0x08, 0x2fcf },
+ { 0x2d, 0xc020 }, { 0x19, 0x0017 }, { 0x50, 0x1000 }, { 0x0e, 0x6f80 },
+ { 0x08, 0x2fcf }, { 0x80, 0x0011 }, { 0x2b, 0x0c10 }, { 0x2d, 0xc020 },
+ { 0x03, 0x0042 }, { 0x0f, 0x0062 }, { 0x08, 0x2fcf },
+ };
+
+ static const struct alc298_samsung_coeff_seq_desc amp_0x38[] = {
+ { 0x2000, 0x0000, 0x0001, 0xb011 }, { 0x23ff, 0x0000, 0x0000, 0xb011 },
+ { 0x203a, 0x0000, 0x0080, 0xb011 }, { 0x23e1, 0x0000, 0x0000, 0xb011 },
+ { 0x2012, 0x0000, 0x006f, 0xb011 }, { 0x2014, 0x0000, 0x0000, 0xb011 },
+ { 0x201b, 0x0000, 0x0001, 0xb011 }, { 0x201d, 0x0000, 0x0001, 0xb011 },
+ { 0x201f, 0x0000, 0x00fe, 0xb011 }, { 0x2021, 0x0000, 0x0000, 0xb011 },
+ { 0x2022, 0x0000, 0x0010, 0xb011 }, { 0x203d, 0x0000, 0x0005, 0xb011 },
+ { 0x203f, 0x0000, 0x0003, 0xb011 }, { 0x2050, 0x0000, 0x002c, 0xb011 },
+ { 0x2076, 0x0000, 0x000e, 0xb011 }, { 0x207c, 0x0000, 0x004a, 0xb011 },
+ { 0x2081, 0x0000, 0x0003, 0xb011 }, { 0x2399, 0x0000, 0x0003, 0xb011 },
+ { 0x23a4, 0x0000, 0x00b5, 0xb011 }, { 0x23a5, 0x0000, 0x0001, 0xb011 },
+ { 0x23ba, 0x0000, 0x0094, 0xb011 }, { 0x2100, 0x00d0, 0x950e, 0xb017 },
+ { 0x2104, 0x0061, 0xd4e2, 0xb017 }, { 0x2108, 0x00d0, 0x950e, 0xb017 },
+ { 0x210c, 0x0075, 0xf4e2, 0xb017 }, { 0x2110, 0x00b4, 0x4b0d, 0xb017 },
+ { 0x2114, 0x000a, 0x1000, 0xb017 }, { 0x2118, 0x0015, 0x2000, 0xb017 },
+ { 0x211c, 0x000a, 0x1000, 0xb017 }, { 0x2120, 0x0075, 0xf4e2, 0xb017 },
+ { 0x2124, 0x00b4, 0x4b0d, 0xb017 }, { 0x2128, 0x0000, 0x0010, 0xb017 },
+ { 0x212c, 0x0000, 0x0000, 0xb017 }, { 0x2130, 0x0000, 0x0000, 0xb017 },
+ { 0x2134, 0x0000, 0x0000, 0xb017 }, { 0x2138, 0x0000, 0x0000, 0xb017 },
+ { 0x213c, 0x0000, 0x0010, 0xb017 }, { 0x2140, 0x0000, 0x0000, 0xb017 },
+ { 0x2144, 0x0000, 0x0000, 0xb017 }, { 0x2148, 0x0000, 0x0000, 0xb017 },
+ { 0x214c, 0x0000, 0x0000, 0xb017 }, { 0x2150, 0x0000, 0x0010, 0xb017 },
+ { 0x2154, 0x0000, 0x0000, 0xb017 }, { 0x2158, 0x0000, 0x0000, 0xb017 },
+ { 0x215c, 0x0000, 0x0000, 0xb017 }, { 0x2160, 0x0000, 0x0000, 0xb017 },
+ { 0x2164, 0x0000, 0x0010, 0xb017 }, { 0x2168, 0x0000, 0x0000, 0xb017 },
+ { 0x216c, 0x0000, 0x0000, 0xb017 }, { 0x2170, 0x0000, 0x0000, 0xb017 },
+ { 0x2174, 0x0000, 0x0000, 0xb017 }, { 0x2178, 0x0000, 0x0010, 0xb017 },
+ { 0x217c, 0x0000, 0x0000, 0xb017 }, { 0x2180, 0x0000, 0x0000, 0xb017 },
+ { 0x2184, 0x0000, 0x0000, 0xb017 }, { 0x2188, 0x0000, 0x0000, 0xb017 },
+ { 0x218c, 0x0064, 0x5800, 0xb017 }, { 0x2190, 0x00c8, 0xb000, 0xb017 },
+ { 0x2194, 0x0064, 0x5800, 0xb017 }, { 0x2198, 0x003d, 0x5be7, 0xb017 },
+ { 0x219c, 0x0054, 0x060a, 0xb017 }, { 0x21a0, 0x00c8, 0xa310, 0xb017 },
+ { 0x21a4, 0x0029, 0x4de5, 0xb017 }, { 0x21a8, 0x0032, 0x420c, 0xb017 },
+ { 0x21ac, 0x0029, 0x4de5, 0xb017 }, { 0x21b0, 0x00fa, 0xe50c, 0xb017 },
+ { 0x21b4, 0x0000, 0x0010, 0xb017 }, { 0x21b8, 0x0000, 0x0000, 0xb017 },
+ { 0x21bc, 0x0000, 0x0000, 0xb017 }, { 0x21c0, 0x0000, 0x0000, 0xb017 },
+ { 0x21c4, 0x0000, 0x0000, 0xb017 }, { 0x21c8, 0x0056, 0xc50f, 0xb017 },
+ { 0x21cc, 0x007b, 0xd7e1, 0xb017 }, { 0x21d0, 0x0077, 0xa70e, 0xb017 },
+ { 0x21d4, 0x00e0, 0xbde1, 0xb017 }, { 0x21d8, 0x0032, 0x530e, 0xb017 },
+ { 0x2204, 0x00fb, 0x7e0f, 0xb017 }, { 0x2208, 0x000b, 0x02e1, 0xb017 },
+ { 0x220c, 0x00fb, 0x7e0f, 0xb017 }, { 0x2210, 0x00d5, 0x17e1, 0xb017 },
+ { 0x2214, 0x00c0, 0x130f, 0xb017 }, { 0x2218, 0x00e5, 0x0a00, 0xb017 },
+ { 0x221c, 0x00cb, 0x1500, 0xb017 }, { 0x2220, 0x00e5, 0x0a00, 0xb017 },
+ { 0x2224, 0x00d5, 0x17e1, 0xb017 }, { 0x2228, 0x00c0, 0x130f, 0xb017 },
+ { 0x222c, 0x00f5, 0xdb0e, 0xb017 }, { 0x2230, 0x0017, 0x48e2, 0xb017 },
+ { 0x2234, 0x00f5, 0xdb0e, 0xb017 }, { 0x2238, 0x00ef, 0x5ce2, 0xb017 },
+ { 0x223c, 0x00c1, 0xcc0d, 0xb017 }, { 0x2240, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x2244, 0x0017, 0x48e2, 0xb017 }, { 0x2248, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x224c, 0x00ef, 0x5ce2, 0xb017 }, { 0x2250, 0x00c1, 0xcc0d, 0xb017 },
+ { 0x2254, 0x00f5, 0xdb0e, 0xb017 }, { 0x2258, 0x0017, 0x48e2, 0xb017 },
+ { 0x225c, 0x00f5, 0xdb0e, 0xb017 }, { 0x2260, 0x00ef, 0x5ce2, 0xb017 },
+ { 0x2264, 0x00c1, 0xcc0d, 0xb017 }, { 0x2268, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x226c, 0x0017, 0x48e2, 0xb017 }, { 0x2270, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x2274, 0x00ef, 0x5ce2, 0xb017 }, { 0x2278, 0x00c1, 0xcc0d, 0xb017 },
+ { 0x227c, 0x00f5, 0xdb0e, 0xb017 }, { 0x2280, 0x0017, 0x48e2, 0xb017 },
+ { 0x2284, 0x00f5, 0xdb0e, 0xb017 }, { 0x2288, 0x00ef, 0x5ce2, 0xb017 },
+ { 0x228c, 0x00c1, 0xcc0d, 0xb017 }, { 0x22cc, 0x00e8, 0x8d00, 0xb017 },
+ { 0x22d0, 0x0000, 0x0000, 0xb017 }, { 0x22d4, 0x0018, 0x72ff, 0xb017 },
+ { 0x22d8, 0x00ce, 0x25e1, 0xb017 }, { 0x22dc, 0x002f, 0xe40e, 0xb017 },
+ { 0x238e, 0x0000, 0x0099, 0xb011 }, { 0x238f, 0x0000, 0x0011, 0xb011 },
+ { 0x2390, 0x0000, 0x0056, 0xb011 }, { 0x2391, 0x0000, 0x0004, 0xb011 },
+ { 0x2392, 0x0000, 0x00bb, 0xb011 }, { 0x2393, 0x0000, 0x006d, 0xb011 },
+ { 0x2394, 0x0000, 0x0010, 0xb011 }, { 0x2395, 0x0000, 0x0064, 0xb011 },
+ { 0x2396, 0x0000, 0x00b6, 0xb011 }, { 0x2397, 0x0000, 0x0028, 0xb011 },
+ { 0x2398, 0x0000, 0x000b, 0xb011 }, { 0x239a, 0x0000, 0x0099, 0xb011 },
+ { 0x239b, 0x0000, 0x000d, 0xb011 }, { 0x23a6, 0x0000, 0x0064, 0xb011 },
+ { 0x23a7, 0x0000, 0x0078, 0xb011 }, { 0x23b9, 0x0000, 0x0000, 0xb011 },
+ { 0x23e0, 0x0000, 0x0021, 0xb011 }, { 0x23e1, 0x0000, 0x0001, 0xb011 },
+ };
+
+ static const struct alc298_samsung_coeff_seq_desc amp_0x39[] = {
+ { 0x2000, 0x0000, 0x0001, 0xb011 }, { 0x23ff, 0x0000, 0x0000, 0xb011 },
+ { 0x203a, 0x0000, 0x0080, 0xb011 }, { 0x23e1, 0x0000, 0x0000, 0xb011 },
+ { 0x2012, 0x0000, 0x006f, 0xb011 }, { 0x2014, 0x0000, 0x0000, 0xb011 },
+ { 0x201b, 0x0000, 0x0002, 0xb011 }, { 0x201d, 0x0000, 0x0002, 0xb011 },
+ { 0x201f, 0x0000, 0x00fd, 0xb011 }, { 0x2021, 0x0000, 0x0001, 0xb011 },
+ { 0x2022, 0x0000, 0x0010, 0xb011 }, { 0x203d, 0x0000, 0x0005, 0xb011 },
+ { 0x203f, 0x0000, 0x0003, 0xb011 }, { 0x2050, 0x0000, 0x002c, 0xb011 },
+ { 0x2076, 0x0000, 0x000e, 0xb011 }, { 0x207c, 0x0000, 0x004a, 0xb011 },
+ { 0x2081, 0x0000, 0x0003, 0xb011 }, { 0x2399, 0x0000, 0x0003, 0xb011 },
+ { 0x23a4, 0x0000, 0x00b5, 0xb011 }, { 0x23a5, 0x0000, 0x0001, 0xb011 },
+ { 0x23ba, 0x0000, 0x0094, 0xb011 }, { 0x2100, 0x00d0, 0x950e, 0xb017 },
+ { 0x2104, 0x0061, 0xd4e2, 0xb017 }, { 0x2108, 0x00d0, 0x950e, 0xb017 },
+ { 0x210c, 0x0075, 0xf4e2, 0xb017 }, { 0x2110, 0x00b4, 0x4b0d, 0xb017 },
+ { 0x2114, 0x000a, 0x1000, 0xb017 }, { 0x2118, 0x0015, 0x2000, 0xb017 },
+ { 0x211c, 0x000a, 0x1000, 0xb017 }, { 0x2120, 0x0075, 0xf4e2, 0xb017 },
+ { 0x2124, 0x00b4, 0x4b0d, 0xb017 }, { 0x2128, 0x0000, 0x0010, 0xb017 },
+ { 0x212c, 0x0000, 0x0000, 0xb017 }, { 0x2130, 0x0000, 0x0000, 0xb017 },
+ { 0x2134, 0x0000, 0x0000, 0xb017 }, { 0x2138, 0x0000, 0x0000, 0xb017 },
+ { 0x213c, 0x0000, 0x0010, 0xb017 }, { 0x2140, 0x0000, 0x0000, 0xb017 },
+ { 0x2144, 0x0000, 0x0000, 0xb017 }, { 0x2148, 0x0000, 0x0000, 0xb017 },
+ { 0x214c, 0x0000, 0x0000, 0xb017 }, { 0x2150, 0x0000, 0x0010, 0xb017 },
+ { 0x2154, 0x0000, 0x0000, 0xb017 }, { 0x2158, 0x0000, 0x0000, 0xb017 },
+ { 0x215c, 0x0000, 0x0000, 0xb017 }, { 0x2160, 0x0000, 0x0000, 0xb017 },
+ { 0x2164, 0x0000, 0x0010, 0xb017 }, { 0x2168, 0x0000, 0x0000, 0xb017 },
+ { 0x216c, 0x0000, 0x0000, 0xb017 }, { 0x2170, 0x0000, 0x0000, 0xb017 },
+ { 0x2174, 0x0000, 0x0000, 0xb017 }, { 0x2178, 0x0000, 0x0010, 0xb017 },
+ { 0x217c, 0x0000, 0x0000, 0xb017 }, { 0x2180, 0x0000, 0x0000, 0xb017 },
+ { 0x2184, 0x0000, 0x0000, 0xb017 }, { 0x2188, 0x0000, 0x0000, 0xb017 },
+ { 0x218c, 0x0064, 0x5800, 0xb017 }, { 0x2190, 0x00c8, 0xb000, 0xb017 },
+ { 0x2194, 0x0064, 0x5800, 0xb017 }, { 0x2198, 0x003d, 0x5be7, 0xb017 },
+ { 0x219c, 0x0054, 0x060a, 0xb017 }, { 0x21a0, 0x00c8, 0xa310, 0xb017 },
+ { 0x21a4, 0x0029, 0x4de5, 0xb017 }, { 0x21a8, 0x0032, 0x420c, 0xb017 },
+ { 0x21ac, 0x0029, 0x4de5, 0xb017 }, { 0x21b0, 0x00fa, 0xe50c, 0xb017 },
+ { 0x21b4, 0x0000, 0x0010, 0xb017 }, { 0x21b8, 0x0000, 0x0000, 0xb017 },
+ { 0x21bc, 0x0000, 0x0000, 0xb017 }, { 0x21c0, 0x0000, 0x0000, 0xb017 },
+ { 0x21c4, 0x0000, 0x0000, 0xb017 }, { 0x21c8, 0x0056, 0xc50f, 0xb017 },
+ { 0x21cc, 0x007b, 0xd7e1, 0xb017 }, { 0x21d0, 0x0077, 0xa70e, 0xb017 },
+ { 0x21d4, 0x00e0, 0xbde1, 0xb017 }, { 0x21d8, 0x0032, 0x530e, 0xb017 },
+ { 0x2204, 0x00fb, 0x7e0f, 0xb017 }, { 0x2208, 0x000b, 0x02e1, 0xb017 },
+ { 0x220c, 0x00fb, 0x7e0f, 0xb017 }, { 0x2210, 0x00d5, 0x17e1, 0xb017 },
+ { 0x2214, 0x00c0, 0x130f, 0xb017 }, { 0x2218, 0x00e5, 0x0a00, 0xb017 },
+ { 0x221c, 0x00cb, 0x1500, 0xb017 }, { 0x2220, 0x00e5, 0x0a00, 0xb017 },
+ { 0x2224, 0x00d5, 0x17e1, 0xb017 }, { 0x2228, 0x00c0, 0x130f, 0xb017 },
+ { 0x222c, 0x00f5, 0xdb0e, 0xb017 }, { 0x2230, 0x0017, 0x48e2, 0xb017 },
+ { 0x2234, 0x00f5, 0xdb0e, 0xb017 }, { 0x2238, 0x00ef, 0x5ce2, 0xb017 },
+ { 0x223c, 0x00c1, 0xcc0d, 0xb017 }, { 0x2240, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x2244, 0x0017, 0x48e2, 0xb017 }, { 0x2248, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x224c, 0x00ef, 0x5ce2, 0xb017 }, { 0x2250, 0x00c1, 0xcc0d, 0xb017 },
+ { 0x2254, 0x00f5, 0xdb0e, 0xb017 }, { 0x2258, 0x0017, 0x48e2, 0xb017 },
+ { 0x225c, 0x00f5, 0xdb0e, 0xb017 }, { 0x2260, 0x00ef, 0x5ce2, 0xb017 },
+ { 0x2264, 0x00c1, 0xcc0d, 0xb017 }, { 0x2268, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x226c, 0x0017, 0x48e2, 0xb017 }, { 0x2270, 0x00f5, 0xdb0e, 0xb017 },
+ { 0x2274, 0x00ef, 0x5ce2, 0xb017 }, { 0x2278, 0x00c1, 0xcc0d, 0xb017 },
+ { 0x227c, 0x00f5, 0xdb0e, 0xb017 }, { 0x2280, 0x0017, 0x48e2, 0xb017 },
+ { 0x2284, 0x00f5, 0xdb0e, 0xb017 }, { 0x2288, 0x00ef, 0x5ce2, 0xb017 },
+ { 0x228c, 0x00c1, 0xcc0d, 0xb017 }, { 0x22cc, 0x00e8, 0x8d00, 0xb017 },
+ { 0x22d0, 0x0000, 0x0000, 0xb017 }, { 0x22d4, 0x0018, 0x72ff, 0xb017 },
+ { 0x22d8, 0x00ce, 0x25e1, 0xb017 }, { 0x22dc, 0x002f, 0xe40e, 0xb017 },
+ { 0x238e, 0x0000, 0x0099, 0xb011 }, { 0x238f, 0x0000, 0x0011, 0xb011 },
+ { 0x2390, 0x0000, 0x0056, 0xb011 }, { 0x2391, 0x0000, 0x0004, 0xb011 },
+ { 0x2392, 0x0000, 0x00bb, 0xb011 }, { 0x2393, 0x0000, 0x006d, 0xb011 },
+ { 0x2394, 0x0000, 0x0010, 0xb011 }, { 0x2395, 0x0000, 0x0064, 0xb011 },
+ { 0x2396, 0x0000, 0x00b6, 0xb011 }, { 0x2397, 0x0000, 0x0028, 0xb011 },
+ { 0x2398, 0x0000, 0x000b, 0xb011 }, { 0x239a, 0x0000, 0x0099, 0xb011 },
+ { 0x239b, 0x0000, 0x000d, 0xb011 }, { 0x23a6, 0x0000, 0x0064, 0xb011 },
+ { 0x23a7, 0x0000, 0x0078, 0xb011 }, { 0x23b9, 0x0000, 0x0000, 0xb011 },
+ { 0x23e0, 0x0000, 0x0021, 0xb011 }, { 0x23e1, 0x0000, 0x0001, 0xb011 },
+ };
+
+ static const struct alc298_samsung_coeff_seq_desc amp_0x3c[] = {
+ { 0x2000, 0x0000, 0x0001, 0xb011 }, { 0x23ff, 0x0000, 0x0000, 0xb011 },
+ { 0x203a, 0x0000, 0x0080, 0xb011 }, { 0x23e1, 0x0000, 0x0000, 0xb011 },
+ { 0x2012, 0x0000, 0x006f, 0xb011 }, { 0x2014, 0x0000, 0x0000, 0xb011 },
+ { 0x201b, 0x0000, 0x0001, 0xb011 }, { 0x201d, 0x0000, 0x0001, 0xb011 },
+ { 0x201f, 0x0000, 0x00fe, 0xb011 }, { 0x2021, 0x0000, 0x0000, 0xb011 },
+ { 0x2022, 0x0000, 0x0010, 0xb011 }, { 0x203d, 0x0000, 0x0005, 0xb011 },
+ { 0x203f, 0x0000, 0x0003, 0xb011 }, { 0x2050, 0x0000, 0x002c, 0xb011 },
+ { 0x2076, 0x0000, 0x000e, 0xb011 }, { 0x207c, 0x0000, 0x004a, 0xb011 },
+ { 0x2081, 0x0000, 0x0003, 0xb011 }, { 0x23ba, 0x0000, 0x008d, 0xb011 },
+ { 0x2128, 0x0005, 0x460d, 0xb017 }, { 0x212c, 0x00f6, 0x73e5, 0xb017 },
+ { 0x2130, 0x0005, 0x460d, 0xb017 }, { 0x2134, 0x00c0, 0xe9e5, 0xb017 },
+ { 0x2138, 0x00d5, 0x010b, 0xb017 }, { 0x213c, 0x009d, 0x7809, 0xb017 },
+ { 0x2140, 0x00c5, 0x0eed, 0xb017 }, { 0x2144, 0x009d, 0x7809, 0xb017 },
+ { 0x2148, 0x00c4, 0x4ef0, 0xb017 }, { 0x214c, 0x003a, 0x3106, 0xb017 },
+ { 0x2150, 0x00af, 0x750e, 0xb017 }, { 0x2154, 0x008c, 0x1ff1, 0xb017 },
+ { 0x2158, 0x009e, 0x360c, 0xb017 }, { 0x215c, 0x008c, 0x1ff1, 0xb017 },
+ { 0x2160, 0x004d, 0xac0a, 0xb017 }, { 0x2164, 0x007d, 0xa00f, 0xb017 },
+ { 0x2168, 0x00e1, 0x9ce3, 0xb017 }, { 0x216c, 0x00e8, 0x590e, 0xb017 },
+ { 0x2170, 0x00e1, 0x9ce3, 0xb017 }, { 0x2174, 0x0066, 0xfa0d, 0xb017 },
+ { 0x2178, 0x0000, 0x0010, 0xb017 }, { 0x217c, 0x0000, 0x0000, 0xb017 },
+ { 0x2180, 0x0000, 0x0000, 0xb017 }, { 0x2184, 0x0000, 0x0000, 0xb017 },
+ { 0x2188, 0x0000, 0x0000, 0xb017 }, { 0x218c, 0x0000, 0x0010, 0xb017 },
+ { 0x2190, 0x0000, 0x0000, 0xb017 }, { 0x2194, 0x0000, 0x0000, 0xb017 },
+ { 0x2198, 0x0000, 0x0000, 0xb017 }, { 0x219c, 0x0000, 0x0000, 0xb017 },
+ { 0x21a0, 0x0000, 0x0010, 0xb017 }, { 0x21a4, 0x0000, 0x0000, 0xb017 },
+ { 0x21a8, 0x0000, 0x0000, 0xb017 }, { 0x21ac, 0x0000, 0x0000, 0xb017 },
+ { 0x21b0, 0x0000, 0x0000, 0xb017 }, { 0x21b4, 0x0000, 0x0010, 0xb017 },
+ { 0x21b8, 0x0000, 0x0000, 0xb017 }, { 0x21bc, 0x0000, 0x0000, 0xb017 },
+ { 0x21c0, 0x0000, 0x0000, 0xb017 }, { 0x21c4, 0x0000, 0x0000, 0xb017 },
+ { 0x23b9, 0x0000, 0x0000, 0xb011 }, { 0x23e0, 0x0000, 0x0020, 0xb011 },
+ { 0x23e1, 0x0000, 0x0001, 0xb011 },
+ };
+
+ static const struct alc298_samsung_coeff_seq_desc amp_0x3d[] = {
+ { 0x2000, 0x0000, 0x0001, 0xb011 }, { 0x23ff, 0x0000, 0x0000, 0xb011 },
+ { 0x203a, 0x0000, 0x0080, 0xb011 }, { 0x23e1, 0x0000, 0x0000, 0xb011 },
+ { 0x2012, 0x0000, 0x006f, 0xb011 }, { 0x2014, 0x0000, 0x0000, 0xb011 },
+ { 0x201b, 0x0000, 0x0002, 0xb011 }, { 0x201d, 0x0000, 0x0002, 0xb011 },
+ { 0x201f, 0x0000, 0x00fd, 0xb011 }, { 0x2021, 0x0000, 0x0001, 0xb011 },
+ { 0x2022, 0x0000, 0x0010, 0xb011 }, { 0x203d, 0x0000, 0x0005, 0xb011 },
+ { 0x203f, 0x0000, 0x0003, 0xb011 }, { 0x2050, 0x0000, 0x002c, 0xb011 },
+ { 0x2076, 0x0000, 0x000e, 0xb011 }, { 0x207c, 0x0000, 0x004a, 0xb011 },
+ { 0x2081, 0x0000, 0x0003, 0xb011 }, { 0x23ba, 0x0000, 0x008d, 0xb011 },
+ { 0x2128, 0x0005, 0x460d, 0xb017 }, { 0x212c, 0x00f6, 0x73e5, 0xb017 },
+ { 0x2130, 0x0005, 0x460d, 0xb017 }, { 0x2134, 0x00c0, 0xe9e5, 0xb017 },
+ { 0x2138, 0x00d5, 0x010b, 0xb017 }, { 0x213c, 0x009d, 0x7809, 0xb017 },
+ { 0x2140, 0x00c5, 0x0eed, 0xb017 }, { 0x2144, 0x009d, 0x7809, 0xb017 },
+ { 0x2148, 0x00c4, 0x4ef0, 0xb017 }, { 0x214c, 0x003a, 0x3106, 0xb017 },
+ { 0x2150, 0x00af, 0x750e, 0xb017 }, { 0x2154, 0x008c, 0x1ff1, 0xb017 },
+ { 0x2158, 0x009e, 0x360c, 0xb017 }, { 0x215c, 0x008c, 0x1ff1, 0xb017 },
+ { 0x2160, 0x004d, 0xac0a, 0xb017 }, { 0x2164, 0x007d, 0xa00f, 0xb017 },
+ { 0x2168, 0x00e1, 0x9ce3, 0xb017 }, { 0x216c, 0x00e8, 0x590e, 0xb017 },
+ { 0x2170, 0x00e1, 0x9ce3, 0xb017 }, { 0x2174, 0x0066, 0xfa0d, 0xb017 },
+ { 0x2178, 0x0000, 0x0010, 0xb017 }, { 0x217c, 0x0000, 0x0000, 0xb017 },
+ { 0x2180, 0x0000, 0x0000, 0xb017 }, { 0x2184, 0x0000, 0x0000, 0xb017 },
+ { 0x2188, 0x0000, 0x0000, 0xb017 }, { 0x218c, 0x0000, 0x0010, 0xb017 },
+ { 0x2190, 0x0000, 0x0000, 0xb017 }, { 0x2194, 0x0000, 0x0000, 0xb017 },
+ { 0x2198, 0x0000, 0x0000, 0xb017 }, { 0x219c, 0x0000, 0x0000, 0xb017 },
+ { 0x21a0, 0x0000, 0x0010, 0xb017 }, { 0x21a4, 0x0000, 0x0000, 0xb017 },
+ { 0x21a8, 0x0000, 0x0000, 0xb017 }, { 0x21ac, 0x0000, 0x0000, 0xb017 },
+ { 0x21b0, 0x0000, 0x0000, 0xb017 }, { 0x21b4, 0x0000, 0x0010, 0xb017 },
+ { 0x21b8, 0x0000, 0x0000, 0xb017 }, { 0x21bc, 0x0000, 0x0000, 0xb017 },
+ { 0x21c0, 0x0000, 0x0000, 0xb017 }, { 0x21c4, 0x0000, 0x0000, 0xb017 },
+ { 0x23b9, 0x0000, 0x0000, 0xb011 }, { 0x23e0, 0x0000, 0x0020, 0xb011 },
+ { 0x23e1, 0x0000, 0x0001, 0xb011 },
+ };
+
+ static const struct alc298_samsung_coeff_seq_desc amp_seq1[] = {
+ { 0x23ff, 0x0000, 0x0000, 0xb011 }, { 0x203a, 0x0000, 0x0080, 0xb011 },
+ };
+
+ static const struct alc298_samsung_coeff_fixup_desc fixups2[] = {
+ { 0x4f, 0xb029 }, { 0x05, 0x2be0 }, { 0x30, 0x2421 },
+ };
+
+
+ static const struct alc298_samsung_coeff_seq_desc amp_seq2[] = {
+ { 0x203a, 0x0000, 0x0081, 0xb011 }, { 0x23ff, 0x0000, 0x0001, 0xb011 },
+ };
+
+ if (action != HDA_FIXUP_ACT_INIT)
+ return;
+
+ // First set of fixups
+ for (i = 0; i < ARRAY_SIZE(fixups1); i++)
+ alc_write_coef_idx(codec, fixups1[i].coeff_idx, fixups1[i].coeff_value);
+
+ // First set of writes
+ alc298_samsung_write_coef_pack_seq(codec, 0x38, amp_0x38, ARRAY_SIZE(amp_0x38));
+ alc298_samsung_write_coef_pack_seq(codec, 0x39, amp_0x39, ARRAY_SIZE(amp_0x39));
+ alc298_samsung_write_coef_pack_seq(codec, 0x3c, amp_0x3c, ARRAY_SIZE(amp_0x3c));
+ alc298_samsung_write_coef_pack_seq(codec, 0x3d, amp_0x3d, ARRAY_SIZE(amp_0x3d));
+
+ // Second set of writes
+ alc298_samsung_write_coef_pack_seq(codec, 0x38, amp_seq1, ARRAY_SIZE(amp_seq1));
+ alc298_samsung_write_coef_pack_seq(codec, 0x39, amp_seq1, ARRAY_SIZE(amp_seq1));
+ alc298_samsung_write_coef_pack_seq(codec, 0x3c, amp_seq1, ARRAY_SIZE(amp_seq1));
+ alc298_samsung_write_coef_pack_seq(codec, 0x3d, amp_seq1, ARRAY_SIZE(amp_seq1));
+
+ // Second set of fixups
+ for (i = 0; i < ARRAY_SIZE(fixups2); i++)
+ alc_write_coef_idx(codec, fixups2[i].coeff_idx, fixups2[i].coeff_value);
+
+ // Third set of writes
+ alc298_samsung_write_coef_pack_seq(codec, 0x38, amp_seq2, ARRAY_SIZE(amp_seq2));
+ alc298_samsung_write_coef_pack_seq(codec, 0x39, amp_seq2, ARRAY_SIZE(amp_seq2));
+ alc298_samsung_write_coef_pack_seq(codec, 0x3c, amp_seq2, ARRAY_SIZE(amp_seq2));
+ alc298_samsung_write_coef_pack_seq(codec, 0x3d, amp_seq2, ARRAY_SIZE(amp_seq2));
+
+ // Final fixup
+ alc_write_coef_idx(codec, 0x10, 0x0F21);
+}
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index f54466ed8e3e..1769e07e83dc 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -224,6 +224,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21M5"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
}
},
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 7434aeeda292..402b9a2ff024 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -2786,15 +2786,13 @@ int arizona_of_get_audio_pdata(struct arizona *arizona)
{
struct arizona_pdata *pdata = &arizona->pdata;
struct device_node *np = arizona->dev->of_node;
- struct property *prop;
- const __be32 *cur;
u32 val;
u32 pdm_val[ARIZONA_MAX_PDM_SPK];
int ret;
int count = 0;
count = 0;
- of_property_for_each_u32(np, "wlf,inmode", prop, cur, val) {
+ of_property_for_each_u32(np, "wlf,inmode", val) {
if (count == ARRAY_SIZE(pdata->inmode))
break;
@@ -2803,7 +2801,7 @@ int arizona_of_get_audio_pdata(struct arizona *arizona)
}
count = 0;
- of_property_for_each_u32(np, "wlf,dmic-ref", prop, cur, val) {
+ of_property_for_each_u32(np, "wlf,dmic-ref", val) {
if (count == ARRAY_SIZE(pdata->dmic_ref))
break;
@@ -2812,7 +2810,7 @@ int arizona_of_get_audio_pdata(struct arizona *arizona)
}
count = 0;
- of_property_for_each_u32(np, "wlf,out-mono", prop, cur, val) {
+ of_property_for_each_u32(np, "wlf,out-mono", val) {
if (count == ARRAY_SIZE(pdata->out_mono))
break;
@@ -2821,7 +2819,7 @@ int arizona_of_get_audio_pdata(struct arizona *arizona)
}
count = 0;
- of_property_for_each_u32(np, "wlf,max-channels-clocked", prop, cur, val) {
+ of_property_for_each_u32(np, "wlf,max-channels-clocked", val) {
if (count == ARRAY_SIZE(pdata->max_channels_clocked))
break;
@@ -2830,7 +2828,7 @@ int arizona_of_get_audio_pdata(struct arizona *arizona)
}
count = 0;
- of_property_for_each_u32(np, "wlf,out-volume-limit", prop, cur, val) {
+ of_property_for_each_u32(np, "wlf,out-volume-limit", val) {
if (count == ARRAY_SIZE(pdata->out_vol_limit))
break;
diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
index 63626b982d04..8f9a3ae7153e 100644
--- a/sound/soc/codecs/tas2781-fmwlib.c
+++ b/sound/soc/codecs/tas2781-fmwlib.c
@@ -2162,7 +2162,7 @@ static void tasdev_load_calibrated_data(struct tasdevice_priv *priv, int i)
return;
cal = cal_fmw->calibrations;
- if (cal)
+ if (!cal)
return;
load_calib_data(priv, &cal->dev_data);
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 82df887b3af5..f6c3aeff0d8e 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -306,27 +306,12 @@ static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-SND_SOC_DAILINK_DEFS(hifi,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_EMPTY(), COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-SND_SOC_DAILINK_DEFS(hifi_fe,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_DUMMY()),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-SND_SOC_DAILINK_DEFS(hifi_be,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_EMPTY(), COMP_EMPTY()));
-
static const struct snd_soc_dai_link fsl_asoc_card_dai[] = {
/* Default ASoC DAI Link*/
{
.name = "HiFi",
.stream_name = "HiFi",
.ops = &fsl_asoc_card_ops,
- SND_SOC_DAILINK_REG(hifi),
},
/* DPCM Link between Front-End and Back-End (Optional) */
{
@@ -335,7 +320,6 @@ static const struct snd_soc_dai_link fsl_asoc_card_dai[] = {
.dpcm_playback = 1,
.dpcm_capture = 1,
.dynamic = 1,
- SND_SOC_DAILINK_REG(hifi_fe),
},
{
.name = "HiFi-ASRC-BE",
@@ -345,7 +329,6 @@ static const struct snd_soc_dai_link fsl_asoc_card_dai[] = {
.dpcm_playback = 1,
.dpcm_capture = 1,
.no_pcm = 1,
- SND_SOC_DAILINK_REG(hifi_be),
},
};
@@ -637,6 +620,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
struct platform_device *cpu_pdev;
struct fsl_asoc_card_priv *priv;
struct device *codec_dev[2] = { NULL, NULL };
+ struct snd_soc_dai_link_component *dlc;
const char *codec_dai_name[2];
const char *codec_dev_name[2];
u32 asrc_fmt = 0;
@@ -717,7 +701,35 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
memcpy(priv->dai_link, fsl_asoc_card_dai,
sizeof(struct snd_soc_dai_link) * ARRAY_SIZE(priv->dai_link));
+ /*
+ * "Default ASoC DAI Link": 1 cpus, 2 codecs, 1 platforms
+ * "DPCM Link Front-End": 1 cpus, 1 codecs (dummy), 1 platforms
+ * "DPCM Link Back-End": 1 cpus, 2 codecs
+ * 10 components in total
+ */
+ dlc = devm_kcalloc(&pdev->dev, 10, sizeof(*dlc), GFP_KERNEL);
+ if (!dlc) {
+ ret = -ENOMEM;
+ goto asrc_fail;
+ }
+
+ priv->dai_link[0].cpus = &dlc[0];
+ priv->dai_link[0].num_cpus = 1;
+ priv->dai_link[0].codecs = &dlc[1];
priv->dai_link[0].num_codecs = 1;
+ priv->dai_link[0].platforms = &dlc[3];
+ priv->dai_link[0].num_platforms = 1;
+
+ priv->dai_link[1].cpus = &dlc[4];
+ priv->dai_link[1].num_cpus = 1;
+ priv->dai_link[1].codecs = &dlc[5];
+ priv->dai_link[1].num_codecs = 0; /* dummy */
+ priv->dai_link[1].platforms = &dlc[6];
+ priv->dai_link[1].num_platforms = 1;
+
+ priv->dai_link[2].cpus = &dlc[7];
+ priv->dai_link[2].num_cpus = 1;
+ priv->dai_link[2].codecs = &dlc[8];
priv->dai_link[2].num_codecs = 1;
priv->card.dapm_routes = audio_map;
diff --git a/sound/soc/intel/common/soc-acpi-intel-ssp-common.c b/sound/soc/intel/common/soc-acpi-intel-ssp-common.c
index 75d0b931d895..de7a3f7f47f1 100644
--- a/sound/soc/intel/common/soc-acpi-intel-ssp-common.c
+++ b/sound/soc/intel/common/soc-acpi-intel-ssp-common.c
@@ -64,6 +64,15 @@ static const struct codec_map amps[] = {
CODEC_MAP_ENTRY("RT1015P", "rt1015", RT1015P_ACPI_HID, CODEC_RT1015P),
CODEC_MAP_ENTRY("RT1019P", "rt1019", RT1019P_ACPI_HID, CODEC_RT1019P),
CODEC_MAP_ENTRY("RT1308", "rt1308", RT1308_ACPI_HID, CODEC_RT1308),
+
+ /*
+ * Monolithic components
+ *
+ * Only put components that can serve as both the amp and the codec below this line.
+ * This ensures that, if the part is used only as a codec and a separate amp is
+ * also present, the amp will be selected properly.
+ */
+ CODEC_MAP_ENTRY("RT5650", "rt5650", RT5650_ACPI_HID, CODEC_RT5650),
};
enum snd_soc_acpi_intel_codec
diff --git a/sound/soc/intel/common/soc-intel-quirks.h b/sound/soc/intel/common/soc-intel-quirks.h
index de4e550c5b34..42bd51456b94 100644
--- a/sound/soc/intel/common/soc-intel-quirks.h
+++ b/sound/soc/intel/common/soc-intel-quirks.h
@@ -11,7 +11,7 @@
#include <linux/platform_data/x86/soc.h>
-#if IS_ENABLED(CONFIG_X86)
+#if IS_REACHABLE(CONFIG_IOSF_MBI)
#include <linux/dmi.h>
#include <asm/iosf_mbi.h>
diff --git a/sound/soc/sof/amd/pci-vangogh.c b/sound/soc/sof/amd/pci-vangogh.c
index 16eb2994fbab..eba580840100 100644
--- a/sound/soc/sof/amd/pci-vangogh.c
+++ b/sound/soc/sof/amd/pci-vangogh.c
@@ -34,7 +34,6 @@ static const struct sof_amd_acp_desc vangogh_chip_info = {
.dsp_intr_base = ACP5X_DSP_SW_INTR_BASE,
.sram_pte_offset = ACP5X_SRAM_PTE_OFFSET,
.hw_semaphore_offset = ACP5X_AXI2DAGB_SEM_0,
- .acp_clkmux_sel = ACP5X_CLKMUX_SEL,
.probe_reg_offset = ACP5X_FUTURE_REG_ACLK_0,
};
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
index 1c7019c3cbd3..cdd1e79ef9f6 100644
--- a/sound/soc/sof/imx/imx8m.c
+++ b/sound/soc/sof/imx/imx8m.c
@@ -234,7 +234,7 @@ static int imx8m_probe(struct snd_sof_dev *sdev)
/* set default mailbox offset for FW ready message */
sdev->dsp_box.offset = MBOX_OFFSET;
- priv->regmap = syscon_regmap_lookup_by_compatible("fsl,dsp-ctrl");
+ priv->regmap = syscon_regmap_lookup_by_phandle(np, "fsl,dsp-ctrl");
if (IS_ERR(priv->regmap)) {
dev_err(sdev->dev, "cannot find dsp-ctrl registers");
ret = PTR_ERR(priv->regmap);
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
index b8b914eaf7e0..75f6240cf3e1 100644
--- a/sound/soc/sof/intel/hda-loader.c
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -310,15 +310,19 @@ int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream
return ret;
}
- /* Wait for completion of transfer */
- time_left = wait_for_completion_timeout(&hda_stream->ioc,
- msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS));
-
- if (!time_left) {
- dev_err(sdev->dev, "Code loader DMA did not complete\n");
- return -ETIMEDOUT;
+ if (sdev->pdata->ipc_type == SOF_IPC_TYPE_4) {
+ /* Wait for completion of transfer */
+ time_left = wait_for_completion_timeout(&hda_stream->ioc,
+ msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS));
+
+ if (!time_left) {
+ dev_err(sdev->dev, "Code loader DMA did not complete\n");
+ return -ETIMEDOUT;
+ }
+ dev_dbg(sdev->dev, "Code loader DMA done\n");
}
- dev_dbg(sdev->dev, "Code loader DMA done, waiting for FW_ENTERED status\n");
+
+ dev_dbg(sdev->dev, "waiting for FW_ENTERED status\n");
status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
chip->rom_status_reg, reg,
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index daf364f773dd..5a40b8fbbbd3 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -1307,9 +1307,10 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
const struct sof_dev_desc *desc = sof_pdata->desc;
struct hdac_bus *bus = sof_to_bus(sdev);
struct snd_soc_acpi_mach *mach = NULL;
- enum snd_soc_acpi_intel_codec codec_type;
+ enum snd_soc_acpi_intel_codec codec_type, amp_type;
const char *tplg_filename;
const char *tplg_suffix;
+ bool amp_name_valid;
/* Try I2S or DMIC if it is supported */
if (interface_mask & (BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC)))
@@ -1413,15 +1414,16 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
}
}
- codec_type = snd_soc_acpi_intel_detect_amp_type(sdev->dev);
+ amp_type = snd_soc_acpi_intel_detect_amp_type(sdev->dev);
+ codec_type = snd_soc_acpi_intel_detect_codec_type(sdev->dev);
+ amp_name_valid = amp_type != CODEC_NONE && amp_type != codec_type;
- if (tplg_fixup &&
- mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_AMP_NAME &&
- codec_type != CODEC_NONE) {
- tplg_suffix = snd_soc_acpi_intel_get_amp_tplg_suffix(codec_type);
+ if (tplg_fixup && amp_name_valid &&
+ mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_AMP_NAME) {
+ tplg_suffix = snd_soc_acpi_intel_get_amp_tplg_suffix(amp_type);
if (!tplg_suffix) {
dev_err(sdev->dev, "no tplg suffix found, amp %d\n",
- codec_type);
+ amp_type);
return NULL;
}
@@ -1436,7 +1438,6 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
add_extension = true;
}
- codec_type = snd_soc_acpi_intel_detect_codec_type(sdev->dev);
if (tplg_fixup &&
mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_CODEC_NAME &&
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index 90f6856ee80c..87be7f16e8c2 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -1358,7 +1358,13 @@ static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget)
ipc4_copier = dai->private;
if (pipeline->use_chain_dma) {
- pipeline->msg.primary = 0;
+ /*
+ * Preserve the DMA Link ID and clear other bits since
+ * the DMA Link ID is only configured once during
+ * dai_config; the other fields are expected to be 0 for
+ * re-configuration.
+ */
+ pipeline->msg.primary &= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
pipeline->msg.extension = 0;
}
@@ -3095,8 +3101,14 @@ static int sof_ipc4_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
return 0;
if (pipeline->use_chain_dma) {
- pipeline->msg.primary &= ~SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
- pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(data->dai_data);
+ /*
+ * Only configure the DMA Link ID for ChainDMA when this op is
+ * invoked with SOF_DAI_CONFIG_FLAGS_HW_PARAMS
+ */
+ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
+ pipeline->msg.primary &= ~SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
+ pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(data->dai_data);
+ }
return 0;
}
diff --git a/sound/soc/sof/sof-client.c b/sound/soc/sof/sof-client.c
index 99f74def4ab6..5d6005a88e79 100644
--- a/sound/soc/sof/sof-client.c
+++ b/sound/soc/sof/sof-client.c
@@ -357,7 +357,7 @@ EXPORT_SYMBOL_NS_GPL(sof_client_ipc4_find_module, SND_SOC_SOF_CLIENT);
int sof_suspend_clients(struct snd_sof_dev *sdev, pm_message_t state)
{
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
struct sof_client_dev *cdev;
mutex_lock(&sdev->ipc_client_mutex);
@@ -380,7 +380,7 @@ EXPORT_SYMBOL_NS_GPL(sof_suspend_clients, SND_SOC_SOF_CLIENT);
int sof_resume_clients(struct snd_sof_dev *sdev)
{
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
struct sof_client_dev *cdev;
mutex_lock(&sdev->ipc_client_mutex);
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index 74effc57a7a0..2463c22e9cf6 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -78,6 +78,7 @@ config SND_SOC_TEGRA210_DMIC
config SND_SOC_TEGRA210_I2S
tristate "Tegra210 I2S module"
+ select SND_SIMPLE_CARD_UTILS
help
Config to enable the Inter-IC Sound (I2S) Controller which
implements full-duplex and bidirectional and single direction
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c00009b545c0..f7ce8e8c3c3e 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1211,6 +1211,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
cval->res = 16;
}
break;
+ case USB_ID(0x1bcf, 0x2281): /* HD Webcam */
+ if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+ usb_audio_info(chip,
+ "set resolution quirk: cval->res = 16\n");
+ cval->res = 16;
+ }
+ break;
}
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 58156fbca02c..ea063a14cdd8 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2125,6 +2125,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
QUIRK_FLAG_FIXED_RATE),
DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
@@ -2167,6 +2169,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 40ea743d139f..2ff949ea82fa 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -2489,7 +2489,7 @@ static int do_help(int argc, char **argv)
" cgroup/connect_unix | cgroup/getpeername4 | cgroup/getpeername6 |\n"
" cgroup/getpeername_unix | cgroup/getsockname4 | cgroup/getsockname6 |\n"
" cgroup/getsockname_unix | cgroup/sendmsg4 | cgroup/sendmsg6 |\n"
- " cgroup/sendmsg°unix | cgroup/recvmsg4 | cgroup/recvmsg6 | cgroup/recvmsg_unix |\n"
+ " cgroup/sendmsg_unix | cgroup/recvmsg4 | cgroup/recvmsg6 | cgroup/recvmsg_unix |\n"
" cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
" struct_ops | fentry | fexit | freplace | sk_lookup }\n"
" ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 936ef95c3d32..d54aaa0619df 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -704,7 +704,7 @@ static int sets_patch(struct object *obj)
* Make sure id is at the beginning of the pairs
* struct, otherwise the below qsort would not work.
*/
- BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id);
+ BUILD_BUG_ON((u32 *)set8->pairs != &set8->pairs[0].id);
qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
/*
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 1e2ab148d5db..e1900abd44f6 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -149,6 +149,24 @@ FEATURE_DISPLAY ?= \
#
FEATURE_GROUP_MEMBERS-libbfd = libbfd-liberty libbfd-liberty-z
+#
+# Declare list of feature dependency packages that provide pkg-config files.
+#
+FEATURE_PKG_CONFIG ?= \
+ libtraceevent \
+ libtracefs
+
+feature_pkg_config = $(eval $(feature_pkg_config_code))
+define feature_pkg_config_code
+ FEATURE_CHECK_CFLAGS-$(1) := $(shell $(PKG_CONFIG) --cflags $(1) 2>/dev/null)
+ FEATURE_CHECK_LDFLAGS-$(1) := $(shell $(PKG_CONFIG) --libs $(1) 2>/dev/null)
+endef
+
+# Set FEATURE_CHECK_(C|LD)FLAGS-$(package) for packages using pkg-config.
+ifneq ($(PKG_CONFIG),)
+ $(foreach package,$(FEATURE_PKG_CONFIG),$(call feature_pkg_config,$(package)))
+endif
+
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
# mentioned in this list we need to refactor this ;-).
diff --git a/tools/include/asm/rwonce.h b/tools/include/asm/rwonce.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/include/asm/rwonce.h
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 210c13b1b857..2a7f260ef9dc 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -19,7 +19,7 @@ bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
-void bitmap_clear(unsigned long *map, unsigned int start, int len);
+void __bitmap_clear(unsigned long *map, unsigned int start, int len);
bool __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
@@ -150,4 +150,19 @@ static inline bool bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
+static inline void bitmap_clear(unsigned long *map, unsigned int start,
+ unsigned int nbits)
+{
+ if (__builtin_constant_p(nbits) && nbits == 1)
+ __clear_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map &= ~GENMASK(start + nbits - 1, start);
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
+ memset((char *)map + start / 8, 0, nbits / 8);
+ else
+ __bitmap_clear(map, start, nbits);
+}
#endif /* _TOOLS_LINUX_BITMAP_H */
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
index 638c606dfa74..2f082b01ff22 100644
--- a/tools/include/uapi/linux/if_xdp.h
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -41,6 +41,10 @@
*/
#define XDP_UMEM_TX_SW_CSUM (1 << 1)
+/* Request to reserve tx_metadata_len bytes of per-chunk metadata.
+ */
+#define XDP_UMEM_TX_METADATA_LEN (1 << 2)
+
struct sockaddr_xdp {
__u16 sxdp_family;
__u16 sxdp_flags;
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h
index a246e11988d5..e89d00528f2f 100644
--- a/tools/include/uapi/linux/mman.h
+++ b/tools/include/uapi/linux/mman.h
@@ -17,6 +17,7 @@
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+#define MAP_DROPPABLE 0x08 /* Zero memory under memory pressure. */
/*
* Huge page size encoding when MAP_HUGETLB is specified, and a huge page
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index c3e4871967bc..2178862bb114 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -100,3 +100,23 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
return true;
return false;
}
+
+void __bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 5dbca76b953f..894860111ddb 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -1559,10 +1559,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
* Clang for BPF target generates func_proto with no
* args as a func_proto with a single void arg (e.g.,
* `int (*f)(void)` vs just `int (*f)()`). We are
- * going to pretend there are no args for such case.
+ * going to emit valid empty args (void) syntax for
+ * such case. Similarly and conveniently, valid
+ * no args case can be special-cased here as well.
*/
- if (vlen == 1 && p->type == 0) {
- btf_dump_printf(d, ")");
+ if (vlen == 0 || (vlen == 1 && p->type == 0)) {
+ btf_dump_printf(d, "void)");
return;
}
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index a4829b6532d8..c896babf7a74 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -182,20 +182,15 @@ endif
FEATURE_CHECK_CFLAGS-libzstd := $(LIBZSTD_CFLAGS)
FEATURE_CHECK_LDFLAGS-libzstd := $(LIBZSTD_LDFLAGS)
+# for linking with debug library, run like:
+# make DEBUG=1 PKG_CONFIG_PATH=/opt/libtraceevent/(lib|lib64)/pkgconfig
+
ifneq ($(NO_LIBTRACEEVENT),1)
ifeq ($(call get-executable,$(PKG_CONFIG)),)
$(error Error: $(PKG_CONFIG) needed by libtraceevent is missing on this system, please install it)
endif
endif
-# for linking with debug library, run like:
-# make DEBUG=1 PKG_CONFIG_PATH=/opt/libtraceevent/(lib|lib64)/pkgconfig
-FEATURE_CHECK_CFLAGS-libtraceevent := $(shell $(PKG_CONFIG) --cflags libtraceevent 2>/dev/null)
-FEATURE_CHECK_LDFLAGS-libtraceevent := $(shell $(PKG_CONFIG) --libs libtraceevent 2>/dev/null)
-
-FEATURE_CHECK_CFLAGS-libtracefs := $(shell $(PKG_CONFIG) --cflags libtracefs 2>/dev/null)
-FEATURE_CHECK_LDFLAGS-libtracefs := $(shell $(PKG_CONFIG) --libs libtracefs 2>/dev/null)
-
FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
# include ARCH specific config
-include $(src-perf)/arch/$(SRCARCH)/Makefile
@@ -1206,6 +1201,8 @@ ifneq ($(NO_LIBTRACEEVENT),1)
LIBTRACEFS_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEFS_VERSION)))
LIBTRACEFS_VERSION_CPP := $(shell expr $(LIBTRACEFS_VERSION_1) \* 255 \* 255 + $(LIBTRACEFS_VERSION_2) \* 255 + $(LIBTRACEFS_VERSION_3))
CFLAGS += -DLIBTRACEFS_VERSION=$(LIBTRACEFS_VERSION_CPP)
+ else
+ $(warning libtracefs is missing. Please install libtracefs-dev/libtracefs-devel)
endif
endif
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index e30fd55f8e51..cd3b480d20bd 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@ static bool is_ignored_symbol(const char *name, char type)
* when --all-symbols is specified so exclude them to get a
* stable symbol list.
*/
- "kallsyms_addresses",
"kallsyms_offsets",
"kallsyms_relative_base",
"kallsyms_num_syms",
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 2340c4f6d0c2..67414944f245 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1501,7 +1501,7 @@ void dso__delete(struct dso *dso)
auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache);
dso_cache__free(dso);
dso__free_a2l(dso);
- zfree(&RC_CHK_ACCESS(dso)->symsrc_filename);
+ dso__free_symsrc_filename(dso);
nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo);
mutex_destroy(dso__lock(dso));
RC_CHK_FREE(dso);
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 878c1f441868..ed0068251c65 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -602,6 +602,11 @@ static inline void dso__set_symsrc_filename(struct dso *dso, char *val)
RC_CHK_ACCESS(dso)->symsrc_filename = val;
}
+static inline void dso__free_symsrc_filename(struct dso *dso)
+{
+ zfree(&RC_CHK_ACCESS(dso)->symsrc_filename);
+}
+
static inline enum dso_binary_type dso__symtab_type(const struct dso *dso)
{
return RC_CHK_ACCESS(dso)->symtab_type;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index f6a6f6a91030..16c2b03831f3 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -413,7 +413,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
__func__,
dso__symsrc_filename(dso),
debuglink);
- zfree(&dso__symsrc_filename(dso));
+ dso__free_symsrc_filename(dso);
}
dso__set_symsrc_filename(dso, debuglink);
} else {
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 7527f738b4a1..d1acd7d58850 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -5,8 +5,8 @@ CFLAGS += -I. -I../../include -I../../../lib -g -Og -Wall \
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder xarray maple
-CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o \
- slab.o maple.o
+LIBS := slab.o find_bit.o bitmap.o hweight.o vsprintf.o
+CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o maple.o $(LIBS)
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
iteration_check_2.o benchmark.o
diff --git a/tools/testing/radix-tree/bitmap.c b/tools/testing/radix-tree/bitmap.c
deleted file mode 100644
index 66ec4a24a203..000000000000
--- a/tools/testing/radix-tree/bitmap.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/* lib/bitmap.c pulls in at least two other files. */
-
-#include <linux/bitmap.h>
-
-void bitmap_clear(unsigned long *map, unsigned int start, int len)
-{
- unsigned long *p = map + BIT_WORD(start);
- const unsigned int size = start + len;
- int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
- unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
-
- while (len - bits_to_clear >= 0) {
- *p &= ~mask_to_clear;
- len -= bits_to_clear;
- bits_to_clear = BITS_PER_LONG;
- mask_to_clear = ~0UL;
- p++;
- }
- if (len) {
- mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
- *p &= ~mask_to_clear;
- }
-}
diff --git a/tools/testing/selftests/arm64/abi/ptrace.c b/tools/testing/selftests/arm64/abi/ptrace.c
index 4c941270d8de..e4fa507cbdd0 100644
--- a/tools/testing/selftests/arm64/abi/ptrace.c
+++ b/tools/testing/selftests/arm64/abi/ptrace.c
@@ -156,7 +156,7 @@ static void test_hw_debug(pid_t child, int type, const char *type_name)
/* Zero is not currently architecturally valid */
ksft_test_result(arch, "%s_arch_set\n", type_name);
} else {
- ksft_test_result_skip("%s_arch_set\n");
+ ksft_test_result_skip("%s_arch_set\n", type_name);
}
}
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 3c7c3e79aa93..901349da680f 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,6 +1,5 @@
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
-fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # needs CONFIG_FPROBE
kprobe_multi_test # needs CONFIG_FPROBE
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
index f949647dbbc2..552a0875ca6d 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
@@ -21,13 +21,13 @@ static int do_sleep(void *skel)
}
#define STACK_SIZE (1024 * 1024)
-static char child_stack[STACK_SIZE];
void test_fexit_sleep(void)
{
struct fexit_sleep_lskel *fexit_skel = NULL;
int wstatus, duration = 0;
pid_t cpid;
+ char *child_stack = NULL;
int err, fexit_cnt;
fexit_skel = fexit_sleep_lskel__open_and_load();
@@ -38,6 +38,11 @@ void test_fexit_sleep(void)
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto cleanup;
+ child_stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+ MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (!ASSERT_NEQ(child_stack, MAP_FAILED, "mmap"))
+ goto cleanup;
+
cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
goto cleanup;
@@ -78,5 +83,6 @@ void test_fexit_sleep(void)
goto cleanup;
cleanup:
+ munmap(child_stack, STACK_SIZE);
fexit_sleep_lskel__destroy(fexit_skel);
}
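
The fexit_sleep fix above replaces the static BSS child stack with an mmap()ed one so it can be unmapped once the test is done. A minimal standalone sketch of that pattern, with names local to this example and error handling reduced to perror/return:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define STACK_SIZE (1024 * 1024)

static int child_fn(void *arg)
{
	(void)arg;
	return 0;
}

int main(void)
{
	/* Anonymous, private mapping flagged as a stack. */
	char *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	if (stack == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Stacks grow down on the usual architectures: pass the top of the mapping. */
	pid_t child = clone(child_fn, stack + STACK_SIZE, SIGCHLD, NULL);
	if (child < 0) {
		perror("clone");
		return 1;
	}

	waitpid(child, NULL, 0);
	munmap(stack, STACK_SIZE);	/* possible only because the stack is not static */
	return 0;
}

MAP_STACK is largely a hint on Linux today, but it documents intent and matches the flags the test now uses.
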
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index e91b59366030..9ce0e0e0b7da 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -29,6 +29,8 @@
#include "sockmap_helpers.h"
+#define NO_FLAGS 0
+
static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
@@ -1376,7 +1378,8 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1,
int sock_mapfd, int nop_mapfd,
- int verd_mapfd, enum redir_mode mode)
+ int verd_mapfd, enum redir_mode mode,
+ int send_flags)
{
const char *log_prefix = redir_mode_str(mode);
unsigned int pass;
@@ -1396,12 +1399,11 @@ static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1,
return;
}
- n = write(cli1, "a", 1);
- if (n < 0)
- FAIL_ERRNO("%s: write", log_prefix);
- if (n == 0)
- FAIL("%s: incomplete write", log_prefix);
- if (n < 1)
+ /* Last byte is OOB data when send_flags has MSG_OOB bit set */
+ n = xsend(cli1, "ab", 2, send_flags);
+ if (n >= 0 && n < 2)
+ FAIL("%s: incomplete send", log_prefix);
+ if (n < 2)
return;
key = SK_PASS;
@@ -1416,6 +1418,25 @@ static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1,
FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
+
+ if (send_flags & MSG_OOB) {
+ /* Check that we can't read OOB while in sockmap */
+ errno = 0;
+ n = recv(peer1, &b, 1, MSG_OOB | MSG_DONTWAIT);
+ if (n != -1 || errno != EOPNOTSUPP)
+ FAIL("%s: recv(MSG_OOB): expected EOPNOTSUPP: retval=%d errno=%d",
+ log_prefix, n, errno);
+
+ /* Remove peer1 from sockmap */
+ xbpf_map_delete_elem(sock_mapfd, &(int){ 1 });
+
+ /* Check that OOB was dropped on redirect */
+ errno = 0;
+ n = recv(peer1, &b, 1, MSG_OOB | MSG_DONTWAIT);
+ if (n != -1 || errno != EINVAL)
+ FAIL("%s: recv(MSG_OOB): expected EINVAL: retval=%d errno=%d",
+ log_prefix, n, errno);
+ }
}
static void unix_redir_to_connected(int sotype, int sock_mapfd,
@@ -1432,7 +1453,8 @@ static void unix_redir_to_connected(int sotype, int sock_mapfd,
goto close0;
c1 = sfd[0], p1 = sfd[1];
- pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd,
+ mode, NO_FLAGS);
xclose(c1);
xclose(p1);
@@ -1722,7 +1744,8 @@ static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd,
if (err)
goto close_cli0;
- pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd,
+ mode, NO_FLAGS);
xclose(c1);
xclose(p1);
@@ -1780,7 +1803,8 @@ static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd,
if (err)
goto close;
- pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd,
+ mode, NO_FLAGS);
xclose(c1);
xclose(p1);
@@ -1815,10 +1839,9 @@ static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel,
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
-static void unix_inet_redir_to_connected(int family, int type,
- int sock_mapfd, int nop_mapfd,
- int verd_mapfd,
- enum redir_mode mode)
+static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
+ int nop_mapfd, int verd_mapfd,
+ enum redir_mode mode, int send_flags)
{
int c0, c1, p0, p1;
int sfd[2];
@@ -1828,19 +1851,18 @@ static void unix_inet_redir_to_connected(int family, int type,
if (err)
return;
- if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd))
+ if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd))
goto close_cli0;
c1 = sfd[0], p1 = sfd[1];
- pairs_redir_to_connected(c0, p0, c1, p1,
- sock_mapfd, nop_mapfd, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, nop_mapfd,
+ verd_mapfd, mode, send_flags);
xclose(c1);
xclose(p1);
close_cli0:
xclose(c0);
xclose(p0);
-
}
static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
@@ -1859,31 +1881,42 @@ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
skel->bss->test_ingress = false;
unix_inet_redir_to_connected(family, SOCK_DGRAM,
sock_map, -1, verdict_map,
- REDIR_EGRESS);
+ REDIR_EGRESS, NO_FLAGS);
unix_inet_redir_to_connected(family, SOCK_DGRAM,
sock_map, -1, verdict_map,
- REDIR_EGRESS);
+ REDIR_EGRESS, NO_FLAGS);
unix_inet_redir_to_connected(family, SOCK_DGRAM,
sock_map, nop_map, verdict_map,
- REDIR_EGRESS);
+ REDIR_EGRESS, NO_FLAGS);
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, nop_map, verdict_map,
+ REDIR_EGRESS, NO_FLAGS);
+
+ /* MSG_OOB not supported by AF_UNIX SOCK_DGRAM */
unix_inet_redir_to_connected(family, SOCK_STREAM,
sock_map, nop_map, verdict_map,
- REDIR_EGRESS);
+ REDIR_EGRESS, MSG_OOB);
+
skel->bss->test_ingress = true;
unix_inet_redir_to_connected(family, SOCK_DGRAM,
sock_map, -1, verdict_map,
- REDIR_INGRESS);
+ REDIR_INGRESS, NO_FLAGS);
unix_inet_redir_to_connected(family, SOCK_STREAM,
sock_map, -1, verdict_map,
- REDIR_INGRESS);
+ REDIR_INGRESS, NO_FLAGS);
unix_inet_redir_to_connected(family, SOCK_DGRAM,
sock_map, nop_map, verdict_map,
- REDIR_INGRESS);
+ REDIR_INGRESS, NO_FLAGS);
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, nop_map, verdict_map,
+ REDIR_INGRESS, NO_FLAGS);
+
+ /* MSG_OOB not supported by AF_UNIX SOCK_DGRAM */
unix_inet_redir_to_connected(family, SOCK_STREAM,
sock_map, nop_map, verdict_map,
- REDIR_INGRESS);
+ REDIR_INGRESS, MSG_OOB);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
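
For background on the new MSG_OOB checks: with plain TCP (no sockmap), the last byte of a send() flagged MSG_OOB becomes the urgent byte; in-band recv() stops at the urgent mark, and the urgent byte is fetched separately with recv(MSG_OOB). The test above asserts that once the socket sits in a sockmap this read path is refused with EOPNOTSUPP, and that the OOB byte is dropped on redirect (EINVAL once the socket is removed from the map). A small loopback sketch of the plain-TCP behaviour, with error handling reduced to asserts (robust code would handle SIGURG and EINVAL more defensively):

#include <arpa/inet.h>
#include <assert.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	socklen_t alen = sizeof(addr);
	char buf[2] = {};

	int srv = socket(AF_INET, SOCK_STREAM, 0);
	assert(srv >= 0);
	assert(bind(srv, (struct sockaddr *)&addr, sizeof(addr)) == 0);
	assert(listen(srv, 1) == 0);
	assert(getsockname(srv, (struct sockaddr *)&addr, &alen) == 0);

	int cli = socket(AF_INET, SOCK_STREAM, 0);
	assert(cli >= 0);
	assert(connect(cli, (struct sockaddr *)&addr, sizeof(addr)) == 0);
	int peer = accept(srv, NULL, NULL);
	assert(peer >= 0);

	/* 'a' travels in band; 'b', the last byte, becomes the urgent (OOB) byte. */
	assert(send(cli, "ab", 2, MSG_OOB) == 2);

	assert(recv(peer, buf, 1, 0) == 1 && buf[0] == 'a');		/* stops at the urgent mark */
	assert(recv(peer, buf, 1, MSG_OOB) == 1 && buf[0] == 'b');	/* fetches the urgent byte */
	printf("in band: 'a', out of band: 'b'\n");

	close(cli);
	close(peer);
	close(srv);
	return 0;
}
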
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
index f76b5d67a3ee..c87ee2bf558c 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -68,7 +68,8 @@ static int open_xsk(int ifindex, struct xsk *xsk)
.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
- .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG | XDP_UMEM_TX_SW_CSUM,
+ .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG | XDP_UMEM_TX_SW_CSUM |
+ XDP_UMEM_TX_METADATA_LEN,
.tx_metadata_len = sizeof(struct xsk_tx_metadata),
};
__u32 idx;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
index ba97165bdb28..a657651eba52 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
@@ -14,9 +14,9 @@ typedef int *ptr_arr_t[6];
typedef int *ptr_multiarr_t[7][8][9][10];
-typedef int * (*fn_ptr_arr_t[11])();
+typedef int * (*fn_ptr_arr_t[11])(void);
-typedef int * (*fn_ptr_multiarr_t[12][13])();
+typedef int * (*fn_ptr_multiarr_t[12][13])(void);
struct root_struct {
arr_t _1;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index ad21ee8c7e23..29d01fff32bd 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -100,7 +100,7 @@ typedef void (*printf_fn_t)(const char *, ...);
* `int -> char *` function and returns pointer to a char. Equivalent:
* typedef char * (*fn_input_t)(int);
* typedef char * (*fn_output_outer_t)(fn_input_t);
- * typedef const fn_output_outer_t (* fn_output_inner_t)();
+ * typedef const fn_output_outer_t (* fn_output_inner_t)(void);
* typedef const fn_output_inner_t fn_ptr_arr2_t[5];
*/
/* ----- START-EXPECTED-OUTPUT ----- */
@@ -127,7 +127,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);
typedef char * (*fn_ptr_arr1_t[10])(int **);
-typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int));
+typedef char * (* (* const fn_ptr_arr2_t[5])(void))(char * (*)(int));
struct struct_w_typedefs {
int_t a;
diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
index 3c1e9f35b531..3b26bf3cf5b9 100644
--- a/tools/testing/selftests/landlock/base_test.c
+++ b/tools/testing/selftests/landlock/base_test.c
@@ -9,6 +9,7 @@
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
+#include <linux/keyctl.h>
#include <linux/landlock.h>
#include <string.h>
#include <sys/prctl.h>
@@ -326,4 +327,77 @@ TEST(ruleset_fd_transfer)
ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
}
+TEST(cred_transfer)
+{
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
+ };
+ int ruleset_fd, dir_fd;
+ pid_t child;
+ int status;
+
+ drop_caps(_metadata);
+
+ dir_fd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+ EXPECT_LE(0, dir_fd);
+ EXPECT_EQ(0, close(dir_fd));
+
+ /* Denies opening directories. */
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+ EXPECT_EQ(0, close(ruleset_fd));
+
+ /* Checks ruleset enforcement. */
+ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
+ EXPECT_EQ(EACCES, errno);
+
+ /* Needed for KEYCTL_SESSION_TO_PARENT permission checks */
+ EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL, 0,
+ 0, 0))
+ {
+ TH_LOG("Failed to join session keyring: %s", strerror(errno));
+ }
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ /* Checks ruleset enforcement. */
+ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
+ EXPECT_EQ(EACCES, errno);
+
+ /*
+ * KEYCTL_SESSION_TO_PARENT is a no-op unless we have a
+ * different session keyring in the child, so make that happen.
+ */
+ EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING,
+ NULL, 0, 0, 0));
+
+ /*
+ * KEYCTL_SESSION_TO_PARENT installs credentials on the parent
+		 * that never go through the cred_prepare hook; this path uses
+ * cred_transfer instead.
+ */
+ EXPECT_EQ(0, syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT, 0,
+ 0, 0, 0));
+
+ /* Re-checks ruleset enforcement. */
+ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
+ EXPECT_EQ(EACCES, errno);
+
+ _exit(_metadata->exit_code);
+ return;
+ }
+
+ EXPECT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(1, WIFEXITED(status));
+ EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+
+ /* Re-checks ruleset enforcement. */
+ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
+ EXPECT_EQ(EACCES, errno);
+}
+
TEST_HARNESS_MAIN
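
The cred_transfer test relies on keyctl(KEYCTL_SESSION_TO_PARENT), which installs a new session keyring, and with it new credentials, on the parent without going through the cred_prepare LSM hook. A minimal sketch of just that keyctl sequence, using the raw syscall as the test does; the replacement keyring only takes effect once the parent next returns to userspace, and parent and child must share the same credentials:

#include <assert.h>
#include <linux/keyctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* Give the parent a session keyring of its own first. */
	assert(syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL, 0, 0, 0) >= 0);

	pid_t child = fork();
	assert(child >= 0);
	if (child == 0) {
		/* The child joins a fresh session keyring... */
		assert(syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL, 0, 0, 0) >= 0);
		/* ...and asks the kernel to install it on the parent. */
		assert(syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT, 0, 0, 0, 0) == 0);
		_exit(0);
	}
	assert(waitpid(child, NULL, 0) == child);
	/* The new keyring (and credentials) land when the parent returns to userspace. */
	printf("parent session keyring replaced by the child\n");
	return 0;
}
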
diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
index 0086efaa7b68..29af19c4e9f9 100644
--- a/tools/testing/selftests/landlock/config
+++ b/tools/testing/selftests/landlock/config
@@ -2,6 +2,7 @@ CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_INET=y
CONFIG_IPV6=y
+CONFIG_KEYS=y
CONFIG_NET=y
CONFIG_NET_NS=y
CONFIG_OVERLAY_FS=y
diff --git a/tools/testing/selftests/livepatch/test-livepatch.sh b/tools/testing/selftests/livepatch/test-livepatch.sh
index e3455a6b1158..65c9c058458d 100755
--- a/tools/testing/selftests/livepatch/test-livepatch.sh
+++ b/tools/testing/selftests/livepatch/test-livepatch.sh
@@ -4,7 +4,9 @@
. $(dirname $0)/functions.sh
-MOD_LIVEPATCH=test_klp_livepatch
+MOD_LIVEPATCH1=test_klp_livepatch
+MOD_LIVEPATCH2=test_klp_syscall
+MOD_LIVEPATCH3=test_klp_callbacks_demo
MOD_REPLACE=test_klp_atomic_replace
setup_config
@@ -16,33 +18,33 @@ setup_config
start_test "basic function patching"
-load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH1
-if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH1: this has been live patched" ]] ; then
echo -e "FAIL\n\n"
die "livepatch kselftest(s) failed"
fi
-disable_lp $MOD_LIVEPATCH
-unload_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH1
+unload_lp $MOD_LIVEPATCH1
-if [[ "$(cat /proc/cmdline)" == "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+if [[ "$(cat /proc/cmdline)" == "$MOD_LIVEPATCH1: this has been live patched" ]] ; then
echo -e "FAIL\n\n"
die "livepatch kselftest(s) failed"
fi
-check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
-livepatch: enabling patch '$MOD_LIVEPATCH'
-livepatch: '$MOD_LIVEPATCH': initializing patching transition
-livepatch: '$MOD_LIVEPATCH': starting patching transition
-livepatch: '$MOD_LIVEPATCH': completing patching transition
-livepatch: '$MOD_LIVEPATCH': patching complete
-% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
-livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
-livepatch: '$MOD_LIVEPATCH': starting unpatching transition
-livepatch: '$MOD_LIVEPATCH': completing unpatching transition
-livepatch: '$MOD_LIVEPATCH': unpatching complete
-% rmmod $MOD_LIVEPATCH"
+check_result "% insmod test_modules/$MOD_LIVEPATCH1.ko
+livepatch: enabling patch '$MOD_LIVEPATCH1'
+livepatch: '$MOD_LIVEPATCH1': initializing patching transition
+livepatch: '$MOD_LIVEPATCH1': starting patching transition
+livepatch: '$MOD_LIVEPATCH1': completing patching transition
+livepatch: '$MOD_LIVEPATCH1': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH1/enabled
+livepatch: '$MOD_LIVEPATCH1': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH1': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': unpatching complete
+% rmmod $MOD_LIVEPATCH1"
# - load a livepatch that modifies the output from /proc/cmdline and
@@ -53,7 +55,7 @@ livepatch: '$MOD_LIVEPATCH': unpatching complete
start_test "multiple livepatches"
-load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
@@ -69,26 +71,26 @@ unload_lp $MOD_REPLACE
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-disable_lp $MOD_LIVEPATCH
-unload_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH1
+unload_lp $MOD_LIVEPATCH1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
-livepatch: enabling patch '$MOD_LIVEPATCH'
-livepatch: '$MOD_LIVEPATCH': initializing patching transition
-livepatch: '$MOD_LIVEPATCH': starting patching transition
-livepatch: '$MOD_LIVEPATCH': completing patching transition
-livepatch: '$MOD_LIVEPATCH': patching complete
-$MOD_LIVEPATCH: this has been live patched
+check_result "% insmod test_modules/$MOD_LIVEPATCH1.ko
+livepatch: enabling patch '$MOD_LIVEPATCH1'
+livepatch: '$MOD_LIVEPATCH1': initializing patching transition
+livepatch: '$MOD_LIVEPATCH1': starting patching transition
+livepatch: '$MOD_LIVEPATCH1': completing patching transition
+livepatch: '$MOD_LIVEPATCH1': patching complete
+$MOD_LIVEPATCH1: this has been live patched
% insmod test_modules/$MOD_REPLACE.ko replace=0
livepatch: enabling patch '$MOD_REPLACE'
livepatch: '$MOD_REPLACE': initializing patching transition
livepatch: '$MOD_REPLACE': starting patching transition
livepatch: '$MOD_REPLACE': completing patching transition
livepatch: '$MOD_REPLACE': patching complete
-$MOD_LIVEPATCH: this has been live patched
+$MOD_LIVEPATCH1: this has been live patched
$MOD_REPLACE: this has been live patched
% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
livepatch: '$MOD_REPLACE': initializing unpatching transition
@@ -96,35 +98,57 @@ livepatch: '$MOD_REPLACE': starting unpatching transition
livepatch: '$MOD_REPLACE': completing unpatching transition
livepatch: '$MOD_REPLACE': unpatching complete
% rmmod $MOD_REPLACE
-$MOD_LIVEPATCH: this has been live patched
-% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
-livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
-livepatch: '$MOD_LIVEPATCH': starting unpatching transition
-livepatch: '$MOD_LIVEPATCH': completing unpatching transition
-livepatch: '$MOD_LIVEPATCH': unpatching complete
-% rmmod $MOD_LIVEPATCH"
+$MOD_LIVEPATCH1: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH1/enabled
+livepatch: '$MOD_LIVEPATCH1': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH1': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': unpatching complete
+% rmmod $MOD_LIVEPATCH1"
# - load a livepatch that modifies the output from /proc/cmdline and
# verify correct behavior
-# - load an atomic replace livepatch and verify that only the second is active
-# - remove the first livepatch and verify that the atomic replace livepatch
-# is still active
+# - load two additional livepatches and check the number of livepatch modules
+# applied
+# - load an atomic replace livepatch and check that the other three modules were
+# disabled
+# - remove all livepatches besides the atomic replace one and verify that the
+# atomic replace livepatch is still active
# - remove the atomic replace livepatch and verify that none are active
start_test "atomic replace livepatch"
-load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
+for mod in $MOD_LIVEPATCH2 $MOD_LIVEPATCH3; do
+ load_lp "$mod"
+done
+
+mods=(/sys/kernel/livepatch/*)
+nmods=${#mods[@]}
+if [ "$nmods" -ne 3 ]; then
+ die "Expecting three modules listed, found $nmods"
+fi
+
load_lp $MOD_REPLACE replace=1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-unload_lp $MOD_LIVEPATCH
+mods=(/sys/kernel/livepatch/*)
+nmods=${#mods[@]}
+if [ "$nmods" -ne 1 ]; then
+	die "Expecting only one module listed, found $nmods"
+fi
+
+# These modules were disabled by the atomic replace
+for mod in $MOD_LIVEPATCH3 $MOD_LIVEPATCH2 $MOD_LIVEPATCH1; do
+ unload_lp "$mod"
+done
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
@@ -135,13 +159,27 @@ unload_lp $MOD_REPLACE
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
-livepatch: enabling patch '$MOD_LIVEPATCH'
-livepatch: '$MOD_LIVEPATCH': initializing patching transition
-livepatch: '$MOD_LIVEPATCH': starting patching transition
-livepatch: '$MOD_LIVEPATCH': completing patching transition
-livepatch: '$MOD_LIVEPATCH': patching complete
-$MOD_LIVEPATCH: this has been live patched
+check_result "% insmod test_modules/$MOD_LIVEPATCH1.ko
+livepatch: enabling patch '$MOD_LIVEPATCH1'
+livepatch: '$MOD_LIVEPATCH1': initializing patching transition
+livepatch: '$MOD_LIVEPATCH1': starting patching transition
+livepatch: '$MOD_LIVEPATCH1': completing patching transition
+livepatch: '$MOD_LIVEPATCH1': patching complete
+$MOD_LIVEPATCH1: this has been live patched
+% insmod test_modules/$MOD_LIVEPATCH2.ko
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% insmod test_modules/$MOD_LIVEPATCH3.ko
+livepatch: enabling patch '$MOD_LIVEPATCH3'
+livepatch: '$MOD_LIVEPATCH3': initializing patching transition
+$MOD_LIVEPATCH3: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH3': starting patching transition
+livepatch: '$MOD_LIVEPATCH3': completing patching transition
+$MOD_LIVEPATCH3: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH3': patching complete
% insmod test_modules/$MOD_REPLACE.ko replace=1
livepatch: enabling patch '$MOD_REPLACE'
livepatch: '$MOD_REPLACE': initializing patching transition
@@ -149,7 +187,9 @@ livepatch: '$MOD_REPLACE': starting patching transition
livepatch: '$MOD_REPLACE': completing patching transition
livepatch: '$MOD_REPLACE': patching complete
$MOD_REPLACE: this has been live patched
-% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_LIVEPATCH3
+% rmmod $MOD_LIVEPATCH2
+% rmmod $MOD_LIVEPATCH1
$MOD_REPLACE: this has been live patched
% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
livepatch: '$MOD_REPLACE': initializing unpatching transition
diff --git a/tools/testing/selftests/livepatch/test-syscall.sh b/tools/testing/selftests/livepatch/test-syscall.sh
index b76a881d4013..289eb7d4c4b3 100755
--- a/tools/testing/selftests/livepatch/test-syscall.sh
+++ b/tools/testing/selftests/livepatch/test-syscall.sh
@@ -15,7 +15,10 @@ setup_config
start_test "patch getpid syscall while being heavily hammered"
-for i in $(seq 1 $(getconf _NPROCESSORS_ONLN)); do
+NPROC=$(getconf _NPROCESSORS_ONLN)
+MAXPROC=128
+
+for i in $(seq 1 $(($NPROC < $MAXPROC ? $NPROC : $MAXPROC))); do
./test_klp-call_getpid &
pids[$i]="$!"
done
diff --git a/tools/testing/selftests/livepatch/test-sysfs.sh b/tools/testing/selftests/livepatch/test-sysfs.sh
index 6c646afa7395..05a14f5a7bfb 100755
--- a/tools/testing/selftests/livepatch/test-sysfs.sh
+++ b/tools/testing/selftests/livepatch/test-sysfs.sh
@@ -18,6 +18,7 @@ check_sysfs_rights "$MOD_LIVEPATCH" "" "drwxr-xr-x"
check_sysfs_rights "$MOD_LIVEPATCH" "enabled" "-rw-r--r--"
check_sysfs_value "$MOD_LIVEPATCH" "enabled" "1"
check_sysfs_rights "$MOD_LIVEPATCH" "force" "--w-------"
+check_sysfs_rights "$MOD_LIVEPATCH" "replace" "-r--r--r--"
check_sysfs_rights "$MOD_LIVEPATCH" "transition" "-r--r--r--"
check_sysfs_value "$MOD_LIVEPATCH" "transition" "0"
check_sysfs_rights "$MOD_LIVEPATCH" "vmlinux/patched" "-r--r--r--"
@@ -83,4 +84,51 @@ test_klp_callbacks_demo: post_unpatch_callback: vmlinux
livepatch: 'test_klp_callbacks_demo': unpatching complete
% rmmod test_klp_callbacks_demo"
+start_test "sysfs test replace enabled"
+
+MOD_LIVEPATCH=test_klp_atomic_replace
+load_lp $MOD_LIVEPATCH replace=1
+
+check_sysfs_rights "$MOD_LIVEPATCH" "replace" "-r--r--r--"
+check_sysfs_value "$MOD_LIVEPATCH" "replace" "1"
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% insmod test_modules/$MOD_LIVEPATCH.ko replace=1
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+start_test "sysfs test replace disabled"
+
+load_lp $MOD_LIVEPATCH replace=0
+
+check_sysfs_rights "$MOD_LIVEPATCH" "replace" "-r--r--r--"
+check_sysfs_value "$MOD_LIVEPATCH" "replace" "0"
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% insmod test_modules/$MOD_LIVEPATCH.ko replace=0
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
exit 0
diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
index 064e7b125643..da030b43e43b 100644
--- a/tools/testing/selftests/mm/.gitignore
+++ b/tools/testing/selftests/mm/.gitignore
@@ -50,3 +50,4 @@ hugetlb_fault_after_madv
hugetlb_madv_vs_map
mseal_test
seal_elf
+droppable
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index e1aa09ddaa3d..901e0d07765b 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -76,6 +76,7 @@ TEST_GEN_FILES += mdwe_test
TEST_GEN_FILES += hugetlb_fault_after_madv
TEST_GEN_FILES += hugetlb_madv_vs_map
TEST_GEN_FILES += hugetlb_dio
+TEST_GEN_FILES += droppable
ifneq ($(ARCH),arm64)
TEST_GEN_FILES += soft-dirty
diff --git a/tools/testing/selftests/mm/droppable.c b/tools/testing/selftests/mm/droppable.c
new file mode 100644
index 000000000000..f3d9ecf96890
--- /dev/null
+++ b/tools/testing/selftests/mm/droppable.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <linux/mman.h>
+
+#include "../kselftest.h"
+
+int main(int argc, char *argv[])
+{
+ size_t alloc_size = 134217728;
+ size_t page_size = getpagesize();
+ void *alloc;
+ pid_t child;
+
+ ksft_print_header();
+ ksft_set_plan(1);
+
+ alloc = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_DROPPABLE, -1, 0);
+ assert(alloc != MAP_FAILED);
+ memset(alloc, 'A', alloc_size);
+ for (size_t i = 0; i < alloc_size; i += page_size)
+ assert(*(uint8_t *)(alloc + i));
+
+ child = fork();
+ assert(child >= 0);
+ if (!child) {
+ for (;;)
+ *(char *)malloc(page_size) = 'B';
+ }
+
+ for (bool done = false; !done;) {
+ for (size_t i = 0; i < alloc_size; i += page_size) {
+ if (!*(uint8_t *)(alloc + i)) {
+ done = true;
+ break;
+ }
+ }
+ }
+ kill(child, SIGTERM);
+
+ ksft_test_result_pass("MAP_DROPPABLE: PASS\n");
+ exit(KSFT_PASS);
+}
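
The new droppable test exercises MAP_DROPPABLE: the kernel is free to reclaim such pages under memory pressure, and a dropped page reads back as zeroes rather than faulting in its old contents. A minimal usage sketch, assuming a kernel and headers new enough to provide MAP_DROPPABLE in <linux/mman.h>; the flag combination mirrors the test above:

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <linux/mman.h>		/* MAP_DROPPABLE */

int main(void)
{
	size_t len = 1 << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_DROPPABLE, -1, 0);
	assert(p != MAP_FAILED);

	memset(p, 'A', len);
	/*
	 * Under memory pressure the kernel may drop any of these pages;
	 * a dropped page reads back as zero instead of 'A'.
	 */
	printf("first byte now: 0x%02x\n", (unsigned char)p[0]);

	munmap(p, len);
	return 0;
}
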
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.c b/tools/testing/selftests/mm/va_high_addr_switch.c
index fa7eabfaf841..896b3f73fc53 100644
--- a/tools/testing/selftests/mm/va_high_addr_switch.c
+++ b/tools/testing/selftests/mm/va_high_addr_switch.c
@@ -293,6 +293,20 @@ static int run_test(struct testcase *test, int count)
return ret;
}
+#ifdef __aarch64__
+/* Check if userspace VA > 48 bits */
+static int high_address_present(void)
+{
+ void *ptr = mmap((void *)(1UL << 50), 1, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (ptr == MAP_FAILED)
+ return 0;
+
+ munmap(ptr, 1);
+ return 1;
+}
+#endif
+
static int supported_arch(void)
{
#if defined(__powerpc64__)
@@ -300,7 +314,7 @@ static int supported_arch(void)
#elif defined(__x86_64__)
return 1;
#elif defined(__aarch64__)
- return 1;
+ return high_address_present();
#else
return 0;
#endif
diff --git a/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh b/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh
index 0760a34b7114..a21b7085da2e 100755
--- a/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh
@@ -178,6 +178,22 @@ fdb_del()
check_err $? "Failed to remove a FDB entry of type ${type}"
}
+check_fdb_n_learned_support()
+{
+ if ! ip link help bridge 2>&1 | grep -q "fdb_max_learned"; then
+ echo "SKIP: iproute2 too old, missing bridge max learned support"
+ exit $ksft_skip
+ fi
+
+ ip link add dev br0 type bridge
+ local learned=$(fdb_get_n_learned)
+ ip link del dev br0
+ if [ "$learned" == "null" ]; then
+ echo "SKIP: kernel too old; bridge fdb_n_learned feature not supported."
+ exit $ksft_skip
+ fi
+}
+
check_accounting_one_type()
{
local type=$1 is_counted=$2 overrides_learned=$3
@@ -274,6 +290,8 @@ check_limit()
done
}
+check_fdb_n_learned_support
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/pidfd/pidfd_setns_test.c b/tools/testing/selftests/pidfd/pidfd_setns_test.c
index 47746b0c6acd..7c2a4349170a 100644
--- a/tools/testing/selftests/pidfd/pidfd_setns_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_setns_test.c
@@ -16,11 +16,56 @@
#include <unistd.h>
#include <sys/socket.h>
#include <sys/stat.h>
+#include <linux/ioctl.h>
#include "pidfd.h"
#include "../clone3/clone3_selftests.h"
#include "../kselftest_harness.h"
+#ifndef PIDFS_IOCTL_MAGIC
+#define PIDFS_IOCTL_MAGIC 0xFF
+#endif
+
+#ifndef PIDFD_GET_CGROUP_NAMESPACE
+#define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1)
+#endif
+
+#ifndef PIDFD_GET_IPC_NAMESPACE
+#define PIDFD_GET_IPC_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 2)
+#endif
+
+#ifndef PIDFD_GET_MNT_NAMESPACE
+#define PIDFD_GET_MNT_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 3)
+#endif
+
+#ifndef PIDFD_GET_NET_NAMESPACE
+#define PIDFD_GET_NET_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 4)
+#endif
+
+#ifndef PIDFD_GET_PID_NAMESPACE
+#define PIDFD_GET_PID_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 5)
+#endif
+
+#ifndef PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE
+#define PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 6)
+#endif
+
+#ifndef PIDFD_GET_TIME_NAMESPACE
+#define PIDFD_GET_TIME_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 7)
+#endif
+
+#ifndef PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE
+#define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8)
+#endif
+
+#ifndef PIDFD_GET_USER_NAMESPACE
+#define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9)
+#endif
+
+#ifndef PIDFD_GET_UTS_NAMESPACE
+#define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10)
+#endif
+
enum {
PIDFD_NS_USER,
PIDFD_NS_MNT,
@@ -31,22 +76,25 @@ enum {
PIDFD_NS_CGROUP,
PIDFD_NS_PIDCLD,
PIDFD_NS_TIME,
+ PIDFD_NS_TIMECLD,
PIDFD_NS_MAX
};
const struct ns_info {
const char *name;
int flag;
+ unsigned int pidfd_ioctl;
} ns_info[] = {
- [PIDFD_NS_USER] = { "user", CLONE_NEWUSER, },
- [PIDFD_NS_MNT] = { "mnt", CLONE_NEWNS, },
- [PIDFD_NS_PID] = { "pid", CLONE_NEWPID, },
- [PIDFD_NS_UTS] = { "uts", CLONE_NEWUTS, },
- [PIDFD_NS_IPC] = { "ipc", CLONE_NEWIPC, },
- [PIDFD_NS_NET] = { "net", CLONE_NEWNET, },
- [PIDFD_NS_CGROUP] = { "cgroup", CLONE_NEWCGROUP, },
- [PIDFD_NS_PIDCLD] = { "pid_for_children", 0, },
- [PIDFD_NS_TIME] = { "time", CLONE_NEWTIME, },
+ [PIDFD_NS_USER] = { "user", CLONE_NEWUSER, PIDFD_GET_USER_NAMESPACE, },
+ [PIDFD_NS_MNT] = { "mnt", CLONE_NEWNS, PIDFD_GET_MNT_NAMESPACE, },
+ [PIDFD_NS_PID] = { "pid", CLONE_NEWPID, PIDFD_GET_PID_NAMESPACE, },
+ [PIDFD_NS_UTS] = { "uts", CLONE_NEWUTS, PIDFD_GET_UTS_NAMESPACE, },
+ [PIDFD_NS_IPC] = { "ipc", CLONE_NEWIPC, PIDFD_GET_IPC_NAMESPACE, },
+ [PIDFD_NS_NET] = { "net", CLONE_NEWNET, PIDFD_GET_NET_NAMESPACE, },
+ [PIDFD_NS_CGROUP] = { "cgroup", CLONE_NEWCGROUP, PIDFD_GET_CGROUP_NAMESPACE, },
+ [PIDFD_NS_TIME] = { "time", CLONE_NEWTIME, PIDFD_GET_TIME_NAMESPACE, },
+ [PIDFD_NS_PIDCLD] = { "pid_for_children", 0, PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE, },
+ [PIDFD_NS_TIMECLD] = { "time_for_children", 0, PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE, },
};
FIXTURE(current_nsset)
@@ -54,6 +102,7 @@ FIXTURE(current_nsset)
pid_t pid;
int pidfd;
int nsfds[PIDFD_NS_MAX];
+ int child_pidfd_derived_nsfds[PIDFD_NS_MAX];
pid_t child_pid_exited;
int child_pidfd_exited;
@@ -61,10 +110,12 @@ FIXTURE(current_nsset)
pid_t child_pid1;
int child_pidfd1;
int child_nsfds1[PIDFD_NS_MAX];
+ int child_pidfd_derived_nsfds1[PIDFD_NS_MAX];
pid_t child_pid2;
int child_pidfd2;
int child_nsfds2[PIDFD_NS_MAX];
+ int child_pidfd_derived_nsfds2[PIDFD_NS_MAX];
};
static int sys_waitid(int which, pid_t pid, int options)
@@ -128,9 +179,12 @@ FIXTURE_SETUP(current_nsset)
char c;
for (i = 0; i < PIDFD_NS_MAX; i++) {
- self->nsfds[i] = -EBADF;
- self->child_nsfds1[i] = -EBADF;
- self->child_nsfds2[i] = -EBADF;
+ self->nsfds[i] = -EBADF;
+ self->child_nsfds1[i] = -EBADF;
+ self->child_nsfds2[i] = -EBADF;
+ self->child_pidfd_derived_nsfds[i] = -EBADF;
+ self->child_pidfd_derived_nsfds1[i] = -EBADF;
+ self->child_pidfd_derived_nsfds2[i] = -EBADF;
}
proc_fd = open("/proc/self/ns", O_DIRECTORY | O_CLOEXEC);
@@ -139,6 +193,11 @@ FIXTURE_SETUP(current_nsset)
}
self->pid = getpid();
+ self->pidfd = sys_pidfd_open(self->pid, 0);
+ EXPECT_GT(self->pidfd, 0) {
+ TH_LOG("%m - Failed to open pidfd for process %d", self->pid);
+ }
+
for (i = 0; i < PIDFD_NS_MAX; i++) {
const struct ns_info *info = &ns_info[i];
self->nsfds[i] = openat(proc_fd, info->name, O_RDONLY | O_CLOEXEC);
@@ -148,20 +207,27 @@ FIXTURE_SETUP(current_nsset)
info->name, self->pid);
}
}
- }
- self->pidfd = sys_pidfd_open(self->pid, 0);
- EXPECT_GT(self->pidfd, 0) {
- TH_LOG("%m - Failed to open pidfd for process %d", self->pid);
+ self->child_pidfd_derived_nsfds[i] = ioctl(self->pidfd, info->pidfd_ioctl, 0);
+ if (self->child_pidfd_derived_nsfds[i] < 0) {
+ EXPECT_EQ(errno, EOPNOTSUPP) {
+ TH_LOG("%m - Failed to derive %s namespace from pidfd of process %d",
+ info->name, self->pid);
+ }
+ }
}
/* Create task that exits right away. */
- self->child_pid_exited = create_child(&self->child_pidfd_exited,
- CLONE_NEWUSER | CLONE_NEWNET);
+ self->child_pid_exited = create_child(&self->child_pidfd_exited, 0);
EXPECT_GE(self->child_pid_exited, 0);
- if (self->child_pid_exited == 0)
+ if (self->child_pid_exited == 0) {
+ if (self->nsfds[PIDFD_NS_USER] >= 0 && unshare(CLONE_NEWUSER) < 0)
+ _exit(EXIT_FAILURE);
+ if (self->nsfds[PIDFD_NS_NET] >= 0 && unshare(CLONE_NEWNET) < 0)
+ _exit(EXIT_FAILURE);
_exit(EXIT_SUCCESS);
+ }
ASSERT_EQ(sys_waitid(P_PID, self->child_pid_exited, WEXITED | WNOWAIT), 0);
@@ -174,18 +240,43 @@ FIXTURE_SETUP(current_nsset)
EXPECT_EQ(ret, 0);
/* Create tasks that will be stopped. */
- self->child_pid1 = create_child(&self->child_pidfd1,
- CLONE_NEWUSER | CLONE_NEWNS |
- CLONE_NEWCGROUP | CLONE_NEWIPC |
- CLONE_NEWUTS | CLONE_NEWPID |
- CLONE_NEWNET);
+ if (self->nsfds[PIDFD_NS_USER] >= 0 && self->nsfds[PIDFD_NS_PID] >= 0)
+ self->child_pid1 = create_child(&self->child_pidfd1, CLONE_NEWUSER | CLONE_NEWPID);
+ else if (self->nsfds[PIDFD_NS_PID] >= 0)
+ self->child_pid1 = create_child(&self->child_pidfd1, CLONE_NEWPID);
+ else if (self->nsfds[PIDFD_NS_USER] >= 0)
+ self->child_pid1 = create_child(&self->child_pidfd1, CLONE_NEWUSER);
+ else
+ self->child_pid1 = create_child(&self->child_pidfd1, 0);
EXPECT_GE(self->child_pid1, 0);
if (self->child_pid1 == 0) {
close(ipc_sockets[0]);
- if (!switch_timens())
+ if (self->nsfds[PIDFD_NS_MNT] >= 0 && unshare(CLONE_NEWNS) < 0) {
+ TH_LOG("%m - Failed to unshare mount namespace for process %d", self->pid);
_exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_CGROUP] >= 0 && unshare(CLONE_NEWCGROUP) < 0) {
+ TH_LOG("%m - Failed to unshare cgroup namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_IPC] >= 0 && unshare(CLONE_NEWIPC) < 0) {
+ TH_LOG("%m - Failed to unshare ipc namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_UTS] >= 0 && unshare(CLONE_NEWUTS) < 0) {
+ TH_LOG("%m - Failed to unshare uts namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_NET] >= 0 && unshare(CLONE_NEWNET) < 0) {
+ TH_LOG("%m - Failed to unshare net namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_TIME] >= 0 && !switch_timens()) {
+ TH_LOG("%m - Failed to unshare time namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
if (write_nointr(ipc_sockets[1], "1", 1) < 0)
_exit(EXIT_FAILURE);
@@ -203,18 +294,43 @@ FIXTURE_SETUP(current_nsset)
ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
EXPECT_EQ(ret, 0);
- self->child_pid2 = create_child(&self->child_pidfd2,
- CLONE_NEWUSER | CLONE_NEWNS |
- CLONE_NEWCGROUP | CLONE_NEWIPC |
- CLONE_NEWUTS | CLONE_NEWPID |
- CLONE_NEWNET);
+ if (self->nsfds[PIDFD_NS_USER] >= 0 && self->nsfds[PIDFD_NS_PID] >= 0)
+ self->child_pid2 = create_child(&self->child_pidfd2, CLONE_NEWUSER | CLONE_NEWPID);
+ else if (self->nsfds[PIDFD_NS_PID] >= 0)
+ self->child_pid2 = create_child(&self->child_pidfd2, CLONE_NEWPID);
+ else if (self->nsfds[PIDFD_NS_USER] >= 0)
+ self->child_pid2 = create_child(&self->child_pidfd2, CLONE_NEWUSER);
+ else
+ self->child_pid2 = create_child(&self->child_pidfd2, 0);
EXPECT_GE(self->child_pid2, 0);
if (self->child_pid2 == 0) {
close(ipc_sockets[0]);
- if (!switch_timens())
+ if (self->nsfds[PIDFD_NS_MNT] >= 0 && unshare(CLONE_NEWNS) < 0) {
+ TH_LOG("%m - Failed to unshare mount namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_CGROUP] >= 0 && unshare(CLONE_NEWCGROUP) < 0) {
+ TH_LOG("%m - Failed to unshare cgroup namespace for process %d", self->pid);
_exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_IPC] >= 0 && unshare(CLONE_NEWIPC) < 0) {
+ TH_LOG("%m - Failed to unshare ipc namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_UTS] >= 0 && unshare(CLONE_NEWUTS) < 0) {
+ TH_LOG("%m - Failed to unshare uts namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_NET] >= 0 && unshare(CLONE_NEWNET) < 0) {
+ TH_LOG("%m - Failed to unshare net namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
+ if (self->nsfds[PIDFD_NS_TIME] >= 0 && !switch_timens()) {
+ TH_LOG("%m - Failed to unshare time namespace for process %d", self->pid);
+ _exit(EXIT_FAILURE);
+ }
if (write_nointr(ipc_sockets[1], "1", 1) < 0)
_exit(EXIT_FAILURE);
@@ -267,6 +383,22 @@ FIXTURE_SETUP(current_nsset)
info->name, self->child_pid1);
}
}
+
+ self->child_pidfd_derived_nsfds1[i] = ioctl(self->child_pidfd1, info->pidfd_ioctl, 0);
+ if (self->child_pidfd_derived_nsfds1[i] < 0) {
+ EXPECT_EQ(errno, EOPNOTSUPP) {
+ TH_LOG("%m - Failed to derive %s namespace from pidfd of process %d",
+ info->name, self->child_pid1);
+ }
+ }
+
+ self->child_pidfd_derived_nsfds2[i] = ioctl(self->child_pidfd2, info->pidfd_ioctl, 0);
+ if (self->child_pidfd_derived_nsfds2[i] < 0) {
+ EXPECT_EQ(errno, EOPNOTSUPP) {
+ TH_LOG("%m - Failed to derive %s namespace from pidfd of process %d",
+ info->name, self->child_pid2);
+ }
+ }
}
close(proc_fd);
@@ -288,6 +420,12 @@ FIXTURE_TEARDOWN(current_nsset)
close(self->child_nsfds1[i]);
if (self->child_nsfds2[i] >= 0)
close(self->child_nsfds2[i]);
+ if (self->child_pidfd_derived_nsfds[i] >= 0)
+ close(self->child_pidfd_derived_nsfds[i]);
+ if (self->child_pidfd_derived_nsfds1[i] >= 0)
+ close(self->child_pidfd_derived_nsfds1[i]);
+ if (self->child_pidfd_derived_nsfds2[i] >= 0)
+ close(self->child_pidfd_derived_nsfds2[i]);
}
if (self->child_pidfd1 >= 0)
@@ -446,6 +584,42 @@ TEST_F(current_nsset, nsfd_incremental_setns)
}
}
+TEST_F(current_nsset, pidfd_derived_nsfd_incremental_setns)
+{
+ int i;
+ pid_t pid;
+
+ pid = getpid();
+ for (i = 0; i < PIDFD_NS_MAX; i++) {
+ const struct ns_info *info = &ns_info[i];
+ int nsfd;
+
+ if (self->child_pidfd_derived_nsfds1[i] < 0)
+ continue;
+
+ if (info->flag) {
+ ASSERT_EQ(setns(self->child_pidfd_derived_nsfds1[i], info->flag), 0) {
+ TH_LOG("%m - Failed to setns to %s namespace of %d via nsfd %d",
+ info->name, self->child_pid1,
+ self->child_pidfd_derived_nsfds1[i]);
+ }
+ }
+
+ /* Verify that we have changed to the correct namespaces. */
+ if (info->flag == CLONE_NEWPID)
+ nsfd = self->child_pidfd_derived_nsfds[i];
+ else
+ nsfd = self->child_pidfd_derived_nsfds1[i];
+ ASSERT_EQ(in_same_namespace(nsfd, pid, info->name), 1) {
+ TH_LOG("setns failed to place us correctly into %s namespace of %d via nsfd %d",
+ info->name, self->child_pid1,
+ self->child_pidfd_derived_nsfds1[i]);
+ }
+ TH_LOG("Managed to correctly setns to %s namespace of %d via nsfd %d",
+ info->name, self->child_pid1, self->child_pidfd_derived_nsfds1[i]);
+ }
+}
+
TEST_F(current_nsset, pidfd_one_shot_setns)
{
unsigned flags = 0;
@@ -542,6 +716,28 @@ TEST_F(current_nsset, no_foul_play)
info->name, self->child_pid2,
self->child_nsfds2[i]);
}
+
+ /*
+ * Can't setns to a user namespace outside of our hierarchy since we
+ * don't have caps in there and didn't create it. That means that under
+ * no circumstances should we be able to setns to any of the other
+ * ones since they aren't owned by our user namespace.
+ */
+ for (i = 0; i < PIDFD_NS_MAX; i++) {
+ const struct ns_info *info = &ns_info[i];
+
+ if (self->child_pidfd_derived_nsfds2[i] < 0 || !info->flag)
+ continue;
+
+ ASSERT_NE(setns(self->child_pidfd_derived_nsfds2[i], info->flag), 0) {
+ TH_LOG("Managed to setns to %s namespace of %d via nsfd %d",
+ info->name, self->child_pid2,
+ self->child_pidfd_derived_nsfds2[i]);
+ }
+ TH_LOG("%m - Correctly failed to setns to %s namespace of %d via nsfd %d",
+ info->name, self->child_pid2,
+ self->child_pidfd_derived_nsfds2[i]);
+ }
}
TEST(setns_einval)
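
The PIDFD_GET_*_NAMESPACE ioctls exercised above let a pidfd holder obtain namespace file descriptors directly from pidfs instead of opening /proc/<pid>/ns/<name>. A minimal sketch of joining another process's network namespace that way; the target PID is a placeholder, the fallback ioctl definition matches the one added to the test, and older kernels return EOPNOTSUPP:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/ioctl.h>

#ifndef PIDFS_IOCTL_MAGIC
#define PIDFS_IOCTL_MAGIC 0xFF
#endif
#ifndef PIDFD_GET_NET_NAMESPACE
#define PIDFD_GET_NET_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 4)
#endif

int main(int argc, char *argv[])
{
	pid_t pid = argc > 1 ? atoi(argv[1]) : 1;	/* target PID; placeholder default */

	int pidfd = syscall(__NR_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* Ask pidfs for an fd referring to the target's net namespace. */
	int nsfd = ioctl(pidfd, PIDFD_GET_NET_NAMESPACE, 0);
	if (nsfd < 0) {
		perror("ioctl(PIDFD_GET_NET_NAMESPACE)");	/* EOPNOTSUPP on older kernels */
		return 1;
	}

	/* Joining it needs CAP_SYS_ADMIN in the namespace's owning user namespace. */
	if (setns(nsfd, CLONE_NEWNET) < 0) {
		perror("setns");
		return 1;
	}

	printf("joined the net namespace of pid %d\n", pid);
	close(nsfd);
	close(pidfd);
	return 0;
}
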
diff --git a/tools/testing/selftests/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore
index a8dc51af5a9c..30d5c8f0e5c7 100644
--- a/tools/testing/selftests/vDSO/.gitignore
+++ b/tools/testing/selftests/vDSO/.gitignore
@@ -6,3 +6,5 @@ vdso_test_correctness
vdso_test_gettimeofday
vdso_test_getcpu
vdso_standalone_test_x86
+vdso_test_getrandom
+vdso_test_chacha
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
index 98d8ba2afa00..3de8e7e052ae 100644
--- a/tools/testing/selftests/vDSO/Makefile
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+SODIUM := $(shell pkg-config --libs libsodium 2>/dev/null)
TEST_GEN_PROGS := vdso_test_gettimeofday
TEST_GEN_PROGS += vdso_test_getcpu
@@ -10,6 +11,12 @@ ifeq ($(ARCH),$(filter $(ARCH),x86 x86_64))
TEST_GEN_PROGS += vdso_standalone_test_x86
endif
TEST_GEN_PROGS += vdso_test_correctness
+ifeq ($(uname_M),x86_64)
+TEST_GEN_PROGS += vdso_test_getrandom
+ifneq ($(SODIUM),)
+TEST_GEN_PROGS += vdso_test_chacha
+endif
+endif
CFLAGS := -std=gnu99
@@ -28,3 +35,14 @@ $(OUTPUT)/vdso_standalone_test_x86: CFLAGS +=-nostdlib -fno-asynchronous-unwind-
$(OUTPUT)/vdso_test_correctness: vdso_test_correctness.c
$(OUTPUT)/vdso_test_correctness: LDFLAGS += -ldl
+
+$(OUTPUT)/vdso_test_getrandom: parse_vdso.c
+$(OUTPUT)/vdso_test_getrandom: CFLAGS += -isystem $(top_srcdir)/tools/include \
+ -isystem $(top_srcdir)/include/uapi
+
+$(OUTPUT)/vdso_test_chacha: $(top_srcdir)/arch/$(ARCH)/entry/vdso/vgetrandom-chacha.S
+$(OUTPUT)/vdso_test_chacha: CFLAGS += -idirafter $(top_srcdir)/tools/include \
+ -isystem $(top_srcdir)/arch/$(ARCH)/include \
+ -isystem $(top_srcdir)/include \
+	-D__ASSEMBLY__ -DBUILD_VDSO -DCONFIG_FUNCTION_ALIGNMENT=0 \
+ -Wa,--noexecstack $(SODIUM)
diff --git a/tools/testing/selftests/vDSO/vdso_test_chacha.c b/tools/testing/selftests/vDSO/vdso_test_chacha.c
new file mode 100644
index 000000000000..e38f44e5f803
--- /dev/null
+++ b/tools/testing/selftests/vDSO/vdso_test_chacha.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <sodium/crypto_stream_chacha20.h>
+#include <sys/random.h>
+#include <string.h>
+#include <stdint.h>
+#include "../kselftest.h"
+
+extern void __arch_chacha20_blocks_nostack(uint8_t *dst_bytes, const uint8_t *key, uint32_t *counter, size_t nblocks);
+
+int main(int argc, char *argv[])
+{
+ enum { TRIALS = 1000, BLOCKS = 128, BLOCK_SIZE = 64 };
+ static const uint8_t nonce[8] = { 0 };
+ uint32_t counter[2];
+ uint8_t key[32];
+ uint8_t output1[BLOCK_SIZE * BLOCKS], output2[BLOCK_SIZE * BLOCKS];
+
+ ksft_print_header();
+ ksft_set_plan(1);
+
+ for (unsigned int trial = 0; trial < TRIALS; ++trial) {
+ if (getrandom(key, sizeof(key), 0) != sizeof(key)) {
+ printf("getrandom() failed!\n");
+ return KSFT_SKIP;
+ }
+ crypto_stream_chacha20(output1, sizeof(output1), nonce, key);
+ for (unsigned int split = 0; split < BLOCKS; ++split) {
+ memset(output2, 'X', sizeof(output2));
+ memset(counter, 0, sizeof(counter));
+ if (split)
+ __arch_chacha20_blocks_nostack(output2, key, counter, split);
+ __arch_chacha20_blocks_nostack(output2 + split * BLOCK_SIZE, key, counter, BLOCKS - split);
+ if (memcmp(output1, output2, sizeof(output1)))
+ return KSFT_FAIL;
+ }
+ }
+ ksft_test_result_pass("chacha: PASS\n");
+ return KSFT_PASS;
+}
diff --git a/tools/testing/selftests/vDSO/vdso_test_getrandom.c b/tools/testing/selftests/vDSO/vdso_test_getrandom.c
new file mode 100644
index 000000000000..05122425a873
--- /dev/null
+++ b/tools/testing/selftests/vDSO/vdso_test_getrandom.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/auxv.h>
+#include <sys/mman.h>
+#include <sys/random.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <linux/random.h>
+
+#include "../kselftest.h"
+#include "parse_vdso.h"
+
+#ifndef timespecsub
+#define timespecsub(tsp, usp, vsp) \
+ do { \
+ (vsp)->tv_sec = (tsp)->tv_sec - (usp)->tv_sec; \
+ (vsp)->tv_nsec = (tsp)->tv_nsec - (usp)->tv_nsec; \
+ if ((vsp)->tv_nsec < 0) { \
+ (vsp)->tv_sec--; \
+ (vsp)->tv_nsec += 1000000000L; \
+ } \
+ } while (0)
+#endif
+
+static struct {
+ pthread_mutex_t lock;
+ void **states;
+ size_t len, cap;
+} grnd_allocator = {
+ .lock = PTHREAD_MUTEX_INITIALIZER
+};
+
+static struct {
+ ssize_t(*fn)(void *, size_t, unsigned long, void *, size_t);
+ pthread_key_t key;
+ pthread_once_t initialized;
+ struct vgetrandom_opaque_params params;
+} grnd_ctx = {
+ .initialized = PTHREAD_ONCE_INIT
+};
+
+static void *vgetrandom_get_state(void)
+{
+ void *state = NULL;
+
+ pthread_mutex_lock(&grnd_allocator.lock);
+ if (!grnd_allocator.len) {
+ size_t page_size = getpagesize();
+ size_t new_cap;
+ size_t alloc_size, num = sysconf(_SC_NPROCESSORS_ONLN); /* Just a decent heuristic. */
+ void *new_block, *new_states;
+
+ alloc_size = (num * grnd_ctx.params.size_of_opaque_state + page_size - 1) & (~(page_size - 1));
+ num = (page_size / grnd_ctx.params.size_of_opaque_state) * (alloc_size / page_size);
+ new_block = mmap(0, alloc_size, grnd_ctx.params.mmap_prot, grnd_ctx.params.mmap_flags, -1, 0);
+ if (new_block == MAP_FAILED)
+ goto out;
+
+ new_cap = grnd_allocator.cap + num;
+ new_states = reallocarray(grnd_allocator.states, new_cap, sizeof(*grnd_allocator.states));
+ if (!new_states)
+ goto unmap;
+ grnd_allocator.cap = new_cap;
+ grnd_allocator.states = new_states;
+
+ for (size_t i = 0; i < num; ++i) {
+ if (((uintptr_t)new_block & (page_size - 1)) + grnd_ctx.params.size_of_opaque_state > page_size)
+ new_block = (void *)(((uintptr_t)new_block + page_size - 1) & (~(page_size - 1)));
+ grnd_allocator.states[i] = new_block;
+ new_block += grnd_ctx.params.size_of_opaque_state;
+ }
+ grnd_allocator.len = num;
+ goto success;
+
+ unmap:
+ munmap(new_block, alloc_size);
+ goto out;
+ }
+success:
+ state = grnd_allocator.states[--grnd_allocator.len];
+
+out:
+ pthread_mutex_unlock(&grnd_allocator.lock);
+ return state;
+}
+
+static void vgetrandom_put_state(void *state)
+{
+ if (!state)
+ return;
+ pthread_mutex_lock(&grnd_allocator.lock);
+ grnd_allocator.states[grnd_allocator.len++] = state;
+ pthread_mutex_unlock(&grnd_allocator.lock);
+}
+
+static void vgetrandom_init(void)
+{
+ if (pthread_key_create(&grnd_ctx.key, vgetrandom_put_state) != 0)
+ return;
+ unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
+ if (!sysinfo_ehdr) {
+ printf("AT_SYSINFO_EHDR is not present!\n");
+ exit(KSFT_SKIP);
+ }
+ vdso_init_from_sysinfo_ehdr(sysinfo_ehdr);
+ grnd_ctx.fn = (__typeof__(grnd_ctx.fn))vdso_sym("LINUX_2.6", "__vdso_getrandom");
+ if (!grnd_ctx.fn) {
+ printf("__vdso_getrandom is missing!\n");
+ exit(KSFT_FAIL);
+ }
+ if (grnd_ctx.fn(NULL, 0, 0, &grnd_ctx.params, ~0UL) != 0) {
+ printf("failed to fetch vgetrandom params!\n");
+ exit(KSFT_FAIL);
+ }
+}
+
+static ssize_t vgetrandom(void *buf, size_t len, unsigned long flags)
+{
+ void *state;
+
+ pthread_once(&grnd_ctx.initialized, vgetrandom_init);
+ state = pthread_getspecific(grnd_ctx.key);
+ if (!state) {
+ state = vgetrandom_get_state();
+ if (pthread_setspecific(grnd_ctx.key, state) != 0) {
+ vgetrandom_put_state(state);
+ state = NULL;
+ }
+ if (!state) {
+ printf("vgetrandom_get_state failed!\n");
+ exit(KSFT_FAIL);
+ }
+ }
+ return grnd_ctx.fn(buf, len, flags, state, grnd_ctx.params.size_of_opaque_state);
+}
+
+enum { TRIALS = 25000000, THREADS = 256 };
+
+static void *test_vdso_getrandom(void *)
+{
+ for (size_t i = 0; i < TRIALS; ++i) {
+ unsigned int val;
+ ssize_t ret = vgetrandom(&val, sizeof(val), 0);
+ assert(ret == sizeof(val));
+ }
+ return NULL;
+}
+
+static void *test_libc_getrandom(void *)
+{
+ for (size_t i = 0; i < TRIALS; ++i) {
+ unsigned int val;
+ ssize_t ret = getrandom(&val, sizeof(val), 0);
+ assert(ret == sizeof(val));
+ }
+ return NULL;
+}
+
+static void *test_syscall_getrandom(void *)
+{
+ for (size_t i = 0; i < TRIALS; ++i) {
+ unsigned int val;
+ ssize_t ret = syscall(__NR_getrandom, &val, sizeof(val), 0);
+ assert(ret == sizeof(val));
+ }
+ return NULL;
+}
+
+static void bench_single(void)
+{
+ struct timespec start, end, diff;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ test_vdso_getrandom(NULL);
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ timespecsub(&end, &start, &diff);
+ printf(" vdso: %u times in %lu.%09lu seconds\n", TRIALS, diff.tv_sec, diff.tv_nsec);
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ test_libc_getrandom(NULL);
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ timespecsub(&end, &start, &diff);
+ printf(" libc: %u times in %lu.%09lu seconds\n", TRIALS, diff.tv_sec, diff.tv_nsec);
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ test_syscall_getrandom(NULL);
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ timespecsub(&end, &start, &diff);
+ printf("syscall: %u times in %lu.%09lu seconds\n", TRIALS, diff.tv_sec, diff.tv_nsec);
+}
+
+static void bench_multi(void)
+{
+ struct timespec start, end, diff;
+ pthread_t threads[THREADS];
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (size_t i = 0; i < THREADS; ++i)
+ assert(pthread_create(&threads[i], NULL, test_vdso_getrandom, NULL) == 0);
+ for (size_t i = 0; i < THREADS; ++i)
+ pthread_join(threads[i], NULL);
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ timespecsub(&end, &start, &diff);
+ printf(" vdso: %u x %u times in %lu.%09lu seconds\n", TRIALS, THREADS, diff.tv_sec, diff.tv_nsec);
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (size_t i = 0; i < THREADS; ++i)
+ assert(pthread_create(&threads[i], NULL, test_libc_getrandom, NULL) == 0);
+ for (size_t i = 0; i < THREADS; ++i)
+ pthread_join(threads[i], NULL);
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ timespecsub(&end, &start, &diff);
+ printf(" libc: %u x %u times in %lu.%09lu seconds\n", TRIALS, THREADS, diff.tv_sec, diff.tv_nsec);
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (size_t i = 0; i < THREADS; ++i)
+ assert(pthread_create(&threads[i], NULL, test_syscall_getrandom, NULL) == 0);
+ for (size_t i = 0; i < THREADS; ++i)
+ pthread_join(threads[i], NULL);
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ timespecsub(&end, &start, &diff);
+ printf(" syscall: %u x %u times in %lu.%09lu seconds\n", TRIALS, THREADS, diff.tv_sec, diff.tv_nsec);
+}
+
+static void fill(void)
+{
+ uint8_t weird_size[323929];
+ for (;;)
+ vgetrandom(weird_size, sizeof(weird_size), 0);
+}
+
+static void kselftest(void)
+{
+ uint8_t weird_size[1263];
+
+ ksft_print_header();
+ ksft_set_plan(1);
+
+ for (size_t i = 0; i < 1000; ++i) {
+ ssize_t ret = vgetrandom(weird_size, sizeof(weird_size), 0);
+ if (ret != sizeof(weird_size))
+ exit(KSFT_FAIL);
+ }
+
+ ksft_test_result_pass("getrandom: PASS\n");
+ exit(KSFT_PASS);
+}
+
+static void usage(const char *argv0)
+{
+ fprintf(stderr, "Usage: %s [bench-single|bench-multi|fill]\n", argv0);
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc == 1) {
+ kselftest();
+ return 0;
+ }
+
+ if (argc != 2) {
+ usage(argv[0]);
+ return 1;
+ }
+ if (!strcmp(argv[1], "bench-single"))
+ bench_single();
+ else if (!strcmp(argv[1], "bench-multi"))
+ bench_multi();
+ else if (!strcmp(argv[1], "fill"))
+ fill();
+ else {
+ usage(argv[0]);
+ return 1;
+ }
+ return 0;
+}
diff --git a/tools/tracing/latency/Makefile.config b/tools/tracing/latency/Makefile.config
index b25e531a1f95..0fe6b50f029b 100644
--- a/tools/tracing/latency/Makefile.config
+++ b/tools/tracing/latency/Makefile.config
@@ -3,8 +3,9 @@
STOP_ERROR :=
define lib_setup
- $(eval EXTLIBS += -l$(1))
$(eval LIB_INCLUDES += $(shell sh -c "$(PKG_CONFIG) --cflags lib$(1)"))
+ $(eval LDFLAGS += $(shell sh -c "$(PKG_CONFIG) --libs-only-L lib$(1)"))
+ $(eval EXTLIBS += $(shell sh -c "$(PKG_CONFIG) --libs-only-l lib$(1)"))
endef
$(call feature_check,libtraceevent)
diff --git a/tools/tracing/rtla/Makefile.config b/tools/tracing/rtla/Makefile.config
index 0b7ecfb30d19..5f8c286712d4 100644
--- a/tools/tracing/rtla/Makefile.config
+++ b/tools/tracing/rtla/Makefile.config
@@ -7,7 +7,8 @@ LIBTRACEFS_MIN_VERSION = 1.6
define lib_setup
$(eval LIB_INCLUDES += $(shell sh -c "$(PKG_CONFIG) --cflags lib$(1)"))
- $(eval EXTLIBS += -l$(1))
+ $(eval LDFLAGS += $(shell sh -c "$(PKG_CONFIG) --libs-only-L lib$(1)"))
+ $(eval EXTLIBS += $(shell sh -c "$(PKG_CONFIG) --libs-only-l lib$(1)"))
endef
$(call feature_check,libtraceevent)
diff --git a/tools/verification/rv/Makefile.config b/tools/verification/rv/Makefile.config
index 6d4ba77847b6..066302230eb2 100644
--- a/tools/verification/rv/Makefile.config
+++ b/tools/verification/rv/Makefile.config
@@ -7,7 +7,8 @@ LIBTRACEFS_MIN_VERSION = 1.3
define lib_setup
$(eval LIB_INCLUDES += $(shell sh -c "$(PKG_CONFIG) --cflags lib$(1)"))
- $(eval EXTLIBS += -l$(1))
+ $(eval LDFLAGS += $(shell sh -c "$(PKG_CONFIG) --libs-only-L lib$(1)"))
+ $(eval EXTLIBS += $(shell sh -c "$(PKG_CONFIG) --libs-only-l lib$(1)"))
endef
$(call feature_check,libtraceevent)
diff --git a/usr/Makefile b/usr/Makefile
index 132ef7e96e6d..f1779496bca7 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -62,8 +62,8 @@ $(deps_initramfs): ;
quiet_cmd_initfs = GEN $@
cmd_initfs = \
$(CONFIG_SHELL) $< -o $@ -l $(obj)/.initramfs_data.cpio.d \
- $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
- $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) \
+ $(addprefix -u , $(CONFIG_INITRAMFS_ROOT_UID)) \
+ $(addprefix -g , $(CONFIG_INITRAMFS_ROOT_GID)) \
$(if $(KBUILD_BUILD_TIMESTAMP), -d "$(KBUILD_BUILD_TIMESTAMP)") \
$(ramfs-input)